author     Linus Torvalds <torvalds@linux-foundation.org>   2018-06-04 20:58:12 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-06-04 20:58:12 +0300
commit     e5a594643a3444d39c1467040e638bf08a4e0db8 (patch)
tree       e65c94ef60a51559db467055232ce1021ec263e1 /lib
parent     f956d08a56732c61a4d44e8034eeeedfc06fe721 (diff)
parent     2550bbfd495227945e17ed1fa1c05bce4753b86b (diff)
download   linux-e5a594643a3444d39c1467040e638bf08a4e0db8.tar.xz
Merge tag 'dma-mapping-4.18' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- replace the force_dma flag with a dma_configure bus method. (Nipun
Gupta, although one patch is incorrectly attributed to me due to a
git rebase bug)
- use GFP_DMA32 more aggressively in dma-direct. (Takashi Iwai)
- remove PCI_DMA_BUS_IS_PHYS and rely on the dma-mapping API to do the
right thing for bounce buffering.
- move dma-debug initialization to common code, and apply a few
cleanups to the dma-debug code.
- clean up the Kconfig mess around swiotlb selection
- swiotlb comment fixup (Yisheng Xie)
- a trivial swiotlb fix. (Dan Carpenter)
- support swiotlb on RISC-V. (based on a patch from Palmer Dabbelt)
- add a new generic dma-noncoherent dma_map_ops implementation and use
it for arc, c6x and nds32 (a conversion sketch follows this list).
- improve scatterlist validity checking in dma-debug (Robin Murphy); a
driver-side sketch of the segment limits it checks follows this list.
- add a struct device quirk to limit the dma-mask to 32-bit due to
bridge/system issues, and switch x86 to use it instead of a local
hack for VIA bridges (an illustrative quirk sketch follows this list).
- handle devices without a dma_mask more gracefully in the dma-direct
code.
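
For the new generic dma-noncoherent implementation (lib/dma-noncoherent.c in the
diff below), converting an architecture mostly amounts to selecting
DMA_NONCOHERENT_OPS, ARCH_HAS_SYNC_DMA_FOR_DEVICE and ARCH_HAS_SYNC_DMA_FOR_CPU,
then supplying the arch_* hooks the common code calls. A minimal sketch of the
hook side, with placeholder bodies that are not taken from this series:

    /* arch/<arch>/mm/dma-mapping.c: sketch of a dma-noncoherent conversion */
    #include <linux/dma-noncoherent.h>

    /* Called by the common map_page()/map_sg() paths before the device
     * touches the buffer: write back (and possibly invalidate) CPU caches.
     */
    void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
            size_t size, enum dma_data_direction dir)
    {
        /* placeholder: arch-specific cache writeback for [paddr, paddr + size) */
    }

    /* Called on unmap/sync_for_cpu so the CPU sees data the device wrote. */
    void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
            size_t size, enum dma_data_direction dir)
    {
        /* placeholder: arch-specific cache invalidate for [paddr, paddr + size) */
    }

    /*
     * arch_dma_alloc()/arch_dma_free() still have to be provided by the
     * architecture (typically an uncached remapping of normal pages); the
     * common code wires them up as the .alloc/.free members of
     * dma_noncoherent_ops.
     */

Drivers keep using the ordinary dma_map_*() API unchanged; dma_noncoherent_ops
invokes these hooks at the appropriate points.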
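The stricter scatterlist checking appears below as check_sg_segment() behind the
new CONFIG_DMA_API_DEBUG_SG option; it compares each mapped segment against the
limits the device declares through dev->dma_parms. A hedged driver-side sketch of
declaring such limits (the function name and the particular values are
illustrative, and it assumes the bus code has already allocated dev->dma_parms):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    /* Illustrative only: describe what the hardware really supports before
     * any dma_map_sg() call, so callers (and dma-debug's new segment check)
     * can rely on dma_get_max_seg_size()/dma_get_seg_boundary().
     */
    static int example_declare_dma_limits(struct device *dev)
    {
        int ret;

        /* no scatterlist element longer than 64 KiB */
        ret = dma_set_max_seg_size(dev, SZ_64K);
        if (ret)
            return ret;

        /* no element may cross a 4 GiB boundary */
        return dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
    }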
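The 32-bit limit mentioned above is a plain flag on struct device; once it is
set, dma_direct_supported() (see the lib/dma-direct.c hunk below) refuses any
wider DMA mask. A sketch of how quirk code might set it, using a hypothetical
fixup and placeholder PCI IDs rather than the actual VIA quirk from this series:

    #include <linux/pci.h>

    /*
     * Hypothetical example: a bridge is known to corrupt DMA above 4 GiB,
     * so flag the affected device at enumeration time.  A driver that later
     * asks for a DMA mask wider than 32 bits will then be refused by
     * dma_direct_supported().
     */
    static void example_limit_dma_to_32bit(struct pci_dev *pdev)
    {
        pdev->dev.dma_32bit_limit = true;
    }
    DECLARE_PCI_FIXUP_FINAL(0x1234 /* placeholder vendor */,
                0x5678 /* placeholder device */,
                example_limit_dma_to_32bit);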
* tag 'dma-mapping-4.18' of git://git.infradead.org/users/hch/dma-mapping: (48 commits)
dma-direct: don't crash on device without dma_mask
nds32: use generic dma_noncoherent_ops
nds32: implement the unmap_sg DMA operation
nds32: consolidate DMA cache maintainance routines
x86/pci-dma: switch the VIA 32-bit DMA quirk to use the struct device flag
x86/pci-dma: remove the explicit nodac and allowdac option
x86/pci-dma: remove the experimental forcesac boot option
Documentation/x86: remove a stray reference to pci-nommu.c
core, dma-direct: add a flag 32-bit dma limits
dma-mapping: remove unused gfp_t parameter to arch_dma_alloc_attrs
dma-debug: check scatterlist segments
c6x: use generic dma_noncoherent_ops
arc: use generic dma_noncoherent_ops
arc: fix arc_dma_{map,unmap}_page
arc: fix arc_dma_sync_sg_for_{cpu,device}
arc: simplify arc_dma_sync_single_for_{cpu,device}
dma-mapping: provide a generic dma-noncoherent implementation
dma-mapping: simplify Kconfig dependencies
riscv: add swiotlb support
riscv: only enable ZONE_DMA32 for 64-bit
...
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig             43
-rw-r--r--   lib/Kconfig.debug       19
-rw-r--r--   lib/Makefile             3
-rw-r--r--   lib/dma-debug.c         65
-rw-r--r--   lib/dma-direct.c        29
-rw-r--r--   lib/dma-noncoherent.c  102
-rw-r--r--   lib/iommu-common.c     267
-rw-r--r--   lib/iommu-helper.c      14
-rw-r--r--   lib/swiotlb.c           11
9 files changed, 232 insertions, 321 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 5fe577673b98..7a913937888b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -429,15 +429,50 @@ config SGL_ALLOC bool default n +config NEED_SG_DMA_LENGTH + bool + +config NEED_DMA_MAP_STATE + bool + +config ARCH_DMA_ADDR_T_64BIT + def_bool 64BIT || PHYS_ADDR_T_64BIT + +config IOMMU_HELPER + bool + +config ARCH_HAS_SYNC_DMA_FOR_DEVICE + bool + +config ARCH_HAS_SYNC_DMA_FOR_CPU + bool + select NEED_DMA_MAP_STATE + config DMA_DIRECT_OPS bool - depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) - default n + depends on HAS_DMA + +config DMA_NONCOHERENT_OPS + bool + depends on HAS_DMA + select DMA_DIRECT_OPS + +config DMA_NONCOHERENT_MMAP + bool + depends on DMA_NONCOHERENT_OPS + +config DMA_NONCOHERENT_CACHE_SYNC + bool + depends on DMA_NONCOHERENT_OPS config DMA_VIRT_OPS bool - depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) - default n + depends on HAS_DMA + +config SWIOTLB + bool + select DMA_DIRECT_OPS + select NEED_DMA_MAP_STATE config CHECK_SIGNATURE bool diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c40c7b734cd1..76555479ae36 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1634,7 +1634,7 @@ config PROVIDE_OHCI1394_DMA_INIT config DMA_API_DEBUG bool "Enable debugging of DMA-API usage" - depends on HAVE_DMA_API_DEBUG + select NEED_DMA_MAP_STATE help Enable this option to debug the use of the DMA API by device drivers. With this option you will be able to detect common bugs in device @@ -1651,6 +1651,23 @@ config DMA_API_DEBUG If unsure, say N. +config DMA_API_DEBUG_SG + bool "Debug DMA scatter-gather usage" + default y + depends on DMA_API_DEBUG + help + Perform extra checking that callers of dma_map_sg() have respected the + appropriate segment length/boundary limits for the given device when + preparing DMA scatterlists. + + This is particularly likely to have been overlooked in cases where the + dma_map_sg() API is used for general bulk mapping of pages rather than + preparing literal scatter-gather descriptors, where there is a risk of + unexpected behaviour from DMA API implementations if the scatterlist + is technically out-of-spec. + + If unsure, say N. 
+ menuconfig RUNTIME_TESTING_MENU bool "Runtime Testing" def_bool y diff --git a/lib/Makefile b/lib/Makefile index ce20696d5a92..9f18c8152281 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -30,6 +30,7 @@ lib-$(CONFIG_PRINTK) += dump_stack.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o +lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o lib-y += kobject.o klist.o @@ -147,7 +148,7 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o obj-$(CONFIG_SWIOTLB) += swiotlb.o -obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o +obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 7f5cdc1e6b29..c007d25bee09 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -41,6 +41,11 @@ #define HASH_FN_SHIFT 13 #define HASH_FN_MASK (HASH_SIZE - 1) +/* allow architectures to override this if absolutely required */ +#ifndef PREALLOC_DMA_DEBUG_ENTRIES +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) +#endif + enum { dma_debug_single, dma_debug_page, @@ -127,7 +132,7 @@ static u32 min_free_entries; static u32 nr_total_entries; /* number of preallocated entries requested by kernel cmdline */ -static u32 req_entries; +static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; /* debugfs dentry's for the stuff above */ static struct dentry *dma_debug_dent __read_mostly; @@ -439,7 +444,6 @@ void debug_dma_dump_mappings(struct device *dev) spin_unlock_irqrestore(&bucket->lock, flags); } } -EXPORT_SYMBOL(debug_dma_dump_mappings); /* * For each mapping (initial cacheline in the case of @@ -748,7 +752,6 @@ int dma_debug_resize_entries(u32 num_entries) return ret; } -EXPORT_SYMBOL(dma_debug_resize_entries); /* * DMA-API debugging init code @@ -1004,10 +1007,7 @@ void dma_debug_add_bus(struct bus_type *bus) bus_register_notifier(bus, nb); } -/* - * Let the architectures decide how many entries should be preallocated. 
- */ -void dma_debug_init(u32 num_entries) +static int dma_debug_init(void) { int i; @@ -1015,7 +1015,7 @@ void dma_debug_init(u32 num_entries) * called to set dma_debug_initialized */ if (global_disable) - return; + return 0; for (i = 0; i < HASH_SIZE; ++i) { INIT_LIST_HEAD(&dma_entry_hash[i].list); @@ -1026,17 +1026,14 @@ void dma_debug_init(u32 num_entries) pr_err("DMA-API: error creating debugfs entries - disabling\n"); global_disable = true; - return; + return 0; } - if (req_entries) - num_entries = req_entries; - - if (prealloc_memory(num_entries) != 0) { + if (prealloc_memory(nr_prealloc_entries) != 0) { pr_err("DMA-API: debugging out of memory error - disabled\n"); global_disable = true; - return; + return 0; } nr_total_entries = num_free_entries; @@ -1044,7 +1041,9 @@ void dma_debug_init(u32 num_entries) dma_debug_initialized = true; pr_info("DMA-API: debugging enabled by kernel config\n"); + return 0; } +core_initcall(dma_debug_init); static __init int dma_debug_cmdline(char *str) { @@ -1061,16 +1060,10 @@ static __init int dma_debug_cmdline(char *str) static __init int dma_debug_entries_cmdline(char *str) { - int res; - if (!str) return -EINVAL; - - res = get_option(&str, &req_entries); - - if (!res) - req_entries = 0; - + if (!get_option(&str, &nr_prealloc_entries)) + nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; return 0; } @@ -1293,6 +1286,32 @@ out: put_hash_bucket(bucket, &flags); } +static void check_sg_segment(struct device *dev, struct scatterlist *sg) +{ +#ifdef CONFIG_DMA_API_DEBUG_SG + unsigned int max_seg = dma_get_max_seg_size(dev); + u64 start, end, boundary = dma_get_seg_boundary(dev); + + /* + * Either the driver forgot to set dma_parms appropriately, or + * whoever generated the list forgot to check them. + */ + if (sg->length > max_seg) + err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", + sg->length, max_seg); + /* + * In some cases this could potentially be the DMA API + * implementation's fault, but it would usually imply that + * the scatterlist was built inappropriately to begin with. 
+ */ + start = sg_dma_address(sg); + end = start + sg_dma_len(sg) - 1; + if ((start ^ end) & ~boundary) + err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", + start, end, boundary); +#endif +} + void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr, bool map_single) @@ -1423,6 +1442,8 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); } + check_sg_segment(dev, s); + add_dma_entry(entry); } } diff --git a/lib/dma-direct.c b/lib/dma-direct.c index bbfb229aa067..8be8106270c2 100644 --- a/lib/dma-direct.c +++ b/lib/dma-direct.c @@ -34,6 +34,13 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, const char *caller) { if (unlikely(dev && !dma_capable(dev, dma_addr, size))) { + if (!dev->dma_mask) { + dev_err(dev, + "%s: call on device without dma_mask\n", + caller); + return false; + } + if (*dev->dma_mask >= DMA_BIT_MASK(32)) { dev_err(dev, "%s: overflow %pad+%zu of device mask %llx\n", @@ -84,6 +91,13 @@ again: __free_pages(page, page_order); page = NULL; + if (IS_ENABLED(CONFIG_ZONE_DMA32) && + dev->coherent_dma_mask < DMA_BIT_MASK(64) && + !(gfp & (GFP_DMA32 | GFP_DMA))) { + gfp |= GFP_DMA32; + goto again; + } + if (IS_ENABLED(CONFIG_ZONE_DMA) && dev->coherent_dma_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) { @@ -121,7 +135,7 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, free_pages((unsigned long)cpu_addr, page_order); } -static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { @@ -132,8 +146,8 @@ static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, return dma_addr; } -static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) { int i; struct scatterlist *sg; @@ -165,10 +179,16 @@ int dma_direct_supported(struct device *dev, u64 mask) if (mask < DMA_BIT_MASK(32)) return 0; #endif + /* + * Various PCI/PCIe bridges have broken support for > 32bit DMA even + * if the device itself might support it. + */ + if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32)) + return 0; return 1; } -static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) +int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == DIRECT_MAPPING_ERROR; } @@ -180,6 +200,5 @@ const struct dma_map_ops dma_direct_ops = { .map_sg = dma_direct_map_sg, .dma_supported = dma_direct_supported, .mapping_error = dma_direct_mapping_error, - .is_phys = 1, }; EXPORT_SYMBOL(dma_direct_ops); diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c new file mode 100644 index 000000000000..79e9a757387f --- /dev/null +++ b/lib/dma-noncoherent.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Christoph Hellwig. + * + * DMA operations that map physical memory directly without providing cache + * coherence. 
+ */ +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/dma-direct.h> +#include <linux/dma-noncoherent.h> +#include <linux/scatterlist.h> + +static void dma_noncoherent_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); +} + +static void dma_noncoherent_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); +} + +static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + dma_addr_t addr; + + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); + if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(dev, page_to_phys(page) + offset, + size, dir); + return addr; +} + +static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs); + if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir); + return nents; +} + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU +static void dma_noncoherent_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); +} + +static void dma_noncoherent_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); +} + +static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir); +} + +static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir); +} +#endif + +const struct dma_map_ops dma_noncoherent_ops = { + .alloc = arch_dma_alloc, + .free = arch_dma_free, + .mmap = arch_dma_mmap, + .sync_single_for_device = dma_noncoherent_sync_single_for_device, + .sync_sg_for_device = dma_noncoherent_sync_sg_for_device, + .map_page = dma_noncoherent_map_page, + .map_sg = dma_noncoherent_map_sg, +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU + .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu, + .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu, + .unmap_page = dma_noncoherent_unmap_page, + .unmap_sg = dma_noncoherent_unmap_sg, +#endif + .dma_supported = dma_direct_supported, + .mapping_error = dma_direct_mapping_error, + .cache_sync = arch_dma_cache_sync, +}; +EXPORT_SYMBOL(dma_noncoherent_ops); diff --git a/lib/iommu-common.c b/lib/iommu-common.c deleted file mode 100644 index 55b00de106b5..000000000000 --- a/lib/iommu-common.c +++ /dev/null @@ -1,267 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * IOMMU mmap management and range allocation functions. - * Based almost entirely upon the powerpc iommu allocator. 
- */ - -#include <linux/export.h> -#include <linux/bitmap.h> -#include <linux/bug.h> -#include <linux/iommu-helper.h> -#include <linux/iommu-common.h> -#include <linux/dma-mapping.h> -#include <linux/hash.h> - -static unsigned long iommu_large_alloc = 15; - -static DEFINE_PER_CPU(unsigned int, iommu_hash_common); - -static inline bool need_flush(struct iommu_map_table *iommu) -{ - return ((iommu->flags & IOMMU_NEED_FLUSH) != 0); -} - -static inline void set_flush(struct iommu_map_table *iommu) -{ - iommu->flags |= IOMMU_NEED_FLUSH; -} - -static inline void clear_flush(struct iommu_map_table *iommu) -{ - iommu->flags &= ~IOMMU_NEED_FLUSH; -} - -static void setup_iommu_pool_hash(void) -{ - unsigned int i; - static bool do_once; - - if (do_once) - return; - do_once = true; - for_each_possible_cpu(i) - per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS); -} - -/* - * Initialize iommu_pool entries for the iommu_map_table. `num_entries' - * is the number of table entries. If `large_pool' is set to true, - * the top 1/4 of the table will be set aside for pool allocations - * of more than iommu_large_alloc pages. - */ -void iommu_tbl_pool_init(struct iommu_map_table *iommu, - unsigned long num_entries, - u32 table_shift, - void (*lazy_flush)(struct iommu_map_table *), - bool large_pool, u32 npools, - bool skip_span_boundary_check) -{ - unsigned int start, i; - struct iommu_pool *p = &(iommu->large_pool); - - setup_iommu_pool_hash(); - if (npools == 0) - iommu->nr_pools = IOMMU_NR_POOLS; - else - iommu->nr_pools = npools; - BUG_ON(npools > IOMMU_NR_POOLS); - - iommu->table_shift = table_shift; - iommu->lazy_flush = lazy_flush; - start = 0; - if (skip_span_boundary_check) - iommu->flags |= IOMMU_NO_SPAN_BOUND; - if (large_pool) - iommu->flags |= IOMMU_HAS_LARGE_POOL; - - if (!large_pool) - iommu->poolsize = num_entries/iommu->nr_pools; - else - iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools; - for (i = 0; i < iommu->nr_pools; i++) { - spin_lock_init(&(iommu->pools[i].lock)); - iommu->pools[i].start = start; - iommu->pools[i].hint = start; - start += iommu->poolsize; /* start for next pool */ - iommu->pools[i].end = start - 1; - } - if (!large_pool) - return; - /* initialize large_pool */ - spin_lock_init(&(p->lock)); - p->start = start; - p->hint = p->start; - p->end = num_entries; -} -EXPORT_SYMBOL(iommu_tbl_pool_init); - -unsigned long iommu_tbl_range_alloc(struct device *dev, - struct iommu_map_table *iommu, - unsigned long npages, - unsigned long *handle, - unsigned long mask, - unsigned int align_order) -{ - unsigned int pool_hash = __this_cpu_read(iommu_hash_common); - unsigned long n, end, start, limit, boundary_size; - struct iommu_pool *pool; - int pass = 0; - unsigned int pool_nr; - unsigned int npools = iommu->nr_pools; - unsigned long flags; - bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0); - bool largealloc = (large_pool && npages > iommu_large_alloc); - unsigned long shift; - unsigned long align_mask = 0; - - if (align_order > 0) - align_mask = ~0ul >> (BITS_PER_LONG - align_order); - - /* Sanity check */ - if (unlikely(npages == 0)) { - WARN_ON_ONCE(1); - return IOMMU_ERROR_CODE; - } - - if (largealloc) { - pool = &(iommu->large_pool); - pool_nr = 0; /* to keep compiler happy */ - } else { - /* pick out pool_nr */ - pool_nr = pool_hash & (npools - 1); - pool = &(iommu->pools[pool_nr]); - } - spin_lock_irqsave(&pool->lock, flags); - - again: - if (pass == 0 && handle && *handle && - (*handle >= pool->start) && (*handle < pool->end)) - start = 
*handle; - else - start = pool->hint; - - limit = pool->end; - - /* The case below can happen if we have a small segment appended - * to a large, or when the previous alloc was at the very end of - * the available space. If so, go back to the beginning. If a - * flush is needed, it will get done based on the return value - * from iommu_area_alloc() below. - */ - if (start >= limit) - start = pool->start; - shift = iommu->table_map_base >> iommu->table_shift; - if (limit + shift > mask) { - limit = mask - shift + 1; - /* If we're constrained on address range, first try - * at the masked hint to avoid O(n) search complexity, - * but on second pass, start at 0 in pool 0. - */ - if ((start & mask) >= limit || pass > 0) { - spin_unlock(&(pool->lock)); - pool = &(iommu->pools[0]); - spin_lock(&(pool->lock)); - start = pool->start; - } else { - start &= mask; - } - } - - if (dev) - boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - 1 << iommu->table_shift); - else - boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift); - - boundary_size = boundary_size >> iommu->table_shift; - /* - * if the skip_span_boundary_check had been set during init, we set - * things up so that iommu_is_span_boundary() merely checks if the - * (index + npages) < num_tsb_entries - */ - if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) { - shift = 0; - boundary_size = iommu->poolsize * iommu->nr_pools; - } - n = iommu_area_alloc(iommu->map, limit, start, npages, shift, - boundary_size, align_mask); - if (n == -1) { - if (likely(pass == 0)) { - /* First failure, rescan from the beginning. */ - pool->hint = pool->start; - set_flush(iommu); - pass++; - goto again; - } else if (!largealloc && pass <= iommu->nr_pools) { - spin_unlock(&(pool->lock)); - pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1); - pool = &(iommu->pools[pool_nr]); - spin_lock(&(pool->lock)); - pool->hint = pool->start; - set_flush(iommu); - pass++; - goto again; - } else { - /* give up */ - n = IOMMU_ERROR_CODE; - goto bail; - } - } - if (iommu->lazy_flush && - (n < pool->hint || need_flush(iommu))) { - clear_flush(iommu); - iommu->lazy_flush(iommu); - } - - end = n + npages; - pool->hint = end; - - /* Update handle for SG allocations */ - if (handle) - *handle = end; -bail: - spin_unlock_irqrestore(&(pool->lock), flags); - - return n; -} -EXPORT_SYMBOL(iommu_tbl_range_alloc); - -static struct iommu_pool *get_pool(struct iommu_map_table *tbl, - unsigned long entry) -{ - struct iommu_pool *p; - unsigned long largepool_start = tbl->large_pool.start; - bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0); - - /* The large pool is the last pool at the top of the table */ - if (large_pool && entry >= largepool_start) { - p = &tbl->large_pool; - } else { - unsigned int pool_nr = entry / tbl->poolsize; - - BUG_ON(pool_nr >= tbl->nr_pools); - p = &tbl->pools[pool_nr]; - } - return p; -} - -/* Caller supplies the index of the entry into the iommu map table - * itself when the mapping from dma_addr to the entry is not the - * default addr->entry mapping below. 
- */ -void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr, - unsigned long npages, unsigned long entry) -{ - struct iommu_pool *pool; - unsigned long flags; - unsigned long shift = iommu->table_shift; - - if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */ - entry = (dma_addr - iommu->table_map_base) >> shift; - pool = get_pool(iommu, entry); - - spin_lock_irqsave(&(pool->lock), flags); - bitmap_clear(iommu->map, entry, npages); - spin_unlock_irqrestore(&(pool->lock), flags); -} -EXPORT_SYMBOL(iommu_tbl_range_free); diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index 23633c0fda4a..92a9f243c0e2 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c @@ -3,19 +3,8 @@ * IOMMU helper functions for the free area management */ -#include <linux/export.h> #include <linux/bitmap.h> -#include <linux/bug.h> - -int iommu_is_span_boundary(unsigned int index, unsigned int nr, - unsigned long shift, - unsigned long boundary_size) -{ - BUG_ON(!is_power_of_2(boundary_size)); - - shift = (shift + index) & (boundary_size - 1); - return shift + nr > boundary_size; -} +#include <linux/iommu-helper.h> unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, @@ -38,4 +27,3 @@ again: } return -1; } -EXPORT_SYMBOL(iommu_area_alloc); diff --git a/lib/swiotlb.c b/lib/swiotlb.c index cc640588f145..04b68d9dffac 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -593,9 +593,8 @@ found: } /* - * Allocates bounce buffer and returns its kernel virtual address. + * Allocates bounce buffer and returns its physical address. */ - static phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -614,7 +613,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, } /* - * dma_addr is the kernel virtual address of the bounce buffer to unmap. + * tlb_addr is the physical address of the bounce buffer to unmap. */ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir, @@ -692,7 +691,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, } } -#ifdef CONFIG_DMA_DIRECT_OPS static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, size_t size) { @@ -727,7 +725,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, out_unmap: dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", - (unsigned long long)(dev ? dev->coherent_dma_mask : 0), + (unsigned long long)dev->coherent_dma_mask, (unsigned long long)*dma_handle); /* @@ -764,7 +762,6 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size, DMA_ATTR_SKIP_CPU_SYNC); return true; } -#endif static void swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, @@ -1045,7 +1042,6 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask) return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; } -#ifdef CONFIG_DMA_DIRECT_OPS void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { @@ -1089,4 +1085,3 @@ const struct dma_map_ops swiotlb_dma_ops = { .unmap_page = swiotlb_unmap_page, .dma_supported = dma_direct_supported, }; -#endif /* CONFIG_DMA_DIRECT_OPS */ |