Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 210
1 file changed, 58 insertions(+), 152 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7a996aaa061e..c245d903927f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -12,6 +12,7 @@
 #include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/genalloc.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
@@ -298,57 +299,29 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	struct vm_struct *area;
-	unsigned long addr;
-
 	/*
 	 * DMA allocation can be mapped to user space, so lets
 	 * set VM_USERMAP flags too.
 	 */
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
-	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
-		vunmap((void *)addr);
-		return NULL;
-	}
-	return (void *)addr;
+	return dma_common_contiguous_remap(page, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+			prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
-	struct vm_struct *area = find_vm_area(cpu_addr);
-	if (!area || (area->flags & flags) != flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-		return;
-	}
-	unmap_kernel_range((unsigned long)cpu_addr, size);
-	vunmap(cpu_addr);
+	dma_common_free_remap(cpu_addr, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+static struct gen_pool *atomic_pool;
 
-struct dma_pool {
-	size_t size;
-	spinlock_t lock;
-	unsigned long *bitmap;
-	unsigned long nr_pages;
-	void *vaddr;
-	struct page **pages;
-};
-
-static struct dma_pool atomic_pool = {
-	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
-};
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
 static int __init early_coherent_pool(char *p)
 {
-	atomic_pool.size = memparse(p, &p);
+	atomic_pool_size = memparse(p, &p);
 	return 0;
 }
 early_param("coherent_pool", early_coherent_pool);
@@ -358,14 +331,14 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 	/*
 	 * Catch any attempt to set the pool size too late.
 	 */
-	BUG_ON(atomic_pool.vaddr);
+	BUG_ON(atomic_pool);
 
 	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
-	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
-		atomic_pool.size = size;
+	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool_size = size;
 }
 
 /*
@@ -373,52 +346,44 @@ void __init init_dma_coherent_pool_size(unsigned long size)
  */
 static int __init atomic_pool_init(void)
 {
-	struct dma_pool *pool = &atomic_pool;
 	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
-	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
-	unsigned long *bitmap;
 	struct page *page;
-	struct page **pages;
 	void *ptr;
-	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
-	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!bitmap)
-		goto no_bitmap;
-
-	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
-		goto no_pages;
+	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!atomic_pool)
+		goto out;
 
 	if (dev_get_cma_area(NULL))
-		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-					      atomic_pool_init);
+		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+					      &page, atomic_pool_init);
 	else
-		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
-					   atomic_pool_init);
+		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
+					   &page, atomic_pool_init);
 	if (ptr) {
-		int i;
-
-		for (i = 0; i < nr_pages; i++)
-			pages[i] = page + i;
-
-		spin_lock_init(&pool->lock);
-		pool->vaddr = ptr;
-		pool->pages = pages;
-		pool->bitmap = bitmap;
-		pool->nr_pages = nr_pages;
-		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
-		       (unsigned)pool->size / 1024);
+		int ret;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto destroy_genpool;
+
+		gen_pool_set_algo(atomic_pool,
+				gen_pool_first_fit_order_align,
+				(void *)PAGE_SHIFT);
+		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
+		       atomic_pool_size / 1024);
 		return 0;
 	}
 
-	kfree(pages);
-no_pages:
-	kfree(bitmap);
-no_bitmap:
-	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
-	       (unsigned)pool->size / 1024);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+out:
+	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
+	       atomic_pool_size / 1024);
 	return -ENOMEM;
 }
 /*
@@ -522,76 +487,36 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 static void *__alloc_from_pool(size_t size, struct page **ret_page)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned int pageno;
-	unsigned long flags;
+	unsigned long val;
 	void *ptr = NULL;
-	unsigned long align_mask;
 
-	if (!pool->vaddr) {
+	if (!atomic_pool) {
 		WARN(1, "coherent pool not initialised!\n");
 		return NULL;
 	}
 
-	/*
-	 * Align the region allocation - allocations from pool are rather
-	 * small, so align them to their order in pages, minimum is a page
-	 * size. This helps reduce fragmentation of the DMA space.
-	 */
-	align_mask = (1 << get_order(size)) - 1;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-					    0, count, align_mask);
-	if (pageno < pool->nr_pages) {
-		bitmap_set(pool->bitmap, pageno, count);
-		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->pages[pageno];
-	} else {
-		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
-			    "Please increase it with coherent_pool= kernel parameter!\n",
-			    (unsigned)pool->size / 1024);
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
 	}
-	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
 static bool __in_atomic_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	void *end = start + size;
-	void *pool_start = pool->vaddr;
-	void *pool_end = pool->vaddr + pool->size;
-
-	if (start < pool_start || start >= pool_end)
-		return false;
-
-	if (end <= pool_end)
-		return true;
-
-	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
-	     start, end - 1, pool_start, pool_end - 1);
-
-	return false;
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
 }
 
 static int __free_from_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned long pageno, count;
-	unsigned long flags;
-
 	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
-	count = size >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	bitmap_clear(pool->bitmap, pageno, count);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
 
 	return 1;
 }
@@ -1271,29 +1196,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 		    const void *caller)
 {
-	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct vm_struct *area;
-	unsigned long p;
-
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-	area->nr_pages = nr_pages;
-	p = (unsigned long)area->addr;
-
-	for (i = 0; i < nr_pages; i++) {
-		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
-		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
-			goto err;
-		p += PAGE_SIZE;
-	}
-	return area->addr;
-err:
-	unmap_kernel_range((unsigned long)area->addr, size);
-	vunmap(area->addr);
-	return NULL;
+	return dma_common_pages_remap(pages, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
 }
@@ -1355,11 +1259,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 
 static struct page **__atomic_get_pages(void *addr)
 {
-	struct dma_pool *pool = &atomic_pool;
-	struct page **pages = pool->pages;
-	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+	struct page *page;
+	phys_addr_t phys;
+
+	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+	page = phys_to_page(phys);
 
-	return pages + offs;
+	return (struct page **)page;
 }
 
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
@@ -1501,8 +1407,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
-		unmap_kernel_range((unsigned long)cpu_addr, size);
-		vunmap(cpu_addr);
+		dma_common_free_remap(cpu_addr, size,
+				VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
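Note: the patch above replaces the hand-rolled bitmap allocator in
arch/arm/mm/dma-mapping.c with the kernel's genalloc library. As a rough
illustration of the gen_pool lifecycle the patch relies on, the sketch below
shows the same create/add/alloc/free sequence in isolation. It is a minimal
sketch, not code from the patch: the example_* names and the vaddr/phys/size
parameters are hypothetical, and error handling is condensed.

#include <linux/genalloc.h>
#include <linux/mm.h>

/* Hypothetical sketch of the gen_pool lifecycle used by the patch above. */
static struct gen_pool *example_pool;

static int example_pool_init(void *vaddr, phys_addr_t phys, size_t size)
{
	int ret;

	/* allocation granularity of one page, no NUMA node affinity */
	example_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!example_pool)
		return -ENOMEM;

	/* register one virtually contiguous span and its physical base */
	ret = gen_pool_add_virt(example_pool, (unsigned long)vaddr,
				phys, size, -1);
	if (ret) {
		gen_pool_destroy(example_pool);
		example_pool = NULL;
		return ret;
	}

	/* align each allocation to its own order, as the old bitmap code did */
	gen_pool_set_algo(example_pool, gen_pool_first_fit_order_align,
			  (void *)PAGE_SHIFT);
	return 0;
}

static void *example_alloc(size_t size, struct page **ret_page)
{
	unsigned long va = gen_pool_alloc(example_pool, size);

	if (!va)
		return NULL;
	/* recover the backing struct page via the registered phys base */
	*ret_page = phys_to_page(gen_pool_virt_to_phys(example_pool, va));
	return (void *)va;
}

static int example_free(void *va, size_t size)
{
	/* refuse addresses the pool never handed out */
	if (!addr_in_gen_pool(example_pool, (unsigned long)va, size))
		return 0;
	gen_pool_free(example_pool, (unsigned long)va, size);
	return 1;
}

Because genalloc does its own lockless bookkeeping, the old dma_pool's
spinlock, bitmap and page array can all be dropped, while
gen_pool_first_fit_order_align preserves the old allocator's behaviour of
aligning each allocation to its size order to limit fragmentation.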