-rw-r--r--  arch/arm/include/asm/dma-contiguous.h |   1
-rw-r--r--  arch/x86/include/asm/dma-contiguous.h |   1
-rw-r--r--  drivers/base/dma-contiguous.c         | 119
-rw-r--r--  include/asm-generic/dma-contiguous.h  |  28
-rw-r--r--  include/linux/device.h                |   2
-rw-r--r--  include/linux/dma-contiguous.h        |  62
6 files changed, 105 insertions, 108 deletions
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index e072bb2ba1b1..4f8e9e5514b1 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -5,7 +5,6 @@
 #ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
 
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
index c09241659971..b4b38bacb404 100644
--- a/arch/x86/include/asm/dma-contiguous.h
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 static inline void
 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca54421ce97..99802d6f3c60 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 #endif
 
 /**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
  * This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 #endif
         }
 
-        if (selected_size) {
+        if (selected_size && !dma_contiguous_default_area) {
                 pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                          (unsigned long)selected_size / SZ_1M);
 
-                dma_declare_contiguous(NULL, selected_size, 0, limit);
+                dma_contiguous_reserve_area(selected_size, 0, limit,
+                                            &dma_contiguous_default_area);
         }
 };
 
 static DEFINE_MUTEX(cma_mutex);
 
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
 {
-        unsigned long pfn = base_pfn;
-        unsigned i = count >> pageblock_order;
+        int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+        unsigned i = cma->count >> pageblock_order;
         struct zone *zone;
 
+        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+        if (!cma->bitmap)
+                return -ENOMEM;
+
         WARN_ON_ONCE(!pfn_valid(pfn));
         zone = page_zone(pfn_to_page(pfn));
 
@@ -153,92 +160,53 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
                 }
                 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
         } while (--i);
-        return 0;
-}
-
-static __init struct cma *cma_create_area(unsigned long base_pfn,
-                                          unsigned long count)
-{
-        int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
-        struct cma *cma;
-        int ret = -ENOMEM;
-
-        pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
-        cma = kmalloc(sizeof *cma, GFP_KERNEL);
-        if (!cma)
-                return ERR_PTR(-ENOMEM);
-
-        cma->base_pfn = base_pfn;
-        cma->count = count;
-        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-        if (!cma->bitmap)
-                goto no_mem;
-
-        ret = cma_activate_area(base_pfn, count);
-        if (ret)
-                goto error;
-
-        pr_debug("%s: returned %p\n", __func__, (void *)cma);
-        return cma;
-
-error:
-        kfree(cma->bitmap);
-no_mem:
-        kfree(cma);
-        return ERR_PTR(ret);
+        return 0;
 }
 
-static struct cma_reserved {
-        phys_addr_t start;
-        unsigned long size;
-        struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
 
 static int __init cma_init_reserved_areas(void)
 {
-        struct cma_reserved *r = cma_reserved;
-        unsigned i = cma_reserved_count;
-
-        pr_debug("%s()\n", __func__);
+        int i;
 
-        for (; i; --i, ++r) {
-                struct cma *cma;
-                cma = cma_create_area(PFN_DOWN(r->start),
-                                      r->size >> PAGE_SHIFT);
-                if (!IS_ERR(cma))
-                        dev_set_cma_area(r->dev, cma);
+        for (i = 0; i < cma_area_count; i++) {
+                int ret = cma_activate_area(&cma_areas[i]);
+                if (ret)
+                        return ret;
         }
+
         return 0;
 }
 core_initcall(cma_init_reserved_areas);
 
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *                            for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
  */
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
-                                  phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                       phys_addr_t limit, struct cma **res_cma)
 {
-        struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+        struct cma *cma = &cma_areas[cma_area_count];
         phys_addr_t alignment;
+        int ret = 0;
 
         pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
                  (unsigned long)size, (unsigned long)base,
                  (unsigned long)limit);
 
         /* Sanity checks */
-        if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                 pr_err("Not enough slots for CMA reserved regions!\n");
                 return -ENOSPC;
         }
@@ -256,7 +224,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
         if (base) {
                 if (memblock_is_region_reserved(base, size) ||
                     memblock_reserve(base, size) < 0) {
-                        base = -EBUSY;
+                        ret = -EBUSY;
                         goto err;
                 }
         } else {
@@ -266,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
                  */
                 phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                 if (!addr) {
-                        base = -ENOMEM;
+                        ret = -ENOMEM;
                         goto err;
                 } else {
                         base = addr;
@@ -277,10 +245,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
          * Each reserved area must be initialised later, when more kernel
          * subsystems (like slab allocator) are available.
          */
-        r->start = base;
-        r->size = size;
-        r->dev = dev;
-        cma_reserved_count++;
+        cma->base_pfn = PFN_DOWN(base);
+        cma->count = size >> PAGE_SHIFT;
+        *res_cma = cma;
+        cma_area_count++;
+
         pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
                 (unsigned long)base);
 
@@ -289,7 +258,7 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
         return 0;
 err:
         pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
-        return base;
+        return ret;
 }
 
 /**
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
deleted file mode 100644
index 294b1e755ab2..000000000000
--- a/include/asm-generic/dma-contiguous.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef ASM_DMA_CONTIGUOUS_H
-#define ASM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_CMA
-
-#include <linux/device.h>
-#include <linux/dma-contiguous.h>
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
-        if (dev && dev->cma_area)
-                return dev->cma_area;
-        return dma_contiguous_default_area;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
-{
-        if (dev)
-                dev->cma_area = cma;
-        if (!dev && !dma_contiguous_default_area)
-                dma_contiguous_default_area = cma;
-}
-
-#endif
-#endif
-
-#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index bcf8c0d4cd98..9200cfd75f15 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -711,7 +711,7 @@ struct device {
 
         struct dma_coherent_mem *dma_mem; /* internal for coherent mem
                                              override */
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
         struct cma *cma_area;           /* contiguous memory area for dma
                                            allocations */
 #endif
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 00141d3325fe..3b28f937d959 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -67,9 +67,53 @@ struct device;
 
 extern struct cma *dma_contiguous_default_area;
 
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+        if (dev && dev->cma_area)
+                return dev->cma_area;
+        return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+        if (dev)
+                dev->cma_area = cma;
+}
+
+static inline void dma_contiguous_set_default(struct cma *cma)
+{
+        dma_contiguous_default_area = cma;
+}
+
 void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
-                           phys_addr_t base, phys_addr_t limit);
+
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                       phys_addr_t limit, struct cma **res_cma);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ *                            for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for specified device. It should be
+ * called by board specific code when early allocator (memblock or bootmem)
+ * is still activate.
+ */
+
+static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+                                         phys_addr_t base, phys_addr_t limit)
+{
+        struct cma *cma;
+        int ret;
+        ret = dma_contiguous_reserve_area(size, base, limit, &cma);
+        if (ret == 0)
+                dev_set_cma_area(dev, cma);
+
+        return ret;
+}
 
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                        unsigned int order);
@@ -80,8 +124,22 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 #define MAX_CMA_AREAS   (0)
 
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+        return NULL;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
+
+static inline void dma_contiguous_set_default(struct cma *cma) { }
+
 static inline void dma_contiguous_reserve(phys_addr_t limit) { }
 
+static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                       phys_addr_t limit, struct cma **res_cma) {
+        return -ENOSYS;
+}
+
 static inline
 int dma_declare_contiguous(struct device *dev, phys_addr_t size,
                            phys_addr_t base, phys_addr_t limit)
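
A minimal usage sketch of the API introduced by this patch (not part of the commit itself): arch or board setup code reserves an area with dma_contiguous_reserve_area() while the early allocator is still active and then attaches it to a device with dev_set_cma_area(), which is what the dma_declare_contiguous() wrapper above now does internally. The device foo_dev, the foo_cma pointer and the foo_reserve_cma() helper are hypothetical names used only for illustration.

#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/init.h>

/* Hypothetical platform device defined elsewhere in the board file. */
extern struct device foo_dev;

static struct cma *foo_cma;     /* region handed back by the reservation */

/*
 * Must run while memblock is still active, e.g. from the machine's
 * early reserve callback, before the initcall that activates the areas.
 */
static void __init foo_reserve_cma(void)
{
        /* 16 MiB anywhere: base 0 = any address, limit 0 = no upper bound. */
        if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &foo_cma) == 0)
                dev_set_cma_area(&foo_dev, foo_cma);
}

With the area attached, later calls to dma_alloc_from_contiguous(&foo_dev, count, order) are expected to be satisfied from this per-device region rather than from dma_contiguous_default_area, since dev_get_cma_area() prefers dev->cma_area when it is set.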