author    Christoph Hellwig <hch@lst.de>  2017-12-22 13:51:44 +0300
committer Christoph Hellwig <hch@lst.de>  2018-01-15 11:35:09 +0300
commit    080321d3b3139b3b3ec0da31a7887eebd50b5979 (patch)
tree      8ee0759cc37280fe67d2042d6f9c0751acc6564f
parent    2797596992898034256a6c40168d357ccf0ba0b8 (diff)
dma-direct: add support for CMA allocation
Try the CMA allocator for coherent allocations if supported.

Roughly modelled after the x86 code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r--  lib/dma-direct.c | 24
1 file changed, 18 insertions(+), 6 deletions(-)
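For context, here is a minimal driver-side sketch (not part of the patch) of how an allocation reaches this path: a dma_alloc_coherent() call with GFP_KERNEL is allowed to block, so dma_direct_alloc() below tries the CMA area first and only then falls back to alloc_pages(). The device pointer, buffer size, and function names in this sketch are hypothetical.

/*
 * Hypothetical illustration only: a driver allocating a coherent buffer
 * with GFP_KERNEL.  On a system using dma-direct, this ends up in
 * dma_direct_alloc(); because GFP_KERNEL allows blocking, the CMA
 * allocator is tried before the normal page allocator.
 */
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_setup_ring(struct device *dev, void **ring,
			      dma_addr_t *ring_dma)
{
	/* GFP_KERNEL => gfpflags_allow_blocking() is true, so CMA is tried */
	*ring = dma_alloc_coherent(dev, SZ_64K, ring_dma, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;
	return 0;
}

static void example_teardown_ring(struct device *dev, void *ring,
				  dma_addr_t ring_dma)
{
	/* Frees through dma_direct_free(), which releases CMA pages if used */
	dma_free_coherent(dev, SZ_64K, ring, ring_dma);
}

Whether CMA is actually used still depends on a CMA area being configured for the system; atomic (non-blocking) allocations skip it and go straight to alloc_pages(), exactly as in the hunk below.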
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index 32fd4d9e4c47..a9ae98be7af3 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
+#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#define DIRECT_MAPPING_ERROR 0
@@ -29,19 +30,30 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
static void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- void *ret;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ int page_order = get_order(size);
+ struct page *page = NULL;
- ret = (void *)__get_free_pages(gfp, get_order(size));
- if (ret)
- *dma_handle = phys_to_dma(dev, virt_to_phys(ret));
+ /* CMA can be used only in the context which permits sleeping */
+ if (gfpflags_allow_blocking(gfp))
+ page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+ if (!page)
+ page = alloc_pages(gfp, page_order);
+ if (!page)
+ return NULL;
- return ret;
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ memset(page_address(page), 0, size);
+ return page_address(page);
}
static void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
- free_pages((unsigned long)cpu_addr, get_order(size));
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
+ free_pages((unsigned long)cpu_addr, get_order(size));
}
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,