author    Suzuki K. Poulose <suzuki.poulose@arm.com>    2015-03-19 21:17:09 +0300
committer Catalin Marinas <catalin.marinas@arm.com>     2015-03-20 21:18:54 +0300
commit    7132813c384515c9dede1ae20e56f3895feb7f1e (patch)
tree      a556d550a01f87e38b84ebe539bc7c598ea101d6 /arch
parent    130c93fd10c4d150e39d8879420c1351aa207fa9 (diff)
download  linux-7132813c384515c9dede1ae20e56f3895feb7f1e.tar.xz
arm64: Honor __GFP_ZERO in dma allocations
The current implementation doesn't zero out the pages it allocates. Honor the __GFP_ZERO flag and zero out the buffer when it is set.

Cc: <stable@vger.kernel.org> # v3.14+
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
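For context, the pattern the patch applies in both affected allocation paths boils down to the sketch below. This is a simplified, userspace-flavoured illustration, not the kernel code itself: the ALLOC_ZERO flag and the pool_alloc() helper are invented stand-ins for __GFP_ZERO and for the arm64 atomic-pool/CMA allocators.

/*
 * Illustrative sketch only: an allocator that hands out recycled memory
 * must clear it itself when the caller asked for zeroed memory, because
 * nothing further up the stack will do it.  ALLOC_ZERO and pool_alloc()
 * are invented names, not kernel APIs.
 */
#include <stdlib.h>
#include <string.h>

#define ALLOC_ZERO 0x1			/* stands in for __GFP_ZERO */

static void *pool_alloc(size_t size, unsigned int flags)
{
	void *ptr = malloc(size);	/* stands in for the atomic pool / CMA area */

	if (ptr && (flags & ALLOC_ZERO))
		memset(ptr, 0, size);	/* honor the zeroing request */

	return ptr;
}

int main(void)
{
	char *buf = pool_alloc(64, ALLOC_ZERO);

	/* buf[0..63] is guaranteed to be zero here */
	free(buf);
	return 0;
}

The point is simply that the allocator, not the caller, is responsible for the zeroing once the flag has been passed down.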
Diffstat (limited to 'arch')
-rw-r--r--    arch/arm64/mm/dma-mapping.c    12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2bdde04..ef7d112f5ce0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
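For illustration, a hypothetical caller that depends on this behaviour might look like the snippet below. The device, ring size, and function name are invented for the example; only dma_alloc_coherent() and __GFP_ZERO are real kernel interfaces. Before this patch, the CMA and atomic-pool paths on arm64 could hand such a caller stale, non-zeroed memory.

/*
 * Hypothetical driver-side usage, for illustration only (MY_RING_BYTES
 * and my_ring_alloc() are invented).  A caller that passes __GFP_ZERO
 * relies on exactly the zeroing this patch adds to the CMA and
 * atomic-pool paths.
 */
#include <linux/dma-mapping.h>

#define MY_RING_BYTES	4096	/* invented descriptor ring size */

static void *my_ring_alloc(struct device *dev, dma_addr_t *dma)
{
	/* Expects fully zeroed memory back; stale data here would be a bug. */
	return dma_alloc_coherent(dev, MY_RING_BYTES, dma,
				  GFP_KERNEL | __GFP_ZERO);
}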