Diffstat (limited to 'arch/arc/mm/dma.c')
-rw-r--r--	arch/arc/mm/dma.c	75
1 file changed, 50 insertions(+), 25 deletions(-)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 01eaf88bf821..8c8e36fa5659 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -24,22 +24,22 @@
static void *arc_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
- void *paddr, *kvaddr;
-
- /* This is linear addr (0x8000_0000 based) */
- paddr = alloc_pages_exact(size, gfp);
- if (!paddr)
+ unsigned long order = get_order(size);
+ struct page *page;
+ phys_addr_t paddr;
+ void *kvaddr;
+ int need_coh = 1, need_kvaddr = 0;
+
+ page = alloc_pages(gfp, order);
+ if (!page)
return NULL;
- /* This is bus address, platform dependent */
- *dma_handle = (dma_addr_t)paddr;
-
/*
* IOC relies on all data (even coherent DMA data) being in cache
* Thus allocate normal cached memory
*
* The gains with IOC are two pronged:
- * -For streaming data, elides needs for cache maintenance, saving
+ * -For streaming data, elides need for cache maintenance, saving
* cycles in flush code, and bus bandwidth as all the lines of a
* buffer need to be flushed out to memory
* -For coherent data, Read/Write to buffers terminate early in cache
@@ -47,12 +47,31 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
*/
if ((is_isa_arcv2() && ioc_exists) ||
dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
- return paddr;
+ need_coh = 0;
+
+ /*
+ * - A coherent buffer needs MMU mapping to enforce non-cachability
+ * - A highmem page needs a virtual handle (hence MMU mapping)
+ * independent of cachability
+ */
+ if (PageHighMem(page) || need_coh)
+ need_kvaddr = 1;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = page_to_phys(page);
+
+ *dma_handle = plat_phys_to_dma(dev, paddr);
/* This is kernel Virtual address (0x7000_0000 based) */
- kvaddr = ioremap_nocache((unsigned long)paddr, size);
- if (kvaddr == NULL)
- return NULL;
+ if (need_kvaddr) {
+ kvaddr = ioremap_nocache(paddr, size);
+ if (kvaddr == NULL) {
+ __free_pages(page, order);
+ return NULL;
+ }
+ } else {
+ kvaddr = (void *)(u32)paddr;
+ }
/*
* Evict any existing L1 and/or L2 lines for the backing page
@@ -64,7 +83,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* Currently flush_cache_vmap nukes the L1 cache completely which
* will be optimized as a separate commit
*/
- dma_cache_wback_inv((unsigned long)paddr, size);
+ if (need_coh)
+ dma_cache_wback_inv(paddr, size);
return kvaddr;
}
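
For reference, the bus address is now derived with plat_phys_to_dma() instead of casting the CPU physical address directly. Those hooks are platform-provided; on a platform where the device sees memory at the same addresses as the CPU, a minimal identity sketch (hypothetical, a real platform may apply a bus-address offset) could look like:

/*
 * Hypothetical identity implementations of the platform hooks used above;
 * a platform with a fixed bus-address offset would add/subtract it here.
 */
static inline dma_addr_t plat_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return (dma_addr_t)paddr;
}

static inline phys_addr_t plat_dma_to_phys(struct device *dev, dma_addr_t dma_handle)
{
	return (phys_addr_t)dma_handle;
}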
@@ -72,11 +92,16 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
- !(is_isa_arcv2() && ioc_exists))
+ struct page *page = virt_to_page(dma_handle);
+ int is_non_coh = 1;
+
+ is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+ (is_isa_arcv2() && ioc_exists);
+
+ if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr);
- free_pages_exact((void *)dma_handle, size);
+ __free_pages(page, get_order(size));
}
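
A driver reaches arc_dma_alloc()/arc_dma_free() through the generic DMA API; a minimal sketch of that path follows, with example_coherent_alloc() and 'dev' being illustrative names only:

#include <linux/dma-mapping.h>

/* Sketch only: allocate, use and free one coherent page via the DMA API */
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* program the device with 'handle'; the CPU accesses the buffer via 'buf' */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}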
/*
@@ -84,7 +109,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
* CPU accesses page via normal paddr, thus needs to explicitly made
* consistent before each use
*/
-static void _dma_cache_sync(unsigned long paddr, size_t size,
+static void _dma_cache_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
@@ -98,7 +123,7 @@ static void _dma_cache_sync(unsigned long paddr, size_t size,
dma_cache_wback_inv(paddr, size);
break;
default:
- pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
}
}
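
The error path switches to the %pa printk specifier, which prints a phys_addr_t at its configured width and takes a pointer to the value; that is why &paddr is passed above. A trivial, hypothetical illustration:

static void example_print_paddr(phys_addr_t paddr)
{
	/* %pa expects a pointer to the phys_addr_t, hence &paddr */
	pr_info("buffer @ %pa\n", &paddr);
}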
@@ -106,9 +131,9 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- unsigned long paddr = page_to_phys(page) + offset;
+ phys_addr_t paddr = page_to_phys(page) + offset;
_dma_cache_sync(paddr, size, dir);
- return (dma_addr_t)paddr;
+ return plat_phys_to_dma(dev, paddr);
}
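
Streaming mappings land in arc_dma_map_page() via the generic API; a sketch of the usual driver pattern for a CPU-to-device transfer (names and error handling illustrative only):

/* Sketch: map one page for a device read, then tear the mapping down */
static int example_map_page(struct device *dev, struct page *page)
{
	dma_addr_t dma;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... start the device on 'dma' ... */

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}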
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
@@ -127,13 +152,13 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
static void arc_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+ _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}
static void arc_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
+ _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}
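
These hooks convert the device address back to a physical address before doing cache maintenance. The driver-side pattern they serve, re-using one streaming mapping across repeated device writes, might look like this sketch:

/* Sketch: hand a buffer back and forth between device and CPU */
static void example_reuse_mapping(struct device *dev, dma_addr_t dma)
{
	dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... device DMAs into the buffer ... */
	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
	/* CPU may now read the freshly written data */
}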
static void arc_dma_sync_sg_for_cpu(struct device *dev,
@@ -144,7 +169,7 @@ static void arc_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
for_each_sg(sglist, sg, nelems, i)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+ _dma_cache_sync(sg_phys(sg), sg->length, dir);
}
static void arc_dma_sync_sg_for_device(struct device *dev,
@@ -155,7 +180,7 @@ static void arc_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg;
for_each_sg(sglist, sg, nelems, i)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+ _dma_cache_sync(sg_phys(sg), sg->length, dir);
}
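
Switching from sg_virt() to sg_phys() matters because a highmem page may have no kernel virtual mapping at all, while its physical address is always well-defined. Roughly, sg_phys() boils down to the following (sketch; see <linux/scatterlist.h> for the real helper):

static phys_addr_t example_sg_to_phys(struct scatterlist *sg)
{
	/* physical address of the backing page plus the intra-page offset */
	return page_to_phys(sg_page(sg)) + sg->offset;
}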
static int arc_dma_supported(struct device *dev, u64 dma_mask)