author	Thomas Gleixner <tglx@linutronix.de>	2010-02-17 20:27:37 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2010-02-17 20:28:05 +0300
commit	b7e56edba4b02f2079042c326a8cd72a44635817 (patch)
tree	b5042002e9747cd8fb1278d61f86d8b92a74c018	/arch/arm/common/dmabounce.c
parent	13ca0fcaa33f6b1984c4111b6ec5df42689fea6f (diff)
parent	b0483e78e5c4c9871fc5541875b3bc006846d46b (diff)
download	linux-b7e56edba4b02f2079042c326a8cd72a44635817.tar.xz
Merge branch 'linus' into x86/mm
x86/mm is on 32-rc4 and missing the spinlock namespace changes which are needed for further commits into this topic.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--	arch/arm/common/dmabounce.c	33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 734ac9135998..cc32c1e54a59 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 			memcpy(ptr, buf->safe, size);
 
 			/*
-			 * DMA buffers must have the same cache properties
-			 * as if they were really used for DMA - which means
-			 * data must be written back to RAM. Note that
-			 * we don't use dmac_flush_range() here for the
-			 * bidirectional case because we know the cache
-			 * lines will be coherent with the data written.
+			 * Since we may have written to a page cache page,
+			 * we need to ensure that the data will be coherent
+			 * with user mappings.
 			 */
-			dmac_clean_range(ptr, ptr + size);
-			outer_clean_range(__pa(ptr), __pa(ptr) + size);
+			__cpuc_flush_dcache_area(ptr, size);
 		}
 		free_safe_buffer(dev->archdata.dmabounce, buf);
 	}
@@ -342,6 +338,22 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 }
 EXPORT_SYMBOL(dma_map_single);
 
+/*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+ * the data from the safe buffer back to the unsafe buffer and free up
+ * the safe buffer. (basically return things back to the way they
+ * should be)
+ */
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir)
+{
+	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, dir);
+
+	unmap_single(dev, dma_addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
 dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
@@ -366,8 +378,7 @@ EXPORT_SYMBOL(dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -375,7 +386,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
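
For readers unfamiliar with the API being reshuffled above, the sketch below shows the usual streaming-DMA pattern a driver follows with dma_map_single()/dma_unmap_single(). It is illustrative only, not part of this commit, and example_dma_write() is a hypothetical function name; on dmabounce-equipped platforms the unmap step is where the copy-back and safe-buffer release in unmap_single() above take place.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver helper: stream 'len' bytes at 'buf' to a device. */
static int example_dma_write(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map the CPU buffer; dmabounce may transparently substitute a "safe" bounce buffer. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for the transfer to complete ... */

	/*
	 * Release the mapping. For DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
	 * mappings, dmabounce copies the safe buffer back to the original
	 * buffer and flushes the cache in unmap_single(), as shown in the
	 * first hunk above.
	 */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}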