author    Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-04 01:28:32 +0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-04 02:27:49 +0400
commit    23bc9873ba60ee661d8e9f3a6b22fc3bcc4b7015 (patch)
tree      74419df7fa856a7b261ea1914810b21c281e4fa0 /arch/arm/common
parent    71695dd8b9eacfcda1b548a5b1780d34213ad654 (diff)
download  linux-23bc9873ba60ee661d8e9f3a6b22fc3bcc4b7015.tar.xz
ARM: dmabounce: separate out decision to bounce
Move the decision to perform DMA bouncing out of map_single() into its own stand-alone function.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
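The new helper returns a negative errno when the mapping cannot be satisfied at all, 0 when the buffer is directly addressable by the device, and a positive value when it must be bounced; map_single() then turns a negative return into the ~0 error cookie. The following standalone sketch models only that mask arithmetic outside the kernel: the dma_addr_t typedef, the 26-bit example mask and main() are illustrative assumptions, not part of the patch.

/*
 * Standalone model of the bounce decision this patch factors out.
 * The mask/limit arithmetic mirrors the new needs_bounce() helper;
 * everything around it is illustration, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint32_t dma_addr_t;

/* <0: error (the patch returns -E2BIG), 0: no bounce, >0: bounce needed. */
static int needs_bounce(uint64_t dma_mask, dma_addr_t dma_addr, size_t size)
{
	/* Size of the low, contiguously addressable part of the mask. */
	uint64_t limit = (dma_mask + 1) & ~dma_mask;

	if (limit && size > limit)
		return -1;	/* mapping too big for this mask */

	/* Bounce if the first or last byte lies outside the mask. */
	if ((dma_addr | (dma_addr + size - 1)) & ~dma_mask)
		return 1;

	return 0;
}

int main(void)
{
	uint64_t mask = 0x03ffffff;	/* hypothetical 26-bit DMA mask */

	printf("%d\n", needs_bounce(mask, 0x00100000, 4096));	/* 0: directly addressable */
	printf("%d\n", needs_bounce(mask, 0x04000000, 4096));	/* 1: must be bounced */
	return 0;
}

The real helper additionally bails out early when the device has no dmabounce state and falls back to dma_needs_bounce() for platform-specific checks; both are visible in the hunk below.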
Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/dmabounce.c  46
1 file changed, 28 insertions, 18 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 3e0fa1548582..643e1d660677 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -219,36 +219,46 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
- struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
- dma_addr_t dma_addr;
- int needs_bounce = 0;
-
- if (device_info)
- DO_STATS ( device_info->map_op_count++ );
-
- dma_addr = virt_to_dma(dev, ptr);
+ if (!dev || !dev->archdata.dmabounce)
+ return 0;
if (dev->dma_mask) {
- unsigned long mask = *dev->dma_mask;
- unsigned long limit;
+ unsigned long limit, mask = *dev->dma_mask;
limit = (mask + 1) & ~mask;
if (limit && size > limit) {
dev_err(dev, "DMA mapping too big (requested %#x "
"mask %#Lx)\n", size, *dev->dma_mask);
- return ~0;
+ return -E2BIG;
}
- /*
- * Figure out if we need to bounce from the DMA mask.
- */
- needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+ /* Figure out if we need to bounce from the DMA mask. */
+ if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+ return 1;
}
- if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
+ return dma_needs_bounce(dev, dma_addr, size) ? 1 : 0;
+}
+
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (device_info)
+ DO_STATS ( device_info->map_op_count++ );
+
+ dma_addr = virt_to_dma(dev, ptr);
+
+ ret = needs_bounce(dev, dma_addr, size);
+ if (ret < 0)
+ return ~0;
+
+ if (ret > 0) {
struct safe_buffer *buf;
buf = alloc_safe_buffer(device_info, ptr, size, dir);