author    Russell King <rmk@dyn-67.arm.linux.org.uk>      2008-09-29 20:50:59 +0200
committer Russell King <rmk+kernel@arm.linux.org.uk>      2008-09-30 12:01:36 +0200
commit    309dbbabee7b19e003e1ba4b98f43d28f390a84e (patch)
tree      ba748b84c0573f5eb151a581e333b95010576521 /arch/arm/mm
parent    [ARM] dma: add validation of DMA params (diff)
[ARM] dma: don't touch cache on dma_*_for_cpu()
As per the dma_unmap_* calls, we don't touch the cache when a DMA buffer
transitions from device to CPU ownership.  Presently, no problems have been
identified with speculative cache prefetching, which is itself a new feature
in later architectures.  We may have to revisit the DMA API later for these
architectures anyway.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
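For readers unfamiliar with the streaming DMA API, here is a minimal
driver-side sketch of the buffer-ownership model this change relies on.
The function and buffer names are hypothetical; the dma_* calls are the
standard kernel API:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical example: receive data from a device into 'buf'. */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device ownership: cache maintenance happens here. */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the DMA transfer and wait for it to complete ... */

	/*
	 * Device -> CPU ownership: with this patch, the non-bounce path
	 * does no cache maintenance here, mirroring dma_unmap_single().
	 */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* The CPU may now read buf safely. */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}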
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/dma-mapping.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0e28cf33f7dd..67960017dc8f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -585,12 +585,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-					    sg_dma_len(s), dir))
-			continue;
-
-		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					sg_dma_len(s), dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
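For completeness, a hedged sketch of how a driver would exercise the
scatterlist path changed above; everything except the dma_*_sg calls is
made up for illustration:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical receive path over a scatterlist of 'nents' entries. */
static void example_rx_sg(struct device *dev, struct scatterlist *sg, int nents)
{
	int count;

	/* CPU -> device ownership; this is where caches are maintained. */
	count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return;

	/* ... program the device with 'count' mapped segments, run DMA ... */

	/*
	 * Device -> CPU ownership.  With this patch applied, only the
	 * dmabounce processing runs here; the cache is left untouched.
	 * Note: sync and unmap take the original 'nents', not 'count'.
	 */
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

	/* ... CPU reads the received data ... */

	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
}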