author		Christoph Hellwig <hch@lst.de>	2017-08-27 10:37:15 +0200
committer	Christoph Hellwig <hch@lst.de>	2017-10-19 16:37:49 +0200
commit		c9eb6172c328dde7e14812f94f8da87b691e41b5 (patch)
tree		5b27a749da30670bd6141f3f3b0ddb67d48e4520 /arch/parisc
parent		sh: make dma_cache_sync a no-op (diff)
dma-mapping: turn dma_cache_sync into a dma_map_ops method
After we removed all the dead wood, it turns out only two architectures actually implement dma_cache_sync as a real op: mips and parisc. Add a cache_sync method to struct dma_map_ops and implement it for the mips default DMA ops and the parisc pa11 ops.

Note that arm, arc and openrisc support DMA_ATTR_NON_CONSISTENT, but never provided a functional dma_cache_sync implementation, which seems somewhat odd.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
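For context, the commit description implies a generic dma_cache_sync() wrapper in common code that dispatches to the new method; that change is not part of the arch/parisc diff shown here, so the following is only a sketch of the assumed common-code side (names such as get_dma_ops and valid_dma_direction are the existing kernel helpers).

	/*
	 * Sketch only: assumed common-code wrapper, not part of the
	 * arch/parisc diff below.  It dispatches to the new ->cache_sync
	 * method when an architecture provides one, and does nothing
	 * otherwise.
	 */
	static inline void
	dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		       enum dma_data_direction dir)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		BUG_ON(!valid_dma_direction(dir));
		if (ops->cache_sync)
			ops->cache_sync(dev, vaddr, size, dir);
	}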
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/include/asm/dma-mapping.h | 8
-rw-r--r--  arch/parisc/kernel/pci-dma.c          | 8
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 2b16282add69..cb26bbd71d8a 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -32,14 +32,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return hppa_dma_ops;
 }
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	if (hppa_dma_ops->sync_single_for_cpu)
-		flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
-
 static inline void *
 parisc_walk_tree(struct device *dev)
 {
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index bd4c0a7471d3..f87c34cb3b43 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -571,6 +571,12 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 
+static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction direction)
+{
+	flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
+
 const struct dma_map_ops pcxl_dma_ops = {
 	.dma_supported =	pa11_dma_supported,
 	.alloc =		pa11_dma_alloc,
@@ -583,6 +589,7 @@ const struct dma_map_ops pcxl_dma_ops = {
 	.sync_single_for_device =	pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
+	.cache_sync =		pa11_dma_cache_sync,
 };
 
 static void *pcx_dma_alloc(struct device *dev, size_t size,
@@ -619,4 +626,5 @@ const struct dma_map_ops pcx_dma_ops = {
 	.sync_single_for_device =	pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
+	.cache_sync =		pa11_dma_cache_sync,
 };
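As a usage note, a driver working with DMA_ATTR_NON_CONSISTENT memory reaches pa11_dma_cache_sync() above through the generic dma_cache_sync() call. The sketch below is a hypothetical example of that pattern; the function name and buffer handling are invented for illustration and are not part of this patch.

	/*
	 * Hypothetical driver-side usage sketch, not part of this patch:
	 * memory obtained with DMA_ATTR_NON_CONSISTENT must be synced
	 * explicitly; on pa11/pcxl hardware that sync now lands in
	 * pa11_dma_cache_sync() via the ->cache_sync method wired up above.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/string.h>

	static int example_fill_and_send(struct device *dev, size_t size)
	{
		dma_addr_t handle;
		void *buf;

		buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
				      DMA_ATTR_NON_CONSISTENT);
		if (!buf)
			return -ENOMEM;

		memset(buf, 0xff, size);	/* CPU fills the buffer */
		dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);	/* flush before DMA */

		/* ... program the device with 'handle' here ... */

		dma_free_attrs(dev, size, buf, handle, DMA_ATTR_NON_CONSISTENT);
		return 0;
	}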