author | Becky Bruce <becky.bruce@freescale.com> | 2008-11-20 07:49:16 +0100 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2008-12-03 10:46:36 +0100 |
commit | 15e09c0ecaab4a1b4a7ed69db536c38948b92279 | |
tree | 9cf8f2cbfe002e220db4d001d08fbc50bfaf4433 /arch/powerpc/kernel/dma.c | |
parent | powerpc: Allow the max stack trace depth to be configured | |
powerpc: Add sync_*_for_* to dma_ops
We need to swap these out once we start using swiotlb, so add
them to dma_ops. Create a CONFIG_PPC_NEED_DMA_SYNC_OPS Kconfig
option; it is currently enabled automatically when
CONFIG_NOT_COHERENT_CACHE is set. In the future, it will also
be enabled for builds that need swiotlb. If PPC_NEED_DMA_SYNC_OPS
is not defined, the dma_sync_*_for_* ops compile to nothing.
Otherwise, they call through the sync function pointers in dma_ops.
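The header side of this scheme lives in asm/dma-mapping.h and is outside the diffstat below (which is limited to dma.c), but a minimal sketch of the gating described above might look like the following, assuming a get_dma_ops() helper that returns the device's struct dma_mapping_ops:

```c
/* Sketch only: the real wrappers are in asm/dma-mapping.h,
 * not part of this diff. */
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}
#else
/* Cache-coherent platforms: the sync op compiles to nothing. */
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}
#endif
```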
This patch also changes dma_sync_single_range_* to actually
sync only the requested range; previously it fell back to a
generous dma_sync_single that covered more than the range asked
for. dma_sync_single_* is now implemented as a
dma_sync_single_range with an offset of 0.
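A sketch of that rework, under the same assumptions as the sketch above:

```c
/* Sketch only: dma_sync_single_* becomes a range sync at offset 0. */
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle, 0,
					      size, direction);
}
```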
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
-rw-r--r-- | arch/powerpc/kernel/dma.c | 26 |
1 file changed, 26 insertions, 0 deletions
```diff
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3a6eaa876ee1..1c5c8a6fc129 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev,
 {
 }
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+static inline void dma_direct_sync_sg(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_direct_sync_single_range(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+}
+#endif
+
 struct dma_mapping_ops dma_direct_ops = {
 	.alloc_coherent	= dma_direct_alloc_coherent,
 	.free_coherent	= dma_direct_free_coherent,
@@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = {
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
+	.sync_single_range_for_device	= dma_direct_sync_single_range,
+	.sync_sg_for_cpu		= dma_direct_sync_sg,
+	.sync_sg_for_device		= dma_direct_sync_sg,
+#endif
 };
 EXPORT_SYMBOL(dma_direct_ops);
```
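For context, an illustrative driver-side use of the range sync this patch wires up (dev, buf, BUF_SIZE, offset, and len are hypothetical driver state, not part of this patch): after a device DMAs into part of a mapped buffer, only the touched range needs to be synced back to the CPU.

```c
/* Illustrative fragment; names below are hypothetical, not from
 * this patch. */
dma_addr_t handle = dma_map_single(dev, buf, BUF_SIZE, DMA_FROM_DEVICE);

/* ... device writes 'len' bytes at 'offset' within the buffer ... */

dma_sync_single_range_for_cpu(dev, handle, offset, len, DMA_FROM_DEVICE);
/* The CPU can now safely read buf + offset. */
```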