author		Marek Szyprowski <m.szyprowski@samsung.com>	2012-02-10 19:55:20 +0100
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-21 15:06:18 +0200
commit		15237e1f505b3e5c2276f240b01cd2133e110cbc (patch)
tree		989e8a8580420ad3759a7bab81cd86347a3dadca /arch/arm/include/asm/dma-mapping.h
parent		ARM: dma-mapping: implement dma sg methods on top of any generic dma ops (diff)
ARM: dma-mapping: move all dma bounce code to separate dma ops structure
This patch removes the DMA bounce hooks from the common DMA mapping
implementation on the ARM architecture and creates a separate set of
dma_map_ops for DMA bounce devices.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-by: Subash Patel <subash.ramaswamy@linaro.org>
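For context, the kernel's struct dma_map_ops lets each device supply its own
implementation of the mapping operations, which is what makes this split
possible. A minimal sketch of the shape such a dedicated dmabounce ops table
could take (the struct and its fields existed in this era's
include/linux/dma-mapping.h; the handler names below are illustrative, not
necessarily those used by the actual patch):

	/* Sketch: a dedicated dma_map_ops table for bounce-buffer devices,
	 * as described in the commit message above. The dmabounce_* and
	 * arm_dma_* handlers are assumed to be declared elsewhere. */
	static struct dma_map_ops dmabounce_ops = {
		.map_page		= dmabounce_map_page,
		.unmap_page		= dmabounce_unmap_page,
		.sync_single_for_cpu	= dmabounce_sync_for_cpu,
		.sync_single_for_device	= dmabounce_sync_for_device,
		.map_sg			= arm_dma_map_sg,
		.unmap_sg		= arm_dma_unmap_sg,
		.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
		.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	};

A device with a limited DMA window is then pointed at this table when it
registers with dmabounce (e.g. via set_dma_ops()), leaving the common DMA
mapping code free of bounce-buffer special cases.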
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	99
1 file changed, 2 insertions(+), 97 deletions(-)
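The header comment removed below describes the "buffer ownership" model the
streaming DMA API is built on: the CPU cleans caches before handing a buffer
to the device, and delays cache invalidation until the transfer completes
because of speculative prefetching. A minimal driver-side sketch of those
ownership transitions, using only the generic streaming API (dma_map_single()
and friends are the standard kernel API; the function, device, and buffer
names in the sketch are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int example_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* CPU owns buf: fill it, then pass ownership to the device.
		 * On non-coherent ARM this is where caches are cleaned,
		 * before the transfer starts. */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... program the device with 'handle' and run the DMA ... */

		/* Transfer complete: ownership returns to the CPU. For a
		 * DMA_FROM_DEVICE mapping this is where the delayed cache
		 * invalidation described in the removed comment happens. */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}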
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 6725a08a5c21..7a7c3c762f5f 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -85,62 +85,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_cpu_to_dev(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_dev_to_cpu(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -163,6 +107,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
}
+extern int dma_supported(struct device *dev, u64 mask);
+
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -235,7 +181,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
extern void __init init_consistent_dma_size(unsigned long size);
-#ifdef CONFIG_DMABOUNCE
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
@@ -275,47 +220,7 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
- unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
/*
* The scatter list versions of the above methods.