author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-08-10 13:10:49 +0200
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-08-10 13:10:49 +0200
commit | 98ed7d4b1a4eebc1ac25929b6968673bef4d54c3 |
tree | b9c0e29f20666f433d346c80b557ef0f055336eb | /arch/arm/include/asm/dma-mapping.h
parent | Merge branch 'for-rmk' of git://git.marvell.com/orion |
[ARM] dma-mapping: improve type-safeness of DMA translations
OMAP at least gets the return type(s) for the DMA translation functions
wrong, which can lead to subtle errors. Avoid this by moving the DMA
translation functions to asm/dma-mapping.h, and converting them to
inline functions.
Fix the OMAP DMA translation macros to use the correct argument and
result types.
Also, remove the unnecessary casts in dmabounce.c.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
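To make the type-safety argument concrete, here is an illustrative sketch (not part of the patch; the caller below is hypothetical). A macro whose body happens to expand to an integer expression lets a mistyped assignment through silently, because the macro itself carries no type information of its own. The static inline dma_to_virt() introduced by this patch returns void *, so the same mistake now produces a pointer-to-integer diagnostic at the call site:

#include <asm/dma-mapping.h>

/* Hypothetical caller, for illustration only. */
static void example(struct device *dev, dma_addr_t handle)
{
        /*
         * Wrong: dma_to_virt() yields a CPU virtual address, not a DMA
         * address.  With the inline function returning void *, the
         * compiler now warns about this implicit pointer-to-integer
         * conversion.
         */
        dma_addr_t bogus = dma_to_virt(dev, handle);

        /* Right: keep the result as a virtual address. */
        void *vaddr = dma_to_virt(dev, handle);

        (void)bogus;
        (void)vaddr;
}

As the new comment in the header stresses, these translation helpers are architecture-private and must not be used by drivers; the snippet only demonstrates the diagnostics the inline form enables.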
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r-- | arch/arm/include/asm/dma-mapping.h | 51
1 file changed, 44 insertions(+), 7 deletions(-)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 45329fca1b64..6f45959fe2cc 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -3,11 +3,48 @@
 
 #ifdef __KERNEL__
 
-#include <linux/mm.h>           /* need struct page */
-
+#include <linux/mm_types.h>
 #include <linux/scatterlist.h>
 
 #include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+        return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+        return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+        return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+        return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+        return __arch_virt_to_dma(dev, addr);
+}
+#endif
 
 /*
  * DMA-consistent mapping functions.  These allocate/free a region of
@@ -169,7 +206,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
         if (!arch_is_coherent())
                 dma_cache_maint(cpu_addr, size, dir);
 
-        return virt_to_dma(dev, (unsigned long)cpu_addr);
+        return virt_to_dma(dev, cpu_addr);
 }
 #else
 extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
@@ -195,7 +232,7 @@ dma_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size,
              enum dma_data_direction dir)
 {
-        return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+        return dma_map_single(dev, page_address(page) + offset, size, dir);
 }
 
 /**
@@ -241,7 +278,7 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
 {
-        dma_unmap_single(dev, handle, size, (int)dir);
+        dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -336,7 +373,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
                         enum dma_data_direction dir)
 {
         if (!arch_is_coherent())
-                dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+                dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
@@ -344,7 +381,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
                            enum dma_data_direction dir)
 {
         if (!arch_is_coherent())
-                dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+                dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
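For reference, this is roughly how a machine class can hook into the __arch_* override path added above. The macro bodies below are a made-up sketch (the EXAMPLE_BUS_OFFSET value and the use of page_to_phys()/phys_to_virt()/virt_to_phys() are assumptions for illustration, not taken from OMAP or any other platform); the point is that whatever the macros expand to, the static inline wrappers still pin the argument and result types, so a mismatch is caught at the wrapper rather than propagating to every caller.

/*
 * Hypothetical mach-level override, defined before asm/dma-mapping.h is
 * included (e.g. from the platform's memory header).  The bus offset is
 * invented for this example.
 */
#define EXAMPLE_BUS_OFFSET      0x10000000

#define __arch_page_to_dma(dev, page)   \
        ((dma_addr_t)(page_to_phys(page) + EXAMPLE_BUS_OFFSET))
#define __arch_dma_to_virt(dev, addr)   \
        ((void *)phys_to_virt((addr) - EXAMPLE_BUS_OFFSET))
#define __arch_virt_to_dma(dev, addr)   \
        ((dma_addr_t)(virt_to_phys(addr) + EXAMPLE_BUS_OFFSET))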