author | Max Filippov <jcmvbkbc@gmail.com> | 2015-05-25 05:55:05 +0200
---|---|---
committer | Max Filippov <jcmvbkbc@gmail.com> | 2015-08-17 06:31:00 +0200
commit | c75959a6da19df2700556dc76861442e9716317d (patch) |
tree | 81ad55a5431fd510c25d518983c5ffb388ca2864 /arch/xtensa |
parent | xtensa: fix threadptr reload on return to userspace (diff) |
xtensa: reimplement DMA API using common helpers
- keep existing functionality: don't handle attributes, don't support
high memory;
- implement scatterlist primitives (map/unmap/sync);
- enable DMA API debug.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
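With this change every generic DMA call is dispatched through `xtensa_dma_map_ops` (via `get_dma_ops()` and `<asm-generic/dma-mapping-common.h>`), so drivers keep their usual calling convention. As a rough illustration, here is a minimal sketch of the streaming-DMA pattern that now lands in `xtensa_map_page()`/`xtensa_unmap_page()` underneath; the function and variable names (`example_dma_transfer`, `buf`) are hypothetical and not part of the patch:

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver snippet -- names are illustrative only. */
static int example_dma_transfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Dispatches to xtensa_dma_map_ops->map_page(), which writes the
	 * cached alias back to memory before the device reads it. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	/* For DMA_TO_DEVICE the unmap path is effectively a no-op sync. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
```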
Diffstat (limited to 'arch/xtensa')
-rw-r--r-- | arch/xtensa/Kconfig | 2
-rw-r--r-- | arch/xtensa/include/asm/Kbuild | 1
-rw-r--r-- | arch/xtensa/include/asm/device.h | 19
-rw-r--r-- | arch/xtensa/include/asm/dma-mapping.h | 186
-rw-r--r-- | arch/xtensa/kernel/pci-dma.c | 203
5 files changed, 233 insertions, 178 deletions
```diff
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e5b872ba2484..6ce10f92f481 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -14,6 +14,8 @@ config XTENSA
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select HAVE_DMA_API_DEBUG
+	select HAVE_DMA_ATTRS
 	select HAVE_FUNCTION_TRACER
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_OPROFILE
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 14d15bf1a95b..d870a59f5dce 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -2,7 +2,6 @@ generic-y += bitsperlong.h
 generic-y += bug.h
 generic-y += clkdev.h
 generic-y += cputime.h
-generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h
new file mode 100644
index 000000000000..fe1f5c878493
--- /dev/null
+++ b/arch/xtensa/include/asm/device.h
@@ -0,0 +1,19 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_XTENSA_DEVICE_H
+#define _ASM_XTENSA_DEVICE_H
+
+struct dma_map_ops;
+
+struct dev_archdata {
+	/* DMA operations on that device */
+	struct dma_map_ops *dma_ops;
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_XTENSA_DEVICE_H */
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 1f5f6dc09736..f01cb3044e50 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -1,11 +1,10 @@
 /*
- * include/asm-xtensa/dma-mapping.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_DMA_MAPPING_H
@@ -13,142 +12,67 @@
 
 #include <asm/cache.h>
 #include <asm/io.h>
+
+#include <asm-generic/dma-coherent.h>
+
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
-/*
- * DMA-consistent mapping functions.
- */
-
-extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
-extern void consistent_free(void*, size_t, dma_addr_t);
-extern void consistent_sync(void*, size_t, int);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag);
+extern struct dma_map_ops xtensa_dma_map_ops;
 
-void dma_free_coherent(struct device *dev, size_t size,
-		void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	consistent_sync(ptr, size, direction);
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	BUG_ON(direction == DMA_NONE);
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	else
+		return &xtensa_dma_map_ops;
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	   enum dma_data_direction direction)
-{
-	int i;
-	struct scatterlist *sg;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
+#include <asm-generic/dma-mapping-common.h>
 
-		sg->dma_address = sg_phys(sg);
-		consistent_sync(sg_virt(sg), sg->length, direction);
-	}
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
 
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
-	BUG_ON(direction == DMA_NONE);
-}
+	void *ret;
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+		return ret;
 
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
-}
+	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-			   size_t size, enum dma_data_direction direction)
-{
-	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
+	return ret;
 }
 
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-
-	consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
-}
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
-		    enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
 
-	for_each_sg(sglist, sg, nelems, i)
-		consistent_sync(sg_virt(sg), sg->length, dir);
+	ops->free(dev, size, vaddr, dma_handle, attrs);
+	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-		       int nelems, enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		consistent_sync(sg_virt(sg), sg->length, dir);
-}
 static inline int
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return 0;
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	debug_dma_mapping_error(dev, dma_addr);
+	return ops->mapping_error(dev, dma_addr);
 }
 
 static inline int
@@ -168,39 +92,7 @@ dma_set_mask(struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	consistent_sync(vaddr, size, direction);
-}
-
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
-				    struct vm_area_struct *vma, void *cpu_addr,
-				    dma_addr_t dma_addr, size_t size)
-{
-	return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size)
-{
-	return -EINVAL;
-}
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	return NULL;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		    enum dma_data_direction direction);
 
 #endif	/* _XTENSA_DMA_MAPPING_H */
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index e8b76b8e4b29..fb75ebf1463a 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -1,6 +1,4 @@
 /*
- * arch/xtensa/kernel/pci-dma.c
- *
  * DMA coherent memory allocation.
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -9,6 +7,7 @@
  * option) any later version.
  *
  * Copyright (C) 2002 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  *
  * Based on version for i386.
  *
@@ -25,13 +24,107 @@
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		    enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_FROM_DEVICE:
+		__invalidate_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_TO_DEVICE:
+		__flush_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+	}
+}
+EXPORT_SYMBOL(dma_cache_sync);
+
+static void xtensa_sync_single_for_cpu(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction dir)
+{
+	void *vaddr;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_FROM_DEVICE:
+		vaddr = bus_to_virt(dma_handle);
+		__invalidate_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void xtensa_sync_single_for_device(struct device *dev,
+					  dma_addr_t dma_handle, size_t size,
+					  enum dma_data_direction dir)
+{
+	void *vaddr;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_TO_DEVICE:
+		vaddr = bus_to_virt(dma_handle);
+		__flush_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void xtensa_sync_sg_for_cpu(struct device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
+					   sg_dma_len(s), dir);
+	}
+}
+
+static void xtensa_sync_sg_for_device(struct device *dev,
+				      struct scatterlist *sg, int nents,
+				      enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		xtensa_sync_single_for_device(dev, sg_dma_address(s),
+					      sg_dma_len(s), dir);
+	}
+}
+
 /*
  * Note: We assume that the full memory space is always mapped to 'kseg'
  *	 Otherwise we have to use page attributes (not implemented).
  */
 
-void *
-dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
+static void *xtensa_dma_alloc(struct device *dev, size_t size,
+			      dma_addr_t *handle, gfp_t flag,
+			      struct dma_attrs *attrs)
 {
 	unsigned long ret;
 	unsigned long uncached = 0;
@@ -52,20 +145,15 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
 	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
+	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+	*handle = virt_to_bus((void *)ret);
+	__invalidate_dcache_range(ret, size);
 
-	if (ret != 0) {
-		memset((void*) ret, 0, size);
-		uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
-		*handle = virt_to_bus((void*)ret);
-		__flush_invalidate_dcache_range(ret, size);
-	}
-
-	return (void*)uncached;
+	return (void *)uncached;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *hwdev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle)
+static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
+			    dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long)vaddr +
 		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
@@ -75,24 +163,79 @@ void dma_free_coherent(struct device *hwdev, size_t size,
 
 	free_pages(addr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
+static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir,
+				  struct dma_attrs *attrs)
+{
+	dma_addr_t dma_handle = page_to_phys(page) + offset;
+
+	BUG_ON(PageHighMem(page));
+	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+	return dma_handle;
+}
 
-void consistent_sync(void *vaddr, size_t size, int direction)
+static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
+			      size_t size, enum dma_data_direction dir,
+			      struct dma_attrs *attrs)
 {
-	switch (direction) {
-	case PCI_DMA_NONE:
-		BUG();
-	case PCI_DMA_FROMDEVICE:        /* invalidate only */
-		__invalidate_dcache_range((unsigned long)vaddr,
-				          (unsigned long)size);
-		break;
+	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
+}
 
-	case PCI_DMA_TODEVICE:          /* writeback only */
-	case PCI_DMA_BIDIRECTIONAL:     /* writeback and invalidate */
-		__flush_invalidate_dcache_range((unsigned long)vaddr,
-						(unsigned long)size);
-		break;
+static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
+			 int nents, enum dma_data_direction dir,
+			 struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
+						 s->length, dir, attrs);
+	}
+	return nents;
+}
+
+static void xtensa_unmap_sg(struct device *dev,
+			    struct scatterlist *sg, int nents,
+			    enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		xtensa_unmap_page(dev, sg_dma_address(s),
+				  sg_dma_len(s), dir, attrs);
 	}
 }
-EXPORT_SYMBOL(consistent_sync);
+
+int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+struct dma_map_ops xtensa_dma_map_ops = {
+	.alloc = xtensa_dma_alloc,
+	.free = xtensa_dma_free,
+	.map_page = xtensa_map_page,
+	.unmap_page = xtensa_unmap_page,
+	.map_sg = xtensa_map_sg,
+	.unmap_sg = xtensa_unmap_sg,
+	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
+	.sync_single_for_device = xtensa_sync_single_for_device,
+	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
+	.sync_sg_for_device = xtensa_sync_sg_for_device,
+	.mapping_error = xtensa_dma_mapping_error,
+};
+EXPORT_SYMBOL(xtensa_dma_map_ops);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init xtensa_dma_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(xtensa_dma_init);
```
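The patch also wires up the generic DMA debugging facility: the architecture now selects `HAVE_DMA_API_DEBUG`, and `xtensa_dma_init()` preallocates the tracking entries at boot. To actually use it, a build would enable the generic Kconfig option; the fragment below is an illustrative `.config` excerpt, not part of the patch:

```
# Hypothetical .config fragment: turn on DMA API debug checks.
# Leaked mappings and sync/direction mismatches are then reported
# through the kernel log.
CONFIG_DMA_API_DEBUG=y
```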