author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-21 21:32:08 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-21 21:32:08 +0100 |
commit | eae21770b4fed5597623aad0d618190fa60426ff (patch) | |
tree | 23c59fb7a33e93a79525e2b10d56df54d40049d1 /arch/cris | |
parent | Merge branch 'overlayfs-linus' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff) | |
parent | MAINTAINERS: add/fix git URLs for various subsystems (diff) | |
download | linux-eae21770b4fed5597623aad0d618190fa60426ff.tar.xz linux-eae21770b4fed5597623aad0d618190fa60426ff.zip |
Merge branch 'akpm' (patches from Andrew)
Merge third patch-bomb from Andrew Morton:
"I'm pretty much done for -rc1 now:
- the rest of MM, basically
- lib/ updates
- checkpatch, epoll, hfs, fatfs, ptrace, coredump, exit
- cpu_mask simplifications
- kexec, rapidio, MAINTAINERS etc, etc.
- more dma-mapping cleanups/simplifications from hch"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
MAINTAINERS: add/fix git URLs for various subsystems
mm: memcontrol: add "sock" to cgroup2 memory.stat
mm: memcontrol: basic memory statistics in cgroup2 memory controller
mm: memcontrol: do not uncharge old page in page cache replacement
Documentation: cgroup: add memory.swap.{current,max} description
mm: free swap cache aggressively if memcg swap is full
mm: vmscan: do not scan anon pages if memcg swap limit is hit
swap.h: move memcg related stuff to the end of the file
mm: memcontrol: replace mem_cgroup_lruvec_online with mem_cgroup_online
mm: vmscan: pass memcg to get_scan_count()
mm: memcontrol: charge swap to cgroup2
mm: memcontrol: clean up alloc, online, offline, free functions
mm: memcontrol: flatten struct cg_proto
mm: memcontrol: rein in the CONFIG space madness
net: drop tcp_memcontrol.c
mm: memcontrol: introduce CONFIG_MEMCG_LEGACY_KMEM
mm: memcontrol: allow to disable kmem accounting for cgroup2
mm: memcontrol: account "kmem" consumers in cgroup2 memory controller
mm: memcontrol: move kmem accounting code to CONFIG_MEMCG
mm: memcontrol: separate kmem code from legacy tcp accounting code
...
Diffstat (limited to 'arch/cris')
-rw-r--r-- | arch/cris/arch-v32/drivers/pci/dma.c | 54
-rw-r--r-- | arch/cris/include/asm/dma-mapping.h | 161
2 files changed, 49 insertions(+), 166 deletions(-)
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
index ee55578d9834..8d5efa58cce1 100644
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -16,21 +16,18 @@
 #include <linux/gfp.h>
 #include <asm/io.h>
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			dma_addr_t *dma_handle, gfp_t gfp)
+static void *v32_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
-	int order = get_order(size);
+
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
-
 	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
 
-	ret = (void *)__get_free_pages(gfp, order);
+	ret = (void *)__get_free_pages(gfp, get_order(size));
 
 	if (ret != NULL) {
 		memset(ret, 0, size);
@@ -39,12 +36,45 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-void dma_free_coherent(struct device *dev, size_t size,
-			void *vaddr, dma_addr_t dma_handle)
+static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static inline dma_addr_t v32_dma_map_page(struct device *dev,
+		struct page *page, unsigned long offset, size_t size,
+		enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
-	int order = get_order(size);
+	return page_to_phys(page) + offset;
+}
 
-	if (!dma_release_from_coherent(dev, order, vaddr))
-		free_pages((unsigned long)vaddr, order);
+static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	printk("Map sg\n");
+	return nents;
+}
+
+static inline int v32_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < 0x00ffffff)
+		return 0;
+	return 1;
 }
+
+struct dma_map_ops v32_dma_ops = {
+	.alloc		= v32_dma_alloc,
+	.free		= v32_dma_free,
+	.map_page	= v32_dma_map_page,
+	.map_sg		= v32_dma_map_sg,
+	.dma_supported	= v32_dma_supported,
+};
+EXPORT_SYMBOL(v32_dma_ops);
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 57f794ee6039..5a370178a0e9 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -1,156 +1,20 @@
-/* DMA mapping. Nothing tricky here, just virt_to_phys */
-
 #ifndef _ASM_CRIS_DMA_MAPPING_H
 #define _ASM_CRIS_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
 #ifdef CONFIG_PCI
-#include <asm-generic/dma-coherent.h>
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			dma_addr_t *dma_handle, gfp_t flag);
+extern struct dma_map_ops v32_dma_ops;
 
-void dma_free_coherent(struct device *dev, size_t size,
-			void *vaddr, dma_addr_t dma_handle);
-#else
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	BUG();
-	return NULL;
+	return &v32_dma_ops;
 }
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle)
+#else
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	BUG();
+	BUG();
+	return NULL;
 }
 #endif
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction)
-{
-	printk("Map sg\n");
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-		size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if(mask < 0x00ffffff)
-		return 0;
-
-	return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
 
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -158,15 +22,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-		void *cpu_addr, dma_addr_t dma_addr,
-		size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-
 #endif
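The interesting part of this conversion is the header change: instead of cris providing its own inline dma_map_single()/dma_map_page()/dma_supported() and friends, the architecture now publishes a single struct dma_map_ops table (v32_dma_ops) plus a get_dma_ops() hook, and the common dma-mapping code dispatches every operation through that table. The sketch below is only an illustration of that dispatch pattern as a standalone userspace program; the two-entry ops struct and the fake_device/fake_page types are made up for the example and are not the kernel's real definitions.

/*
 * Minimal sketch of the ops-table dispatch this commit moves cris onto.
 * Hypothetical, cut-down types; not the kernel's actual dma_map_ops.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

struct fake_device { const char *name; };
struct fake_page   { uintptr_t phys; };

/* The arch fills in a table of function pointers once... */
struct dma_map_ops {
	dma_addr_t (*map_page)(struct fake_device *dev, struct fake_page *page,
			       unsigned long offset, size_t size);
	int (*dma_supported)(struct fake_device *dev, uint64_t mask);
};

/* ...and for cris v32, mapping a page is just phys address + offset, as in the diff. */
static dma_addr_t v32_map_page(struct fake_device *dev, struct fake_page *page,
			       unsigned long offset, size_t size)
{
	(void)dev; (void)size;
	return page->phys + offset;
}

static int v32_supported(struct fake_device *dev, uint64_t mask)
{
	(void)dev;
	/* masks tighter than the GFP_DMA range cannot be honoured */
	return mask >= 0x00ffffff;
}

static struct dma_map_ops v32_dma_ops = {
	.map_page	= v32_map_page,
	.dma_supported	= v32_supported,
};

/* get_dma_ops(): the only per-arch hook the new dma-mapping.h has to provide. */
static struct dma_map_ops *get_dma_ops(struct fake_device *dev)
{
	(void)dev;
	return &v32_dma_ops;
}

/* Common code then needs a single generic wrapper per operation. */
static dma_addr_t dma_map_page(struct fake_device *dev, struct fake_page *page,
			       unsigned long offset, size_t size)
{
	return get_dma_ops(dev)->map_page(dev, page, offset, size);
}

int main(void)
{
	struct fake_device dev = { .name = "pci0" };
	struct fake_page   pg  = { .phys = 0x1000 };

	printf("bus addr = 0x%llx\n",
	       (unsigned long long)dma_map_page(&dev, &pg, 0x80, 256));
	printf("32-bit mask supported: %d\n",
	       get_dma_ops(&dev)->dma_supported(&dev, 0xffffffffULL));
	return 0;
}

That single indirection is where the diffstat savings come from: the generic wrappers live once in common code, so the per-arch header shrinks from roughly 160 lines to about 20.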