| author | Paul Mundt <lethal@linux-sh.org> | 2009-10-20 05:55:56 +0200 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2009-10-20 05:55:56 +0200 |
| commit | 73c926bee0e4b7739bbb992a0a3df561178dd522 (patch) | |
| tree | 1b57464ef1a105911ddd9dab514e404fa2aa7cb2 /arch/sh/kernel/dma-nommu.c | |
| parent | sh: Fix up cacheflush routine symbol exports. (diff) | |
| download | linux-73c926bee0e4b7739bbb992a0a3df561178dd522.tar.xz linux-73c926bee0e4b7739bbb992a0a3df561178dd522.zip | |
sh: Convert to asm-generic/dma-mapping-common.h
This converts the old DMA mapping support to the new generic
dma-mapping-common.h abstraction.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
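
For context, the common header works by routing the generic DMA API through the architecture's struct dma_map_ops, so sh only has to supply an ops table such as the nommu_dma_ops added below. A simplified, paraphrased sketch of that dispatch path (debug and kmemcheck hooks omitted; not the verbatim header):

```c
/*
 * Simplified sketch of the dispatch in asm-generic/dma-mapping-common.h:
 * the generic dma_map_single() path looks up the architecture's
 * struct dma_map_ops and calls its ->map_page() hook.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK,
			     size, dir, attrs);
}
```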
Diffstat (limited to 'arch/sh/kernel/dma-nommu.c')
-rw-r--r-- | arch/sh/kernel/dma-nommu.c | 76 |
1 file changed, 76 insertions, 0 deletions
```diff
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
new file mode 100644
index 000000000000..e88fcebf860c
--- /dev/null
+++ b/arch/sh/kernel/dma-nommu.c
@@ -0,0 +1,76 @@
+/*
+ * DMA mapping support for platforms lacking IOMMUs.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
+{
+	dma_addr_t addr = page_to_phys(page) + offset;
+
+	WARN_ON(size == 0);
+	dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+	return addr;
+}
+
+static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	WARN_ON(nents == 0 || sg[0].length == 0);
+
+	for_each_sg(sg, s, nents, i) {
+		BUG_ON(!sg_page(s));
+
+		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+
+		s->dma_address = sg_phys(s);
+		s->dma_length = s->length;
+	}
+
+	return nents;
+}
+
+static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+			      size_t size, enum dma_data_direction dir)
+{
+	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+}
+
+static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+			  int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nelems, i)
+		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+}
+
+struct dma_map_ops nommu_dma_ops = {
+	.map_page		= nommu_map_page,
+	.map_sg			= nommu_map_sg,
+	.sync_single_for_device	= nommu_sync_single,
+	.sync_sg_for_device	= nommu_sync_sg,
+	.is_phys		= 1,
+};
+
+void __init no_iommu_init(void)
+{
+	if (dma_ops)
+		return;
+	dma_ops = &nommu_dma_ops;
+}
```
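
On an IOMMU-less SH platform these ops are then exercised through the usual streaming DMA API. A minimal, hypothetical driver-side sketch (example_start_tx(), dev, buf, and len are placeholders, not part of this commit):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver fragment: map a buffer for a device-bound transfer. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Dispatched to nommu_map_page(), which writes back the CPU cache. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and start the transfer ... */

	/* nommu_dma_ops provides no unmap hook, so this is effectively a no-op. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
```

The sync_single_for_device/sync_sg_for_device hooks cover the case where the CPU touches an already-mapped buffer again before handing it back to the device.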