author		David Daney <ddaney@caviumnetworks.com>	2008-12-12 00:33:36 +0100
committer	Ralf Baechle <ralf@linux-mips.org>	2009-01-11 10:57:24 +0100
commit		843aef4930b9953c9ca624a990b201440304b56f (patch)
tree		9debbaa7d9caa8c73db65ea2674e7ed26e285893 /arch
parent		MIPS: Add Cavium OCTEON slot into proper tlb category. (diff)
MIPS: Adjust the dma-common.c platform hooks.
We add a dev parameter to plat_unmap_dma_mem(), and hooks for
plat_dma_supported(), plat_extra_sync_for_device() and
plat_dma_mapping_error(), which should be nop changes for all existing
targets.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
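All five existing platforms receive the same nop versions of the hooks (see
the diff below); the point of the indirection is to let a future platform
substitute real behaviour. As a minimal sketch, assuming a hypothetical
mach-foo board whose DMA engine needs at least 32 addressable bits, needs a
write-buffer flush before the device may observe data, and reserves 0 as its
mapping-error cookie (all three are illustrative assumptions, not taken from
any real platform), its dma-coherence.h overrides might read:

	/* Hypothetical mach-foo/dma-coherence.h fragment -- a sketch of
	 * non-nop hook implementations, not code from any real board. */
	static inline int plat_dma_supported(struct device *dev, u64 mask)
	{
		/* Assumption: the DMA engine cannot work with masks
		 * narrower than 32 bits. */
		if (mask < DMA_BIT_MASK(32))
			return 0;

		return 1;
	}

	static inline void plat_extra_sync_for_device(struct device *dev)
	{
		/* Assumption: writes must be pushed out of a platform
		 * write buffer before the device may see them;
		 * mach_foo_flush_write_buffer() is a hypothetical helper. */
		mach_foo_flush_write_buffer();
	}

	static inline int plat_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
	{
		/* Assumption: the platform reserves 0 as its error cookie. */
		return dma_addr == 0;
	}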
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/include/asm/mach-generic/dma-coherence.h | 26
-rw-r--r--  arch/mips/include/asm/mach-ip27/dma-coherence.h    | 26
-rw-r--r--  arch/mips/include/asm/mach-ip32/dma-coherence.h    | 26
-rw-r--r--  arch/mips/include/asm/mach-jazz/dma-coherence.h    | 26
-rw-r--r--  arch/mips/include/asm/mach-lemote/dma-coherence.h  | 26
-rw-r--r--  arch/mips/mm/dma-default.c                         | 25
6 files changed, 135 insertions(+), 20 deletions(-)
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 76e04e7feb84..36c611b6c597 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -28,10 +28,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return dma_addr;
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
#ifdef CONFIG_DMA_COHERENT
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index ed7e6222dc15..4c21bfca10c3 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -38,10 +38,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return dma_addr & ~(0xffUL << 56);
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 1; /* IP27 non-cohernet mode is unsupported */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index a5511ebb2d53..7ae40f4b1c80 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -60,10 +60,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return addr;
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 0; /* IP32 is non-cohernet */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index d66979a124a8..1c7cd27efa7b 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -27,11 +27,35 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return vdma_log2phys(dma_addr);
}
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
vdma_free(dma_addr);
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
diff --git a/arch/mips/include/asm/mach-lemote/dma-coherence.h b/arch/mips/include/asm/mach-lemote/dma-coherence.h
index 7e914777ebc4..38fad7dfe7da 100644
--- a/arch/mips/include/asm/mach-lemote/dma-coherence.h
+++ b/arch/mips/include/asm/mach-lemote/dma-coherence.h
@@ -30,10 +30,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return dma_addr & 0x7fffffff;
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e6708b3ad343..546e6977d4ff 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
- plat_unmap_dma_mem(dma_handle);
+ plat_unmap_dma_mem(dev, dma_handle);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -122,7 +122,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
{
unsigned long addr = (unsigned long) vaddr;
- plat_unmap_dma_mem(dma_handle);
+ plat_unmap_dma_mem(dev, dma_handle);
if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);
@@ -173,7 +173,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
__dma_sync(dma_addr_to_virt(dma_addr), size,
direction);
- plat_unmap_dma_mem(dma_addr);
+ plat_unmap_dma_mem(dev, dma_addr);
}
EXPORT_SYMBOL(dma_unmap_single);
@@ -229,7 +229,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
dma_cache_wback_inv(addr, size);
}
- plat_unmap_dma_mem(dma_address);
+ plat_unmap_dma_mem(dev, dma_address);
}
EXPORT_SYMBOL(dma_unmap_page);
@@ -249,7 +249,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
if (addr)
__dma_sync(addr, sg->length, direction);
}
- plat_unmap_dma_mem(sg->dma_address);
+ plat_unmap_dma_mem(dev, sg->dma_address);
}
}
@@ -275,6 +275,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
{
BUG_ON(direction == DMA_NONE);
+ plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
@@ -305,6 +306,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
{
BUG_ON(direction == DMA_NONE);
+ plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
@@ -351,22 +353,14 @@ EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return 0;
+ return plat_dma_mapping_error(dev, dma_addr);
}
EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
+ return plat_dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
@@ -383,6 +377,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
BUG_ON(direction == DMA_NONE);
+ plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
__dma_sync((unsigned long)vaddr, size, direction);
}
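Driver code needs no changes for any of this: dma_set_mask() still funnels
through dma_supported(), which after this patch simply delegates to the
platform's plat_dma_supported() hook. A typical probe-time check keeps its
usual shape (a sketch; pdev stands for whatever device the driver is
probing):

	/* Sketch: the mask check now ends up in plat_dma_supported(). */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* the platform rejected a 32-bit mask */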