Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r--  arch/arm/plat-omap/dma.c  102
1 file changed, 88 insertions(+), 14 deletions(-)
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index b53125f41293..d17375e06a1e 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -32,9 +32,9 @@
#include <asm/system.h>
#include <mach/hardware.h>
-#include <mach/dma.h>
+#include <plat/dma.h>
-#include <mach/tc.h>
+#include <plat/tc.h>
#undef DEBUG
@@ -54,6 +54,12 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
static int enable_1510_mode;
+static struct omap_dma_global_context_registers {
+ u32 dma_irqenable_l0;
+ u32 dma_ocp_sysconfig;
+ u32 dma_gcr;
+} omap_dma_global_context;
+
struct omap_dma_lch {
int next_lch;
int dev_id;
@@ -691,13 +697,16 @@ static inline void disable_lnk(int lch)
static inline void omap2_enable_irq_lch(int lch)
{
u32 val;
+ unsigned long flags;
if (!cpu_class_is_omap2())
return;
+ spin_lock_irqsave(&dma_chan_lock, flags);
val = dma_read(IRQENABLE_L0);
val |= 1 << lch;
dma_write(val, IRQENABLE_L0);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
}
int omap_request_dma(int dev_id, const char *dev_name,
@@ -799,10 +808,13 @@ void omap_free_dma(int lch)
if (cpu_class_is_omap2()) {
u32 val;
+
+ spin_lock_irqsave(&dma_chan_lock, flags);
/* Disable interrupts */
val = dma_read(IRQENABLE_L0);
val &= ~(1 << lch);
dma_write(val, IRQENABLE_L0);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
/* Clear the CSR register and IRQ status register */
dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
@@ -1108,6 +1120,14 @@ int omap_dma_running(void)
{
int lch;
+ /*
+ * On OMAP1510, internal LCD controller will start the transfer
+ * when it gets enabled, so assume DMA running if LCD enabled.
+ */
+ if (cpu_is_omap1510())
+ if (omap_readw(0xfffec000 + 0x00) & (1 << 0))
+ return 1;
+
/* Check if LCD DMA is running */
if (cpu_is_omap16xx())
if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
@@ -1232,7 +1252,7 @@ static void create_dma_lch_chain(int lch_head, int lch_queue)
* OMAP_DMA_DYNAMIC_CHAIN
* @params - Channel parameters
*
- * @return - Succes : 0
+ * @return - Success : 0
* Failure: -EINVAL/-ENOMEM
*/
int omap_request_dma_chain(int dev_id, const char *dev_name,
@@ -2341,44 +2361,83 @@ void omap_stop_lcd_dma(void)
}
EXPORT_SYMBOL(omap_stop_lcd_dma);
+void omap_dma_global_context_save(void)
+{
+ omap_dma_global_context.dma_irqenable_l0 =
+ dma_read(IRQENABLE_L0);
+ omap_dma_global_context.dma_ocp_sysconfig =
+ dma_read(OCP_SYSCONFIG);
+ omap_dma_global_context.dma_gcr = dma_read(GCR);
+}
+
+void omap_dma_global_context_restore(void)
+{
+ int ch;
+
+ dma_write(omap_dma_global_context.dma_gcr, GCR);
+ dma_write(omap_dma_global_context.dma_ocp_sysconfig,
+ OCP_SYSCONFIG);
+ dma_write(omap_dma_global_context.dma_irqenable_l0,
+ IRQENABLE_L0);
+
+ /*
+ * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
+ * after secure sram context save and restore. Hence we need to
+ * manually clear those IRQs to avoid spurious interrupts. This
+ * affects only secure devices.
+ */
+ if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+ dma_write(0x3 , IRQSTATUS_L0);
+
+ for (ch = 0; ch < dma_chan_count; ch++)
+ if (dma_chan[ch].dev_id != -1)
+ omap_clear_dma(ch);
+}
+
/*----------------------------------------------------------------------------*/
static int __init omap_init_dma(void)
{
+ unsigned long base;
int ch, r;
if (cpu_class_is_omap1()) {
- omap_dma_base = OMAP1_IO_ADDRESS(OMAP1_DMA_BASE);
+ base = OMAP1_DMA_BASE;
dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
} else if (cpu_is_omap24xx()) {
- omap_dma_base = OMAP2_IO_ADDRESS(OMAP24XX_DMA4_BASE);
+ base = OMAP24XX_DMA4_BASE;
dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
} else if (cpu_is_omap34xx()) {
- omap_dma_base = OMAP2_IO_ADDRESS(OMAP34XX_DMA4_BASE);
+ base = OMAP34XX_DMA4_BASE;
dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
} else if (cpu_is_omap44xx()) {
- omap_dma_base = OMAP2_IO_ADDRESS(OMAP44XX_DMA4_BASE);
+ base = OMAP44XX_DMA4_BASE;
dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
} else {
pr_err("DMA init failed for unsupported omap\n");
return -ENODEV;
}
+ omap_dma_base = ioremap(base, SZ_4K);
+ BUG_ON(!omap_dma_base);
+
if (cpu_class_is_omap2() && omap_dma_reserve_channels
&& (omap_dma_reserve_channels <= dma_lch_count))
dma_lch_count = omap_dma_reserve_channels;
dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
GFP_KERNEL);
- if (!dma_chan)
- return -ENOMEM;
+ if (!dma_chan) {
+ r = -ENOMEM;
+ goto out_unmap;
+ }
if (cpu_class_is_omap2()) {
dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
dma_lch_count, GFP_KERNEL);
if (!dma_linked_lch) {
- kfree(dma_chan);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto out_free;
}
}
@@ -2452,7 +2511,7 @@ static int __init omap_init_dma(void)
for (i = 0; i < ch; i++)
free_irq(omap1_dma_irq[i],
(void *) (i + 1));
- return r;
+ goto out_free;
}
}
}
@@ -2470,8 +2529,8 @@ static int __init omap_init_dma(void)
setup_irq(irq, &omap24xx_dma_irq);
}
- /* Enable smartidle idlemodes and autoidle */
if (cpu_is_omap34xx()) {
+ /* Enable smartidle idlemodes and autoidle */
u32 v = dma_read(OCP_SYSCONFIG);
v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
DMA_SYSCONFIG_SIDLEMODE_MASK |
@@ -2480,6 +2539,13 @@ static int __init omap_init_dma(void)
DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
DMA_SYSCONFIG_AUTOIDLE);
dma_write(v , OCP_SYSCONFIG);
+ /* reserve dma channels 0 and 1 in high security devices */
+ if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
+ printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
+ "HS ROM code\n");
+ dma_chan[0].dev_id = 0;
+ dma_chan[1].dev_id = 1;
+ }
}
@@ -2494,11 +2560,19 @@ static int __init omap_init_dma(void)
"(error %d)\n", r);
for (i = 0; i < dma_chan_count; i++)
free_irq(omap1_dma_irq[i], (void *) (i + 1));
- return r;
+ goto out_free;
}
}
return 0;
+
+out_free:
+ kfree(dma_chan);
+
+out_unmap:
+ iounmap(omap_dma_base);
+
+ return r;
}
arch_initcall(omap_init_dma);
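
Note: the omap_dma_global_context_save()/omap_dma_global_context_restore() pair added above exists so a power-management path can preserve IRQENABLE_L0, OCP_SYSCONFIG and GCR across an off-mode transition in which the DMA controller loses context. A minimal sketch of a caller follows; it assumes the prototypes are visible via <plat/dma.h>, and pm_enter_off_mode() is a hypothetical helper, not part of this patch:

#include <plat/dma.h>

/* Illustrative only: save/restore DMA controller state around off mode */
static int omap3_enter_off_mode(void)
{
	/* Save IRQENABLE_L0, OCP_SYSCONFIG and GCR before context is lost */
	omap_dma_global_context_save();

	pm_enter_off_mode();	/* hypothetical: core power domain hits OFF */

	/*
	 * Restore the saved registers; restore also clears any logical
	 * channels that were still allocated when the context was lost.
	 */
	omap_dma_global_context_restore();

	return 0;
}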