Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/Kconfig       |   1
-rw-r--r-- | drivers/dma/imx-sdma.c    |   1
-rw-r--r-- | drivers/dma/ioat/dma.c    |  52
-rw-r--r-- | drivers/dma/ioat/dma.h    |   1
-rw-r--r-- | drivers/dma/ioat/dma_v2.c |  11
-rw-r--r-- | drivers/dma/ioat/dma_v3.c |   3
-rw-r--r-- | drivers/dma/mv_xor.c      |  24
-rw-r--r-- | drivers/dma/omap-dma.c    | 659
-rw-r--r-- | drivers/dma/ste_dma40.c   |   4
9 files changed, 663 insertions, 93 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index edc739f986a8..ba06d1d2f99e 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -346,6 +346,7 @@ config MOXART_DMA tristate "MOXART DMA support" depends on ARCH_MOXART select DMA_ENGINE + select DMA_OF select DMA_VIRTUAL_CHANNELS help Enable support for the MOXA ART SoC DMA controller. diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 4e7918339b12..19041cefabb1 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -449,6 +449,7 @@ static const struct of_device_id sdma_dt_ids[] = { { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, }, { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, + { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sdma_dt_ids); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 87529181efcc..4e3549a16132 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { chan = ioat_chan_by_index(instance, bit); - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &chan->state)) + tasklet_schedule(&chan->cleanup_task); } writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); @@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) { struct ioat_chan_common *chan = data; - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &chan->state)) + tasklet_schedule(&chan->cleanup_task); return IRQ_HANDLED; } @@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c chan->timer.function = device->timer_fn; chan->timer.data = data; tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); - tasklet_disable(&chan->cleanup_task); } /** @@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) writel(((u64) chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - tasklet_enable(&chan->cleanup_task); + set_bit(IOAT_RUN, &chan->state); ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", __func__, ioat->desccount); return ioat->desccount; } +void ioat_stop(struct ioat_chan_common *chan) +{ + struct ioatdma_device *device = chan->device; + struct pci_dev *pdev = device->pdev; + int chan_id = chan_num(chan); + struct msix_entry *msix; + + /* 1/ stop irq from firing tasklets + * 2/ stop the tasklet from re-arming irqs + */ + clear_bit(IOAT_RUN, &chan->state); + + /* flush inflight interrupts */ + switch (device->irq_mode) { + case IOAT_MSIX: + msix = &device->msix_entries[chan_id]; + synchronize_irq(msix->vector); + break; + case IOAT_MSI: + case IOAT_INTX: + synchronize_irq(pdev->irq); + break; + default: + break; + } + + /* flush inflight timers */ + del_timer_sync(&chan->timer); + + /* flush inflight tasklet runs */ + tasklet_kill(&chan->cleanup_task); + + /* final cleanup now that everything is quiesced and can't re-arm */ + device->cleanup_fn((unsigned long) &chan->common); +} + /** * ioat1_dma_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned @@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) if (ioat->desccount == 0) return; - tasklet_disable(&chan->cleanup_task); - 
del_timer_sync(&chan->timer); - ioat1_cleanup(ioat); + ioat_stop(chan); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. @@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, static void ioat1_cleanup_event(unsigned long data) { struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat1_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 11fb877ddca9..e982f00a9843 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan, void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); void ioat_kobject_del(struct ioatdma_device *device); int ioat_dma_setup_interrupts(struct ioatdma_device *device); +void ioat_stop(struct ioat_chan_common *chan); extern const struct sysfs_ops ioat_sysfs_ops; extern struct ioat_sysfs_entry ioat_version_attr; extern struct ioat_sysfs_entry ioat_cap_attr; diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5d3affe7e976..8d1058085eeb 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) void ioat2_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat2_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) ioat->issued = 0; ioat->tail = 0; ioat->alloc_order = order; + set_bit(IOAT_RUN, &chan->state); spin_unlock_bh(&ioat->prep_lock); spin_unlock_bh(&chan->cleanup_lock); - tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); /* check that we got off the ground */ @@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); if (is_ioat_active(status) || is_ioat_idle(status)) { - set_bit(IOAT_RUN, &chan->state); return 1 << ioat->alloc_order; } else { u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); @@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c) if (!ioat->ring) return; - tasklet_disable(&chan->cleanup_task); - del_timer_sync(&chan->timer); - device->cleanup_fn((unsigned long) c); + ioat_stop(chan); device->reset_hw(chan); - clear_bit(IOAT_RUN, &chan->state); spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->prep_lock); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 820817e97e62..b9b38a1cf92f 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat) static void ioat3_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat3_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 53fb0c8365b0..766b68ed505c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) if 
(!mv_can_chain(grp_start)) goto submit_done; - dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", - old_chain_tail->async_tx.phys); + dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", + &old_chain_tail->async_tx.phys); /* fix up the hardware chain */ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); @@ -527,7 +527,8 @@ submit_done: /* returns the number of allocated descriptors */ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) { - char *hw_desc; + void *virt_desc; + dma_addr_t dma_desc; int idx; struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *slot = NULL; @@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) " %d descriptor slots", idx); break; } - hw_desc = (char *) mv_chan->dma_desc_pool_virt; - slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + virt_desc = mv_chan->dma_desc_pool_virt; + slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE; dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); INIT_LIST_HEAD(&slot->tx_list); - hw_desc = (char *) mv_chan->dma_desc_pool; - slot->async_tx.phys = - (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + dma_desc = mv_chan->dma_desc_pool; + slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); @@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, int slot_cnt; dev_dbg(mv_chan_to_devp(mv_chan), - "%s dest: %x src %x len: %u flags: %ld\n", - __func__, dest, src, len, flags); + "%s dest: %pad src %pad len: %u flags: %ld\n", + __func__, &dest, &src, len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; @@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan_to_devp(mv_chan), - "%s src_cnt: %d len: dest %x %u flags: %ld\n", - __func__, src_cnt, len, dest, flags); + "%s src_cnt: %d len: %u dest %pad flags: %ld\n", + __func__, src_cnt, len, &dest, flags); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_xor_slot_count(len, src_cnt); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 928a0fa39f89..b19f04f4390b 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -5,6 +5,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ +#include <linux/delay.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/err.h> @@ -26,11 +27,21 @@ struct omap_dmadev { spinlock_t lock; struct tasklet_struct task; struct list_head pending; + void __iomem *base; + const struct omap_dma_reg *reg_map; + struct omap_system_dma_plat_info *plat; + bool legacy; + spinlock_t irq_lock; + uint32_t irq_enable_mask; + struct omap_chan *lch_map[32]; }; struct omap_chan { struct virt_dma_chan vc; struct list_head node; + void __iomem *channel_base; + const struct omap_dma_reg *reg_map; + uint32_t ccr; struct dma_slave_config cfg; unsigned dma_sig; @@ -54,19 +65,93 @@ struct omap_desc { dma_addr_t dev_addr; int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ - uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ - uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ - uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ - uint8_t periph_port; /* Peripheral port */ + uint8_t es; /* CSDP_DATA_TYPE_xxx */ + uint32_t ccr; /* CCR value */ + uint16_t clnk_ctrl; /* CLNK_CTRL value */ + uint16_t cicr; /* CICR value */ + uint32_t csdp; /* CSDP value */ unsigned sglen; struct omap_sg sg[0]; }; +enum { + CCR_FS = BIT(5), + CCR_READ_PRIORITY = BIT(6), + CCR_ENABLE = BIT(7), + CCR_AUTO_INIT = BIT(8), /* OMAP1 only */ + CCR_REPEAT = BIT(9), /* OMAP1 only */ + CCR_OMAP31_DISABLE = BIT(10), /* OMAP1 only */ + CCR_SUSPEND_SENSITIVE = BIT(8), /* OMAP2+ only */ + CCR_RD_ACTIVE = BIT(9), /* OMAP2+ only */ + CCR_WR_ACTIVE = BIT(10), /* OMAP2+ only */ + CCR_SRC_AMODE_CONSTANT = 0 << 12, + CCR_SRC_AMODE_POSTINC = 1 << 12, + CCR_SRC_AMODE_SGLIDX = 2 << 12, + CCR_SRC_AMODE_DBLIDX = 3 << 12, + CCR_DST_AMODE_CONSTANT = 0 << 14, + CCR_DST_AMODE_POSTINC = 1 << 14, + CCR_DST_AMODE_SGLIDX = 2 << 14, + CCR_DST_AMODE_DBLIDX = 3 << 14, + CCR_CONSTANT_FILL = BIT(16), + CCR_TRANSPARENT_COPY = BIT(17), + CCR_BS = BIT(18), + CCR_SUPERVISOR = BIT(22), + CCR_PREFETCH = BIT(23), + CCR_TRIGGER_SRC = BIT(24), + CCR_BUFFERING_DISABLE = BIT(25), + CCR_WRITE_PRIORITY = BIT(26), + CCR_SYNC_ELEMENT = 0, + CCR_SYNC_FRAME = CCR_FS, + CCR_SYNC_BLOCK = CCR_BS, + CCR_SYNC_PACKET = CCR_BS | CCR_FS, + + CSDP_DATA_TYPE_8 = 0, + CSDP_DATA_TYPE_16 = 1, + CSDP_DATA_TYPE_32 = 2, + CSDP_SRC_PORT_EMIFF = 0 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_EMIFS = 1 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_OCP_T1 = 2 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_TIPB = 3 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_OCP_T2 = 4 << 2, /* OMAP1 only */ + CSDP_SRC_PORT_MPUI = 5 << 2, /* OMAP1 only */ + CSDP_SRC_PACKED = BIT(6), + CSDP_SRC_BURST_1 = 0 << 7, + CSDP_SRC_BURST_16 = 1 << 7, + CSDP_SRC_BURST_32 = 2 << 7, + CSDP_SRC_BURST_64 = 3 << 7, + CSDP_DST_PORT_EMIFF = 0 << 9, /* OMAP1 only */ + CSDP_DST_PORT_EMIFS = 1 << 9, /* OMAP1 only */ + CSDP_DST_PORT_OCP_T1 = 2 << 9, /* OMAP1 only */ + CSDP_DST_PORT_TIPB = 3 << 9, /* OMAP1 only */ + CSDP_DST_PORT_OCP_T2 = 4 << 9, /* OMAP1 only */ + CSDP_DST_PORT_MPUI = 5 << 9, /* OMAP1 only */ + CSDP_DST_PACKED = BIT(13), + CSDP_DST_BURST_1 = 0 << 14, + CSDP_DST_BURST_16 = 1 << 14, + CSDP_DST_BURST_32 = 2 << 14, + CSDP_DST_BURST_64 = 3 << 14, + + CICR_TOUT_IE = BIT(0), /* OMAP1 only */ + CICR_DROP_IE = BIT(1), + CICR_HALF_IE = BIT(2), + CICR_FRAME_IE = BIT(3), + CICR_LAST_IE = BIT(4), + CICR_BLOCK_IE = BIT(5), + CICR_PKT_IE = BIT(7), /* OMAP2+ only */ + CICR_TRANS_ERR_IE = BIT(8), /* OMAP2+ only */ + CICR_SUPERVISOR_ERR_IE = BIT(10), /* OMAP2+ only */ + CICR_MISALIGNED_ERR_IE = BIT(11), /* OMAP2+ only */ + CICR_DRAIN_IE = BIT(12), /* OMAP2+ only */ + CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */ + + 
CLNK_CTRL_ENABLE_LNK = BIT(15), +}; + static const unsigned es_bytes[] = { - [OMAP_DMA_DATA_TYPE_S8] = 1, - [OMAP_DMA_DATA_TYPE_S16] = 2, - [OMAP_DMA_DATA_TYPE_S32] = 4, + [CSDP_DATA_TYPE_8] = 1, + [CSDP_DATA_TYPE_16] = 2, + [CSDP_DATA_TYPE_32] = 4, }; static struct of_dma_filter_info omap_dma_info = { @@ -93,28 +178,214 @@ static void omap_dma_desc_free(struct virt_dma_desc *vd) kfree(container_of(vd, struct omap_desc, vd)); } +static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr) +{ + switch (type) { + case OMAP_DMA_REG_16BIT: + writew_relaxed(val, addr); + break; + case OMAP_DMA_REG_2X16BIT: + writew_relaxed(val, addr); + writew_relaxed(val >> 16, addr + 2); + break; + case OMAP_DMA_REG_32BIT: + writel_relaxed(val, addr); + break; + default: + WARN_ON(1); + } +} + +static unsigned omap_dma_read(unsigned type, void __iomem *addr) +{ + unsigned val; + + switch (type) { + case OMAP_DMA_REG_16BIT: + val = readw_relaxed(addr); + break; + case OMAP_DMA_REG_2X16BIT: + val = readw_relaxed(addr); + val |= readw_relaxed(addr + 2) << 16; + break; + case OMAP_DMA_REG_32BIT: + val = readl_relaxed(addr); + break; + default: + WARN_ON(1); + val = 0; + } + + return val; +} + +static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val) +{ + const struct omap_dma_reg *r = od->reg_map + reg; + + WARN_ON(r->stride); + + omap_dma_write(val, r->type, od->base + r->offset); +} + +static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg) +{ + const struct omap_dma_reg *r = od->reg_map + reg; + + WARN_ON(r->stride); + + return omap_dma_read(r->type, od->base + r->offset); +} + +static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val) +{ + const struct omap_dma_reg *r = c->reg_map + reg; + + omap_dma_write(val, r->type, c->channel_base + r->offset); +} + +static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg) +{ + const struct omap_dma_reg *r = c->reg_map + reg; + + return omap_dma_read(r->type, c->channel_base + r->offset); +} + +static void omap_dma_clear_csr(struct omap_chan *c) +{ + if (dma_omap1()) + omap_dma_chan_read(c, CSR); + else + omap_dma_chan_write(c, CSR, ~0); +} + +static unsigned omap_dma_get_csr(struct omap_chan *c) +{ + unsigned val = omap_dma_chan_read(c, CSR); + + if (!dma_omap1()) + omap_dma_chan_write(c, CSR, val); + + return val; +} + +static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c, + unsigned lch) +{ + c->channel_base = od->base + od->plat->channel_stride * lch; + + od->lch_map[lch] = c; +} + +static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + + if (__dma_omap15xx(od->plat->dma_attr)) + omap_dma_chan_write(c, CPC, 0); + else + omap_dma_chan_write(c, CDAC, 0); + + omap_dma_clear_csr(c); + + /* Enable interrupts */ + omap_dma_chan_write(c, CICR, d->cicr); + + /* Enable channel */ + omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE); +} + +static void omap_dma_stop(struct omap_chan *c) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + uint32_t val; + + /* disable irq */ + omap_dma_chan_write(c, CICR, 0); + + omap_dma_clear_csr(c); + + val = omap_dma_chan_read(c, CCR); + if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) { + uint32_t sysconfig; + unsigned i; + + sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG); + val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK; + val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); + omap_dma_glbl_write(od, 
OCP_SYSCONFIG, val); + + val = omap_dma_chan_read(c, CCR); + val &= ~CCR_ENABLE; + omap_dma_chan_write(c, CCR, val); + + /* Wait for sDMA FIFO to drain */ + for (i = 0; ; i++) { + val = omap_dma_chan_read(c, CCR); + if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))) + break; + + if (i > 100) + break; + + udelay(5); + } + + if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)) + dev_err(c->vc.chan.device->dev, + "DMA drain did not complete on lch %d\n", + c->dma_ch); + + omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig); + } else { + val &= ~CCR_ENABLE; + omap_dma_chan_write(c, CCR, val); + } + + mb(); + + if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) { + val = omap_dma_chan_read(c, CLNK_CTRL); + + if (dma_omap1()) + val |= 1 << 14; /* set the STOP_LNK bit */ + else + val &= ~CLNK_CTRL_ENABLE_LNK; + + omap_dma_chan_write(c, CLNK_CTRL, val); + } +} + static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, unsigned idx) { struct omap_sg *sg = d->sg + idx; + unsigned cxsa, cxei, cxfi; - if (d->dir == DMA_DEV_TO_MEM) - omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, - OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); - else - omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, - OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + if (d->dir == DMA_DEV_TO_MEM) { + cxsa = CDSA; + cxei = CDEI; + cxfi = CDFI; + } else { + cxsa = CSSA; + cxei = CSEI; + cxfi = CSFI; + } - omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, - d->sync_mode, c->dma_sig, d->sync_type); + omap_dma_chan_write(c, cxsa, sg->addr); + omap_dma_chan_write(c, cxei, 0); + omap_dma_chan_write(c, cxfi, 0); + omap_dma_chan_write(c, CEN, sg->en); + omap_dma_chan_write(c, CFN, sg->fn); - omap_start_dma(c->dma_ch); + omap_dma_start(c, d); } static void omap_dma_start_desc(struct omap_chan *c) { struct virt_dma_desc *vd = vchan_next_desc(&c->vc); struct omap_desc *d; + unsigned cxsa, cxei, cxfi; if (!vd) { c->desc = NULL; @@ -126,12 +397,32 @@ static void omap_dma_start_desc(struct omap_chan *c) c->desc = d = to_omap_dma_desc(&vd->tx); c->sgidx = 0; - if (d->dir == DMA_DEV_TO_MEM) - omap_set_dma_src_params(c->dma_ch, d->periph_port, - OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); - else - omap_set_dma_dest_params(c->dma_ch, d->periph_port, - OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + /* + * This provides the necessary barrier to ensure data held in + * DMA coherent memory is visible to the DMA engine prior to + * the transfer starting. 
+ */ + mb(); + + omap_dma_chan_write(c, CCR, d->ccr); + if (dma_omap1()) + omap_dma_chan_write(c, CCR2, d->ccr >> 16); + + if (d->dir == DMA_DEV_TO_MEM) { + cxsa = CSSA; + cxei = CSEI; + cxfi = CSFI; + } else { + cxsa = CDSA; + cxei = CDEI; + cxfi = CDFI; + } + + omap_dma_chan_write(c, cxsa, d->dev_addr); + omap_dma_chan_write(c, cxei, 0); + omap_dma_chan_write(c, cxfi, d->fi); + omap_dma_chan_write(c, CSDP, d->csdp); + omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl); omap_dma_start_sg(c, d, 0); } @@ -186,24 +477,118 @@ static void omap_dma_sched(unsigned long data) } } +static irqreturn_t omap_dma_irq(int irq, void *devid) +{ + struct omap_dmadev *od = devid; + unsigned status, channel; + + spin_lock(&od->irq_lock); + + status = omap_dma_glbl_read(od, IRQSTATUS_L1); + status &= od->irq_enable_mask; + if (status == 0) { + spin_unlock(&od->irq_lock); + return IRQ_NONE; + } + + while ((channel = ffs(status)) != 0) { + unsigned mask, csr; + struct omap_chan *c; + + channel -= 1; + mask = BIT(channel); + status &= ~mask; + + c = od->lch_map[channel]; + if (c == NULL) { + /* This should never happen */ + dev_err(od->ddev.dev, "invalid channel %u\n", channel); + continue; + } + + csr = omap_dma_get_csr(c); + omap_dma_glbl_write(od, IRQSTATUS_L1, mask); + + omap_dma_callback(channel, csr, c); + } + + spin_unlock(&od->irq_lock); + + return IRQ_HANDLED; +} + static int omap_dma_alloc_chan_resources(struct dma_chan *chan) { + struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); + int ret; + + if (od->legacy) { + ret = omap_request_dma(c->dma_sig, "DMA engine", + omap_dma_callback, c, &c->dma_ch); + } else { + ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL, + &c->dma_ch); + } + + dev_dbg(od->ddev.dev, "allocating channel %u for %u\n", + c->dma_ch, c->dma_sig); - dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + if (ret >= 0) { + omap_dma_assign(od, c, c->dma_ch); - return omap_request_dma(c->dma_sig, "DMA engine", - omap_dma_callback, c, &c->dma_ch); + if (!od->legacy) { + unsigned val; + + spin_lock_irq(&od->irq_lock); + val = BIT(c->dma_ch); + omap_dma_glbl_write(od, IRQSTATUS_L1, val); + od->irq_enable_mask |= val; + omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask); + + val = omap_dma_glbl_read(od, IRQENABLE_L0); + val &= ~BIT(c->dma_ch); + omap_dma_glbl_write(od, IRQENABLE_L0, val); + spin_unlock_irq(&od->irq_lock); + } + } + + if (dma_omap1()) { + if (__dma_omap16xx(od->plat->dma_attr)) { + c->ccr = CCR_OMAP31_DISABLE; + /* Duplicate what plat-omap/dma.c does */ + c->ccr |= c->dma_ch + 1; + } else { + c->ccr = c->dma_sig & 0x1f; + } + } else { + c->ccr = c->dma_sig & 0x1f; + c->ccr |= (c->dma_sig & ~0x1f) << 14; + } + if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING) + c->ccr |= CCR_BUFFERING_DISABLE; + + return ret; } static void omap_dma_free_chan_resources(struct dma_chan *chan) { + struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); + if (!od->legacy) { + spin_lock_irq(&od->irq_lock); + od->irq_enable_mask &= ~BIT(c->dma_ch); + omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask); + spin_unlock_irq(&od->irq_lock); + } + + c->channel_base = NULL; + od->lch_map[c->dma_ch] = NULL; vchan_free_chan_resources(&c->vc); omap_free_dma(c->dma_ch); - dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); + dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig); } static size_t omap_dma_sg_size(struct omap_sg *sg) @@ -239,6 
+624,74 @@ static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) return size; } +/* + * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is + * read before the DMA controller finished disabling the channel. + */ +static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + uint32_t val; + + val = omap_dma_chan_read(c, reg); + if (val == 0 && od->plat->errata & DMA_ERRATA_3_3) + val = omap_dma_chan_read(c, reg); + + return val; +} + +static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + dma_addr_t addr, cdac; + + if (__dma_omap15xx(od->plat->dma_attr)) { + addr = omap_dma_chan_read(c, CPC); + } else { + addr = omap_dma_chan_read_3_3(c, CSAC); + cdac = omap_dma_chan_read_3_3(c, CDAC); + + /* + * CDAC == 0 indicates that the DMA transfer on the channel has + * not been started (no data has been transferred so far). + * Return the programmed source start address in this case. + */ + if (cdac == 0) + addr = omap_dma_chan_read(c, CSSA); + } + + if (dma_omap1()) + addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000; + + return addr; +} + +static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c) +{ + struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + dma_addr_t addr; + + if (__dma_omap15xx(od->plat->dma_attr)) { + addr = omap_dma_chan_read(c, CPC); + } else { + addr = omap_dma_chan_read_3_3(c, CDAC); + + /* + * CDAC == 0 indicates that the DMA transfer on the channel + * has not been started (no data has been transferred so + * far). Return the programmed destination start address in + * this case. + */ + if (addr == 0) + addr = omap_dma_chan_read(c, CDSA); + } + + if (dma_omap1()) + addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000; + + return addr; +} + static enum dma_status omap_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { @@ -260,9 +713,9 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, dma_addr_t pos; if (d->dir == DMA_MEM_TO_DEV) - pos = omap_get_dma_src_pos(c->dma_ch); + pos = omap_dma_get_src_pos(c); else if (d->dir == DMA_DEV_TO_MEM) - pos = omap_get_dma_dst_pos(c->dma_ch); + pos = omap_dma_get_dst_pos(c); else pos = 0; @@ -304,24 +757,23 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, enum dma_transfer_direction dir, unsigned long tx_flags, void *context) { + struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); enum dma_slave_buswidth dev_width; struct scatterlist *sgent; struct omap_desc *d; dma_addr_t dev_addr; - unsigned i, j = 0, es, en, frame_bytes, sync_type; + unsigned i, j = 0, es, en, frame_bytes; u32 burst; if (dir == DMA_DEV_TO_MEM) { dev_addr = c->cfg.src_addr; dev_width = c->cfg.src_addr_width; burst = c->cfg.src_maxburst; - sync_type = OMAP_DMA_SRC_SYNC; } else if (dir == DMA_MEM_TO_DEV) { dev_addr = c->cfg.dst_addr; dev_width = c->cfg.dst_addr_width; burst = c->cfg.dst_maxburst; - sync_type = OMAP_DMA_DST_SYNC; } else { dev_err(chan->device->dev, "%s: bad direction?\n", __func__); return NULL; @@ -330,13 +782,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( /* Bus width translates to the element size (ES) */ switch (dev_width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: - es = OMAP_DMA_DATA_TYPE_S8; + es = CSDP_DATA_TYPE_8; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: - es = 
OMAP_DMA_DATA_TYPE_S16; + es = CSDP_DATA_TYPE_16; break; case DMA_SLAVE_BUSWIDTH_4_BYTES: - es = OMAP_DMA_DATA_TYPE_S32; + es = CSDP_DATA_TYPE_32; break; default: /* not reached */ return NULL; @@ -350,9 +802,31 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( d->dir = dir; d->dev_addr = dev_addr; d->es = es; - d->sync_mode = OMAP_DMA_SYNC_FRAME; - d->sync_type = sync_type; - d->periph_port = OMAP_DMA_PORT_TIPB; + + d->ccr = c->ccr | CCR_SYNC_FRAME; + if (dir == DMA_DEV_TO_MEM) + d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; + else + d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; + + d->cicr = CICR_DROP_IE | CICR_BLOCK_IE; + d->csdp = es; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + + if (dir == DMA_DEV_TO_MEM) + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB; + else + d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF; + } else { + if (dir == DMA_DEV_TO_MEM) + d->ccr |= CCR_TRIGGER_SRC; + + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; + } + if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS) + d->clnk_ctrl = c->dma_ch; /* * Build our scatterlist entries: each contains the address, @@ -382,23 +856,22 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( size_t period_len, enum dma_transfer_direction dir, unsigned long flags, void *context) { + struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); enum dma_slave_buswidth dev_width; struct omap_desc *d; dma_addr_t dev_addr; - unsigned es, sync_type; + unsigned es; u32 burst; if (dir == DMA_DEV_TO_MEM) { dev_addr = c->cfg.src_addr; dev_width = c->cfg.src_addr_width; burst = c->cfg.src_maxburst; - sync_type = OMAP_DMA_SRC_SYNC; } else if (dir == DMA_MEM_TO_DEV) { dev_addr = c->cfg.dst_addr; dev_width = c->cfg.dst_addr_width; burst = c->cfg.dst_maxburst; - sync_type = OMAP_DMA_DST_SYNC; } else { dev_err(chan->device->dev, "%s: bad direction?\n", __func__); return NULL; @@ -407,13 +880,13 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( /* Bus width translates to the element size (ES) */ switch (dev_width) { case DMA_SLAVE_BUSWIDTH_1_BYTE: - es = OMAP_DMA_DATA_TYPE_S8; + es = CSDP_DATA_TYPE_8; break; case DMA_SLAVE_BUSWIDTH_2_BYTES: - es = OMAP_DMA_DATA_TYPE_S16; + es = CSDP_DATA_TYPE_16; break; case DMA_SLAVE_BUSWIDTH_4_BYTES: - es = OMAP_DMA_DATA_TYPE_S32; + es = CSDP_DATA_TYPE_32; break; default: /* not reached */ return NULL; @@ -428,32 +901,51 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( d->dev_addr = dev_addr; d->fi = burst; d->es = es; - if (burst) - d->sync_mode = OMAP_DMA_SYNC_PACKET; - else - d->sync_mode = OMAP_DMA_SYNC_ELEMENT; - d->sync_type = sync_type; - d->periph_port = OMAP_DMA_PORT_MPUI; d->sg[0].addr = buf_addr; d->sg[0].en = period_len / es_bytes[es]; d->sg[0].fn = buf_len / period_len; d->sglen = 1; - if (!c->cyclic) { - c->cyclic = true; - omap_dma_link_lch(c->dma_ch, c->dma_ch); + d->ccr = c->ccr; + if (dir == DMA_DEV_TO_MEM) + d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; + else + d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; - if (flags & DMA_PREP_INTERRUPT) - omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); + d->cicr = CICR_DROP_IE; + if (flags & DMA_PREP_INTERRUPT) + d->cicr |= CICR_FRAME_IE; - omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); - } + d->csdp = es; + + if (dma_omap1()) { + d->cicr |= CICR_TOUT_IE; + + if (dir == DMA_DEV_TO_MEM) + d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI; + else + d->csdp |= 
CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF; + } else { + if (burst) + d->ccr |= CCR_SYNC_PACKET; + else + d->ccr |= CCR_SYNC_ELEMENT; + + if (dir == DMA_DEV_TO_MEM) + d->ccr |= CCR_TRIGGER_SRC; + + d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; - if (dma_omap2plus()) { - omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); - omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; } + if (__dma_omap15xx(od->plat->dma_attr)) + d->ccr |= CCR_AUTO_INIT | CCR_REPEAT; + else + d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK; + + c->cyclic = true; + return vchan_tx_prep(&c->vc, &d->vd, flags); } @@ -483,20 +975,19 @@ static int omap_dma_terminate_all(struct omap_chan *c) /* * Stop DMA activity: we assume the callback will not be called - * after omap_stop_dma() returns (even if it does, it will see + * after omap_dma_stop() returns (even if it does, it will see * c->desc is NULL and exit.) */ if (c->desc) { c->desc = NULL; /* Avoid stopping the dma twice */ if (!c->paused) - omap_stop_dma(c->dma_ch); + omap_dma_stop(c); } if (c->cyclic) { c->cyclic = false; c->paused = false; - omap_dma_unlink_lch(c->dma_ch, c->dma_ch); } vchan_get_all_descriptors(&c->vc, &head); @@ -513,7 +1004,7 @@ static int omap_dma_pause(struct omap_chan *c) return -EINVAL; if (!c->paused) { - omap_stop_dma(c->dma_ch); + omap_dma_stop(c); c->paused = true; } @@ -527,7 +1018,7 @@ static int omap_dma_resume(struct omap_chan *c) return -EINVAL; if (c->paused) { - omap_start_dma(c->dma_ch); + omap_dma_start(c, c->desc); c->paused = false; } @@ -573,6 +1064,7 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) if (!c) return -ENOMEM; + c->reg_map = od->reg_map; c->dma_sig = dma_sig; c->vc.desc_free = omap_dma_desc_free; vchan_init(&c->vc, &od->ddev); @@ -594,7 +1086,6 @@ static void omap_dma_free(struct omap_dmadev *od) tasklet_kill(&c->vc.task); kfree(c); } - kfree(od); } #define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ @@ -617,12 +1108,24 @@ static int omap_dma_device_slave_caps(struct dma_chan *dchan, static int omap_dma_probe(struct platform_device *pdev) { struct omap_dmadev *od; - int rc, i; + struct resource *res; + int rc, i, irq; - od = kzalloc(sizeof(*od), GFP_KERNEL); + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) return -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + od->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(od->base)) + return PTR_ERR(od->base); + + od->plat = omap_get_plat_info(); + if (!od->plat) + return -EPROBE_DEFER; + + od->reg_map = od->plat->reg_map; + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; @@ -637,6 +1140,7 @@ static int omap_dma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&od->ddev.channels); INIT_LIST_HEAD(&od->pending); spin_lock_init(&od->lock); + spin_lock_init(&od->irq_lock); tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); @@ -648,6 +1152,21 @@ static int omap_dma_probe(struct platform_device *pdev) } } + irq = platform_get_irq(pdev, 1); + if (irq <= 0) { + dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq); + od->legacy = true; + } else { + /* Disable all interrupts */ + od->irq_enable_mask = 0; + omap_dma_glbl_write(od, IRQENABLE_L1, 0); + + rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, + IRQF_SHARED, "omap-dma-engine", od); + if (rc) + return rc; + } + rc = 
dma_async_device_register(&od->ddev); if (rc) { pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", @@ -684,6 +1203,12 @@ static int omap_dma_remove(struct platform_device *pdev) of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&od->ddev); + + if (!od->legacy) { + /* Disable all interrupts */ + omap_dma_glbl_write(od, IRQENABLE_L0, 0); + } + omap_dma_free(od); return 0; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 00a2de957b23..bf18c786ed40 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1641,6 +1641,7 @@ static void dma_tasklet(unsigned long data) struct d40_chan *d40c = (struct d40_chan *) data; struct d40_desc *d40d; unsigned long flags; + bool callback_active; dma_async_tx_callback callback; void *callback_param; @@ -1668,6 +1669,7 @@ static void dma_tasklet(unsigned long data) } /* Callback to client */ + callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); callback = d40d->txd.callback; callback_param = d40d->txd.callback_param; @@ -1690,7 +1692,7 @@ static void dma_tasklet(unsigned long data) spin_unlock_irqrestore(&d40c->lock, flags); - if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) + if (callback_active && callback) callback(callback_param); return; |