Diffstat (limited to 'drivers/dma/dw-edma/dw-edma-core.c')
-rw-r--r-- | drivers/dma/dw-edma/dw-edma-core.c | 178
1 file changed, 113 insertions, 65 deletions
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 08d71dafa001..53289927dd0d 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -81,8 +81,13 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
 	 * - Even chunks originate CB equal to 1
 	 */
 	chunk->cb = !(desc->chunks_alloc % 2);
-	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
-	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
+	if (chan->dir == EDMA_DIR_WRITE) {
+		chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
+		chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+	} else {
+		chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
+		chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+	}
 
 	if (desc->chunk) {
 		/* Create and add new element into the linked list */
@@ -329,22 +334,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 	struct dw_edma_chunk *chunk;
 	struct dw_edma_burst *burst;
 	struct dw_edma_desc *desc;
-	u32 cnt;
+	u32 cnt = 0;
 	int i;
 
 	if (!chan->configured)
 		return NULL;
 
 	switch (chan->config.direction) {
-	case DMA_DEV_TO_MEM: /* local dma */
+	case DMA_DEV_TO_MEM: /* local DMA */
 		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
 			break;
 		return NULL;
-	case DMA_MEM_TO_DEV: /* local dma */
+	case DMA_MEM_TO_DEV: /* local DMA */
 		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
 			break;
 		return NULL;
-	default: /* remote dma */
+	default: /* remote DMA */
 		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
 			break;
 		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
@@ -352,12 +357,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 		return NULL;
 	}
 
-	if (xfer->cyclic) {
+	if (xfer->type == EDMA_XFER_CYCLIC) {
 		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
 			return NULL;
-	} else {
+	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
 		if (xfer->xfer.sg.len < 1)
 			return NULL;
+	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+		if (!xfer->xfer.il->numf)
+			return NULL;
+		if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+			return NULL;
+	} else {
+		return NULL;
 	}
 
 	desc = dw_edma_alloc_desc(chan);
@@ -368,18 +380,28 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 	if (unlikely(!chunk))
 		goto err_alloc;
 
-	src_addr = chan->config.src_addr;
-	dst_addr = chan->config.dst_addr;
+	if (xfer->type == EDMA_XFER_INTERLEAVED) {
+		src_addr = xfer->xfer.il->src_start;
+		dst_addr = xfer->xfer.il->dst_start;
+	} else {
+		src_addr = chan->config.src_addr;
+		dst_addr = chan->config.dst_addr;
+	}
 
-	if (xfer->cyclic) {
+	if (xfer->type == EDMA_XFER_CYCLIC) {
 		cnt = xfer->xfer.cyclic.cnt;
-	} else {
+	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
 		cnt = xfer->xfer.sg.len;
 		sg = xfer->xfer.sg.sgl;
+	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+		if (xfer->xfer.il->numf > 0)
+			cnt = xfer->xfer.il->numf;
+		else
+			cnt = xfer->xfer.il->frame_size;
 	}
 
 	for (i = 0; i < cnt; i++) {
-		if (!xfer->cyclic && !sg)
+		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
 			break;
 
 		if (chunk->bursts_alloc == chan->ll_max) {
@@ -392,20 +414,23 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 		if (unlikely(!burst))
 			goto err_alloc;
 
-		if (xfer->cyclic)
+		if (xfer->type == EDMA_XFER_CYCLIC)
 			burst->sz = xfer->xfer.cyclic.len;
-		else
+		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
 			burst->sz = sg_dma_len(sg);
+		else if (xfer->type == EDMA_XFER_INTERLEAVED)
+			burst->sz = xfer->xfer.il->sgl[i].size;
 
 		chunk->ll_region.sz += burst->sz;
 		desc->alloc_sz += burst->sz;
 
 		if (chan->dir == EDMA_DIR_WRITE) {
 			burst->sar = src_addr;
-			if (xfer->cyclic) {
+			if (xfer->type == EDMA_XFER_CYCLIC) {
 				burst->dar = xfer->xfer.cyclic.paddr;
-			} else {
-				burst->dar = dst_addr;
+			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+				src_addr += sg_dma_len(sg);
+				burst->dar = sg_dma_address(sg);
 				/* Unlike the typical assumption by other
 				 * drivers/IPs the peripheral memory isn't
 				 * a FIFO memory, in this case, it's a
@@ -416,10 +441,11 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 			}
 		} else {
 			burst->dar = dst_addr;
-			if (xfer->cyclic) {
+			if (xfer->type == EDMA_XFER_CYCLIC) {
 				burst->sar = xfer->xfer.cyclic.paddr;
-			} else {
-				burst->sar = src_addr;
+			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
+				dst_addr += sg_dma_len(sg);
+				burst->sar = sg_dma_address(sg);
 				/* Unlike the typical assumption by other
 				 * drivers/IPs the peripheral memory isn't
 				 * a FIFO memory, in this case, it's a
@@ -430,10 +456,22 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 			}
 		}
 
-		if (!xfer->cyclic) {
-			src_addr += sg_dma_len(sg);
-			dst_addr += sg_dma_len(sg);
+		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
 			sg = sg_next(sg);
+		} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
+			   xfer->xfer.il->frame_size > 0) {
+			struct dma_interleaved_template *il = xfer->xfer.il;
+			struct data_chunk *dc = &il->sgl[i];
+
+			if (il->src_sgl) {
+				src_addr += burst->sz;
+				src_addr += dmaengine_get_src_icg(il, dc);
+			}
+
+			if (il->dst_sgl) {
+				dst_addr += burst->sz;
+				dst_addr += dmaengine_get_dst_icg(il, dc);
+			}
 		}
 	}
 
@@ -459,7 +497,7 @@ dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	xfer.xfer.sg.sgl = sgl;
 	xfer.xfer.sg.len = len;
 	xfer.flags = flags;
-	xfer.cyclic = false;
+	xfer.type = EDMA_XFER_SCATTER_GATHER;
 
 	return dw_edma_device_transfer(&xfer);
 }
@@ -478,7 +516,23 @@ dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
 	xfer.xfer.cyclic.len = len;
 	xfer.xfer.cyclic.cnt = count;
 	xfer.flags = flags;
-	xfer.cyclic = true;
+	xfer.type = EDMA_XFER_CYCLIC;
+
+	return dw_edma_device_transfer(&xfer);
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
+				    struct dma_interleaved_template *ilt,
+				    unsigned long flags)
+{
+	struct dw_edma_transfer xfer;
+
+	xfer.dchan = dchan;
+	xfer.direction = ilt->dir;
+	xfer.xfer.il = ilt;
+	xfer.flags = flags;
+	xfer.type = EDMA_XFER_INTERLEAVED;
 
 	return dw_edma_device_transfer(&xfer);
 }
@@ -642,24 +696,13 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
 	struct device *dev = chip->dev;
 	struct dw_edma *dw = chip->dw;
 	struct dw_edma_chan *chan;
-	size_t ll_chunk, dt_chunk;
 	struct dw_edma_irq *irq;
 	struct dma_device *dma;
-	u32 i, j, cnt, ch_cnt;
 	u32 alloc, off_alloc;
+	u32 i, j, cnt;
 	int err = 0;
 	u32 pos;
 
-	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
-	ll_chunk = dw->ll_region.sz;
-	dt_chunk = dw->dt_region.sz;
-
-	/* Calculate linked list chunk for each channel */
-	ll_chunk /= roundup_pow_of_two(ch_cnt);
-
-	/* Calculate linked list chunk for each channel */
-	dt_chunk /= roundup_pow_of_two(ch_cnt);
-
 	if (write) {
 		i = 0;
 		cnt = dw->wr_ch_cnt;
@@ -691,14 +734,14 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
 		chan->request = EDMA_REQ_NONE;
 		chan->status = EDMA_ST_IDLE;
 
-		chan->ll_off = (ll_chunk * i);
-		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
-
-		chan->dt_off = (dt_chunk * i);
+		if (write)
+			chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+		else
+			chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+		chan->ll_max -= 1;
 
-		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
-			 write ? "write" : "read", j,
-			 chan->ll_off, chan->ll_max);
+		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
+			 write ? "write" : "read", j, chan->ll_max);
 
 		if (dw->nr_irqs == 1)
 			pos = 0;
@@ -723,12 +766,15 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
 		chan->vc.desc_free = vchan_free_desc;
 		vchan_init(&chan->vc, dma);
 
-		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
-		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
-		dt_region->sz = dt_chunk;
-
-		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
-			 write ? "write" : "read", j, chan->dt_off);
+		if (write) {
+			dt_region->paddr = dw->dt_region_wr[j].paddr;
+			dt_region->vaddr = dw->dt_region_wr[j].vaddr;
+			dt_region->sz = dw->dt_region_wr[j].sz;
+		} else {
+			dt_region->paddr = dw->dt_region_rd[j].paddr;
+			dt_region->vaddr = dw->dt_region_rd[j].vaddr;
+			dt_region->sz = dw->dt_region_rd[j].sz;
+		}
 
 		dw_edma_v0_core_device_config(chan);
 	}
@@ -738,6 +784,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
 	dma_cap_set(DMA_SLAVE, dma->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
 	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
 	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -756,6 +803,7 @@ static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
 	dma->device_tx_status = dw_edma_device_tx_status;
 	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
 	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
+	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;
 
 	dma_set_max_seg_size(dma->dev, U32_MAX);
 
@@ -863,14 +911,15 @@ int dw_edma_probe(struct dw_edma_chip *chip)
 	raw_spin_lock_init(&dw->lock);
 
-	/* Find out how many write channels are supported by hardware */
-	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
-	if (!dw->wr_ch_cnt)
-		return -EINVAL;
+	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
+	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
 
-	/* Find out how many read channels are supported by hardware */
-	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
-	if (!dw->rd_ch_cnt)
+	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
+	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
+
+	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
 		return -EINVAL;
 
 	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
 		 dw->wr_ch_cnt, dw->rd_ch_cnt);
@@ -937,24 +986,23 @@ int dw_edma_remove(struct dw_edma_chip *chip)
 	/* Power management */
 	pm_runtime_disable(dev);
 
+	/* Deregister eDMA device */
+	dma_async_device_unregister(&dw->wr_edma);
 	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
 				 vc.chan.device_node) {
-		list_del(&chan->vc.chan.device_node);
 		tasklet_kill(&chan->vc.task);
+		list_del(&chan->vc.chan.device_node);
 	}
 
+	dma_async_device_unregister(&dw->rd_edma);
 	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
 				 vc.chan.device_node) {
-		list_del(&chan->vc.chan.device_node);
 		tasklet_kill(&chan->vc.task);
+		list_del(&chan->vc.chan.device_node);
 	}
 
-	/* Deregister eDMA device */
-	dma_async_device_unregister(&dw->wr_edma);
-	dma_async_device_unregister(&dw->rd_edma);
-
 	/* Turn debugfs off */
-	dw_edma_v0_core_debugfs_off();
+	dw_edma_v0_core_debugfs_off(chip);
 
 	return 0;
 }
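For reference, below is a minimal consumer sketch of the new interleaved path. It is hypothetical and not part of the patch: example_submit_interleaved() and its parameters are invented for illustration, and it uses only the generic dmaengine API (dmaengine_prep_interleaved_dma() and friends). It fills in a dma_interleaved_template the way the validation added in dw_edma_device_transfer() expects: numf non-zero, frame_size left at zero, and one data_chunk consumed from sgl[] per frame.

/*
 * Hypothetical example, not part of the patch above: submit an
 * interleaved transfer through the generic dmaengine API, which
 * dw-edma now services via dw_edma_device_prep_interleaved_dma().
 */
#include <linux/dmaengine.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static int example_submit_interleaved(struct dma_chan *chan,
				      dma_addr_t src, dma_addr_t dst,
				      size_t nr_frames, size_t chunk_sz,
				      size_t icg)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *xt;
	dma_cookie_t cookie;
	size_t i;

	/* One data_chunk per frame, matching how the driver walks sgl[i]. */
	xt = kzalloc(struct_size(xt, sgl, nr_frames), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = nr_frames;	/* frame_size stays 0, as the new check requires */
	xt->src_sgl = true;	/* request source stepping across the gaps */
	xt->dst_sgl = false;
	for (i = 0; i < nr_frames; i++) {
		xt->sgl[i].size = chunk_sz;	/* bytes copied per frame */
		xt->sgl[i].icg = icg;		/* requested gap between chunks */
	}

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	/* dw-edma consumes the template during prep, so it can be freed now. */
	kfree(xt);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return dma_submit_error(cookie);
}

One caveat visible in the diff itself: the ICG address-stepping branch in dw_edma_device_transfer() is guarded by frame_size > 0, while the added validation only admits templates with frame_size == 0, so in this version that stepping path is effectively unreachable and the sgl icg values are not applied.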