Diffstat (limited to 'drivers/dma/dw_dmac.c')
-rw-r--r--	drivers/dma/dw_dmac.c	523
1 file changed, 368 insertions, 155 deletions
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index b33d1f6e1333..51c3ea2ed41a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1,6 +1,5 @@
 /*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Core driver for the Synopsys DesignWare DMA Controller
  *
  * Copyright (C) 2007-2008 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
@@ -9,11 +8,13 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -47,15 +48,32 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
 	return slave ? slave->src_master : 1;
 }
 
+#define SRC_MASTER	0
+#define DST_MASTER	1
+
+static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
+	struct dw_dma_slave *dws = chan->private;
+	unsigned int m;
+
+	if (master == SRC_MASTER)
+		m = dwc_get_sms(dws);
+	else
+		m = dwc_get_dms(dws);
+
+	return min_t(unsigned int, dw->nr_masters - 1, m);
+}
+
 #define DWC_DEFAULT_CTLLO(_chan) ({				\
-	struct dw_dma_slave *__slave = (_chan->private);	\
 	struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
 	struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
-	int _dms = dwc_get_dms(__slave);			\
-	int _sms = dwc_get_sms(__slave);			\
-	u8 _smsize = __slave ? _sconfig->src_maxburst :		\
+	bool _is_slave = is_slave_direction(_dwc->direction);	\
+	int _dms = dwc_get_master(_chan, DST_MASTER);		\
+	int _sms = dwc_get_master(_chan, SRC_MASTER);		\
+	u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
 			DW_DMA_MSIZE_16;			\
-	u8 _dmsize = __slave ? _sconfig->dst_maxburst :		\
+	u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
 			DW_DMA_MSIZE_16;			\
 								\
 	(DWC_CTLL_DST_MSIZE(_dmsize)				\
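The dwc_get_master() helper added above clamps whatever master index the slave data asks for to the highest master the core was actually synthesized with (nr_masters is filled from autoconfiguration later in this diff). A standalone sketch of that clamp, with made-up values, runnable as plain C:

#include <stdio.h>

/* models min_t(unsigned int, dw->nr_masters - 1, m) from dwc_get_master() */
static unsigned int clamp_master(unsigned int nr_masters, unsigned int m)
{
	return m < nr_masters - 1 ? m : nr_masters - 1;
}

int main(void)
{
	/* a core built with 2 masters maps a request for master 3 onto 1 */
	printf("%u\n", clamp_master(2, 3));
	return 0;
}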
@@ -73,15 +91,14 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
  */
 #define NR_DESCS_PER_CHANNEL	64
 
-/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
 
-/*
- * Because we're not relying on writeback from the controller (it may not
- * even be configured into the core!) we don't need to use dma_pool. These
- * descriptors -- and associated data -- are cacheable. We do need to make
- * sure their dcache entries are written back before handing them off to
- * the controller, though.
- */
+	return dw->data_width[dwc_get_master(chan, master)];
+}
+
+/*----------------------------------------------------------------------*/
 
 static struct device *chan2dev(struct dma_chan *chan)
 {
@@ -94,7 +111,7 @@ static struct device *chan2parent(struct dma_chan *chan)
 
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
-	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+	return to_dw_desc(dwc->active_list.next);
 }
 
 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
@@ -121,19 +138,6 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 	return ret;
 }
 
-static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
-{
-	struct dw_desc	*child;
-
-	list_for_each_entry(child, &desc->tx_list, desc_node)
-		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-				child->txd.phys, sizeof(child->lli),
-				DMA_TO_DEVICE);
-	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
-			desc->txd.phys, sizeof(desc->lli),
-			DMA_TO_DEVICE);
-}
-
 /*
  * Move a descriptor, including any children, to the free list.
  * `desc' must not be on any lists.
@@ -145,8 +149,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	if (desc) {
 		struct dw_desc *child;
 
-		dwc_sync_desc_for_cpu(dwc, desc);
-
 		spin_lock_irqsave(&dwc->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&dwc->chan),
@@ -179,9 +181,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 		cfghi = dws->cfg_hi;
 		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
 	} else {
-		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+		if (dwc->direction == DMA_MEM_TO_DEV)
 			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
-		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+		else if (dwc->direction == DMA_DEV_TO_MEM)
 			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
 	}
 
@@ -223,7 +225,6 @@ static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
 		channel_readl(dwc, CTL_LO));
 }
 
-
 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -249,6 +250,9 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
 	channel_writel(dwc, CTL_LO, ctllo);
 	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
 	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	/* Move pointer to next descriptor */
+	dwc->tx_node_active = dwc->tx_node_active->next;
 }
 
 /* Called with dwc->lock held and bh disabled */
@@ -279,9 +283,10 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 
 		dwc_initialize(dwc);
 
-		dwc->tx_list = &first->tx_list;
-		dwc->tx_node_active = first->tx_list.next;
+		dwc->residue = first->total_len;
+		dwc->tx_node_active = &first->tx_list;
 
+		/* Submit first block */
 		dwc_do_single_block(dwc, first);
 
 		return;
@@ -317,8 +322,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 		param = txd->callback_param;
 	}
 
-	dwc_sync_desc_for_cpu(dwc, desc);
-
 	/* async_tx_ack */
 	list_for_each_entry(child, &desc->tx_list, desc_node)
 		async_tx_ack(&child->txd);
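The deleted dwc_sync_desc_for_cpu() (and the old comment about cacheable descriptors) go away because later hunks move descriptor storage from kmalloc()ed, streaming-mapped memory into a coherent dma_pool, where the CPU and the controller always see the same bytes with no explicit sync. The pattern, reduced to a sketch with hypothetical names:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct example_desc { u32 sar, dar, llp, ctllo, ctlhi; };

static int example_pool_use(struct device *dev)
{
	struct dma_pool *pool;
	struct example_desc *desc;
	dma_addr_t phys;

	pool = dma_pool_create("example", dev, sizeof(*desc), 4, 0);
	if (!pool)
		return -ENOMEM;

	/* coherent memory: no dma_sync_*() needed around hardware access */
	desc = dma_pool_alloc(pool, GFP_ATOMIC, &phys);
	if (desc)
		dma_pool_free(pool, desc, phys);

	dma_pool_destroy(pool);
	return 0;
}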
@@ -327,29 +330,29 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 	list_splice_init(&desc->tx_list, &dwc->free_list);
 	list_move(&desc->desc_node, &dwc->free_list);
 
-	if (!dwc->chan.private) {
+	if (!is_slave_direction(dwc->direction)) {
 		struct device *parent = chan2parent(&dwc->chan);
 		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 				dma_unmap_single(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
+					desc->total_len, DMA_FROM_DEVICE);
 			else
 				dma_unmap_page(parent, desc->lli.dar,
-						desc->len, DMA_FROM_DEVICE);
+					desc->total_len, DMA_FROM_DEVICE);
 		}
 
 		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 				dma_unmap_single(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
+					desc->total_len, DMA_TO_DEVICE);
 			else
 				dma_unmap_page(parent, desc->lli.sar,
-						desc->len, DMA_TO_DEVICE);
+					desc->total_len, DMA_TO_DEVICE);
 		}
 	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
-	if (callback_required && callback)
+	if (callback)
 		callback(param);
 }
@@ -384,6 +387,15 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_descriptor_complete(dwc, desc, true);
 }
 
+/* Returns how many bytes were already received from source */
+static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
+{
+	u32 ctlhi = channel_readl(dwc, CTL_HI);
+	u32 ctllo = channel_readl(dwc, CTL_LO);
+
+	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
+}
+
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	dma_addr_t llp;
@@ -399,6 +411,39 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	if (status_xfer & dwc->mask) {
 		/* Everything we've submitted is done */
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+			struct list_head *head, *active = dwc->tx_node_active;
+
+			/*
+			 * We are inside first active descriptor.
+			 * Otherwise something is really wrong.
+			 */
+			desc = dwc_first_active(dwc);
+
+			head = &desc->tx_list;
+			if (active != head) {
+				/* Update desc to reflect last sent one */
+				if (active != head->next)
+					desc = to_dw_desc(active->prev);
+
+				dwc->residue -= desc->len;
+
+				child = to_dw_desc(active);
+
+				/* Submit next block */
+				dwc_do_single_block(dwc, child);
+
+				spin_unlock_irqrestore(&dwc->lock, flags);
+				return;
+			}
+
+			/* We are done here */
+			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
+		}
+
+		dwc->residue = 0;
+
 		spin_unlock_irqrestore(&dwc->lock, flags);
 
 		dwc_complete_all(dw, dwc);
@@ -406,6 +451,13 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	}
 
 	if (list_empty(&dwc->active_list)) {
+		dwc->residue = 0;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		return;
+	}
+
+	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
+		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
 		spin_unlock_irqrestore(&dwc->lock, flags);
 		return;
 	}
@@ -414,6 +466,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 			(unsigned long long)llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+		/* initial residue value */
+		dwc->residue = desc->total_len;
+
 		/* check first descriptors addr */
 		if (desc->txd.phys == llp) {
 			spin_unlock_irqrestore(&dwc->lock, flags);
@@ -423,16 +478,21 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		/* check first descriptors llp */
 		if (desc->lli.llp == llp) {
 			/* This one is currently in progress */
+			dwc->residue -= dwc_get_sent(dwc);
 			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
 		}
 
-		list_for_each_entry(child, &desc->tx_list, desc_node)
+		dwc->residue -= desc->len;
+		list_for_each_entry(child, &desc->tx_list, desc_node) {
 			if (child->lli.llp == llp) {
 				/* Currently in progress */
+				dwc->residue -= dwc_get_sent(dwc);
 				spin_unlock_irqrestore(&dwc->lock, flags);
 				return;
 			}
+			dwc->residue -= child->len;
+		}
 
 		/*
 		 * No descriptors so far seem to be in progress, i.e.
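The residue bookkeeping above hinges on dwc_get_sent(): CTL_HI holds the number of items already transferred (BLOCK_TS) and bits 6:4 of CTL_LO hold SRC_TR_WIDTH, the log2 of the item size, so bytes = BLOCK_TS * 2^SRC_TR_WIDTH. A runnable model of that arithmetic, assuming the 12-bit BLOCK_TS mask this driver uses:

#include <stdio.h>
#include <stdint.h>

#define BLOCK_TS_MASK	0xfff	/* assumed value of DWC_CTLH_BLOCK_TS_MASK */

static uint32_t bytes_sent(uint32_t ctlhi, uint32_t ctllo)
{
	/* mirrors the return expression of dwc_get_sent() */
	return (ctlhi & BLOCK_TS_MASK) * (1u << (ctllo >> 4 & 7));
}

int main(void)
{
	/* 100 items of 4 bytes each (SRC_TR_WIDTH = 2) -> 400 bytes */
	printf("%u\n", bytes_sent(100, 2 << 4));
	return 0;
}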
@@ -458,9 +518,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
-			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
 }
 
 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -488,16 +547,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_dostart(dwc, dwc_first_active(dwc));
 
 	/*
-	 * KERN_CRITICAL may seem harsh, but since this only happens
+	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"Bad descriptor submitted for DMA!\n");
-	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
-			"  cookie: %d\n", bad_desc->txd.cookie);
+	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
+				       "  cookie: %d\n", bad_desc->txd.cookie);
 	dwc_dump_lli(dwc, &bad_desc->lli);
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		dwc_dump_lli(dwc, &child->lli);
@@ -598,36 +655,8 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
-		else if (status_xfer & (1 << i)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&dwc->lock, flags);
-			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
-				if (dwc->tx_node_active != dwc->tx_list) {
-					struct dw_desc *desc =
-						list_entry(dwc->tx_node_active,
-							   struct dw_desc,
-							   desc_node);
-
-					dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-					/* move pointer to next descriptor */
-					dwc->tx_node_active =
-						dwc->tx_node_active->next;
-
-					dwc_do_single_block(dwc, desc);
-
-					spin_unlock_irqrestore(&dwc->lock, flags);
-					continue;
-				} else {
-					/* we are done here */
-					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
-				}
-			}
-			spin_unlock_irqrestore(&dwc->lock, flags);
-
+		else if (status_xfer & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
-		}
 	}
 
 	/*
@@ -709,7 +738,6 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		size_t len, unsigned long flags)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave	*dws = chan->private;
 	struct dw_desc		*desc;
 	struct dw_desc		*first;
 	struct dw_desc		*prev;
@@ -730,8 +758,10 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 	}
 
-	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
-			   dwc->dw->data_width[dwc_get_dms(dws)]);
+	dwc->direction = DMA_MEM_TO_MEM;
+
+	data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
+			   dwc_get_data_width(chan, DST_MASTER));
 
 	src_width = dst_width = min_t(unsigned int, data_width,
 				      dwc_fast_fls(src | dest | len));
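For memory-to-memory transfers the prep routine above derives the widest safe transfer width from the common alignment of source, destination and length (dwc_fast_fls()), then clips it to the narrower of the two masters' bus widths. A standalone sketch of that selection; the cap at 8 bytes is an assumption about dwc_fast_fls(), whose body is not shown in this diff:

#include <stdio.h>

/* assumed shape of dwc_fast_fls(): log2 of the largest common alignment */
static unsigned int fast_fls(unsigned long long v)
{
	if (!(v & 7))
		return 3;
	if (!(v & 3))
		return 2;
	if (!(v & 1))
		return 1;
	return 0;
}

int main(void)
{
	unsigned long long src = 0x1004, dst = 0x2008, len = 64;
	unsigned int data_width = 2;	/* a 32-bit master, as log2 of bytes */
	unsigned int w = fast_fls(src | dst | len);

	if (w > data_width)
		w = data_width;
	printf("transfer width: %u bytes\n", 1u << w);	/* prints 4 */
	return 0;
}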
@@ -756,32 +786,25 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		desc->lli.dar = dest + offset;
 		desc->lli.ctllo = ctllo;
 		desc->lli.ctlhi = xfer_count;
+		desc->len = xfer_count << src_width;
 
 		if (!first) {
 			first = desc;
 		} else {
 			prev->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan2parent(chan),
-					prev->txd.phys, sizeof(prev->lli),
-					DMA_TO_DEVICE);
 			list_add_tail(&desc->desc_node,
 					&first->tx_list);
 		}
 		prev = desc;
 	}
 
-
 	if (flags & DMA_PREP_INTERRUPT)
 		/* Trigger interrupt after last block */
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan2parent(chan),
-			prev->txd.phys, sizeof(prev->lli),
-			DMA_TO_DEVICE);
-
 	first->txd.flags = flags;
-	first->len = len;
+	first->total_len = len;
 
 	return &first->txd;
@@ -796,7 +819,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned long flags, void *context)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
-	struct dw_dma_slave	*dws = chan->private;
 	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
 	struct dw_desc		*prev;
 	struct dw_desc		*first;
@@ -811,9 +833,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
-	if (unlikely(!dws || !sg_len))
+	if (unlikely(!is_slave_direction(direction) || !sg_len))
 		return NULL;
 
+	dwc->direction = direction;
+
 	prev = first = NULL;
 
 	switch (direction) {
@@ -828,7 +852,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 
-		data_width = dwc->dw->data_width[dwc_get_sms(dws)];
+		data_width = dwc_get_data_width(chan, SRC_MASTER);
 
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
@@ -861,15 +885,12 @@ slave_sg_todev_fill_desc:
 			}
 
 			desc->lli.ctlhi = dlen >> mem_width;
+			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan2parent(chan),
-						prev->txd.phys,
-						sizeof(prev->lli),
-						DMA_TO_DEVICE);
 				list_add_tail(&desc->desc_node,
 						&first->tx_list);
 			}
@@ -891,7 +912,7 @@ slave_sg_todev_fill_desc:
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
 			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
 
-		data_width = dwc->dw->data_width[dwc_get_dms(dws)];
+		data_width = dwc_get_data_width(chan, DST_MASTER);
 
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
@@ -923,15 +944,12 @@ slave_sg_fromdev_fill_desc:
 				len = 0;
 			}
 			desc->lli.ctlhi = dlen >> reg_width;
+			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
 				prev->lli.llp = desc->txd.phys;
-				dma_sync_single_for_device(chan2parent(chan),
-						prev->txd.phys,
-						sizeof(prev->lli),
-						DMA_TO_DEVICE);
 				list_add_tail(&desc->desc_node,
 						&first->tx_list);
 			}
@@ -951,11 +969,7 @@ slave_sg_fromdev_fill_desc:
 		prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
 	prev->lli.llp = 0;
-	dma_sync_single_for_device(chan2parent(chan),
-			prev->txd.phys, sizeof(prev->lli),
-			DMA_TO_DEVICE);
-
-	first->len = total_len;
+	first->total_len = total_len;
 
 	return &first->txd;
@@ -985,11 +999,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
-	/* Check if it is chan is configured for slave transfers */
-	if (!chan->private)
+	/* Check if chan will be configured for slave transfers */
+	if (!is_slave_direction(sconfig->direction))
 		return -EINVAL;
 
 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+	dwc->direction = sconfig->direction;
 
 	convert_burst(&dwc->dma_sconfig.src_maxburst);
 	convert_burst(&dwc->dma_sconfig.dst_maxburst);
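set_runtime_config() now rejects configurations whose direction field is not a slave direction, so the burden shifts to client drivers to fill it in before preparing transfers. A hypothetical client, using only standard dmaengine calls; the 'fifo' address and names are made up:

#include <linux/dmaengine.h>

static int example_slave_config(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,	/* must pass is_slave_direction() */
		.dst_addr = fifo,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 16,
	};

	return dmaengine_slave_config(chan, &cfg);
}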
@@ -997,6 +1012,26 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 	return 0;
 }
 
+static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+
+	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+		cpu_relax();
+
+	dwc->paused = true;
+}
+
+static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
+{
+	u32 cfglo = channel_readl(dwc, CFG_LO);
+
+	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+
+	dwc->paused = false;
+}
+
 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		       unsigned long arg)
 {
@@ -1004,18 +1039,13 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc, *_desc;
 	unsigned long		flags;
-	u32			cfglo;
 	LIST_HEAD(list);
 
 	if (cmd == DMA_PAUSE) {
 		spin_lock_irqsave(&dwc->lock, flags);
 
-		cfglo = channel_readl(dwc, CFG_LO);
-		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
-		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
-			cpu_relax();
+		dwc_chan_pause(dwc);
 
-		dwc->paused = true;
 		spin_unlock_irqrestore(&dwc->lock, flags);
 	} else if (cmd == DMA_RESUME) {
 		if (!dwc->paused)
@@ -1023,9 +1053,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 		spin_lock_irqsave(&dwc->lock, flags);
 
-		cfglo = channel_readl(dwc, CFG_LO);
-		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
-		dwc->paused = false;
+		dwc_chan_resume(dwc);
 
 		spin_unlock_irqrestore(&dwc->lock, flags);
 	} else if (cmd == DMA_TERMINATE_ALL) {
@@ -1035,7 +1063,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 		dwc_chan_disable(dw, dwc);
 
-		dwc->paused = false;
+		dwc_chan_resume(dwc);
 
 		/* active_list entries will end up before queued entries */
 		list_splice_init(&dwc->queue, &list);
@@ -1055,6 +1083,21 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	return 0;
 }
 
+static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+{
+	unsigned long flags;
+	u32 residue;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	residue = dwc->residue;
+	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+		residue -= dwc_get_sent(dwc);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	return residue;
+}
+
 static enum dma_status
 dwc_tx_status(struct dma_chan *chan,
 	      dma_cookie_t cookie,
@@ -1071,7 +1114,7 @@ dwc_tx_status(struct dma_chan *chan,
 	}
 
 	if (ret != DMA_SUCCESS)
-		dma_set_residue(txstate, dwc_first_active(dwc)->len);
+		dma_set_residue(txstate, dwc_get_residue(dwc));
 
 	if (dwc->paused)
 		return DMA_PAUSED;
@@ -1114,22 +1157,22 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
 	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+		dma_addr_t phys;
+
 		spin_unlock_irqrestore(&dwc->lock, flags);
 
-		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
-		if (!desc) {
-			dev_info(chan2dev(chan),
-				"only allocated %d descriptors\n", i);
-			spin_lock_irqsave(&dwc->lock, flags);
-			break;
-		}
+		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
+		if (!desc)
+			goto err_desc_alloc;
+
+		memset(desc, 0, sizeof(struct dw_desc));
 
 		INIT_LIST_HEAD(&desc->tx_list);
 		dma_async_tx_descriptor_init(&desc->txd, chan);
 		desc->txd.tx_submit = dwc_tx_submit;
 		desc->txd.flags = DMA_CTRL_ACK;
-		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
-				sizeof(desc->lli), DMA_TO_DEVICE);
+		desc->txd.phys = phys;
+
 		dwc_desc_put(dwc, desc);
 
 		spin_lock_irqsave(&dwc->lock, flags);
@@ -1141,6 +1184,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 	return i;
+
+err_desc_alloc:
+	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
+
+	return i;
 }
 
 static void dwc_free_chan_resources(struct dma_chan *chan)
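With dwc_get_residue() wired into dwc_tx_status(), a client can finally see byte-accurate progress instead of the old whole-descriptor length. Hypothetical caller code against the standard dmaengine API:

#include <linux/dmaengine.h>

/* returns bytes still outstanding for 'cookie' on 'chan' */
static u32 example_pending_bytes(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_SUCCESS)
		return 0;

	return state.residue;	/* filled in via dma_set_residue() above */
}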
@@ -1172,14 +1220,56 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
 		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
-		dma_unmap_single(chan2parent(chan), desc->txd.phys,
-				sizeof(desc->lli), DMA_TO_DEVICE);
-		kfree(desc);
+		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
 	}
 
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
+bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma *dw = to_dw_dma(chan->device);
+	static struct dw_dma *last_dw;
+	static char *last_bus_id;
+	int i = -1;
+
+	/*
+	 * dmaengine framework calls this routine for all channels of all dma
+	 * controller, until true is returned. If 'param' bus_id is not
+	 * registered with a dma controller (dw), then there is no need of
+	 * running below function for all channels of dw.
+	 *
+	 * This block of code does this by saving the parameters of last
+	 * failure. If dw and param are same, i.e. trying on same dw with
+	 * different channel, return false.
+	 */
+	if ((last_dw == dw) && (last_bus_id == param))
+		return false;
+	/*
+	 * Return true:
+	 * - If dw_dma's platform data is not filled with slave info, then all
+	 *   dma controllers are fine for transfer.
+	 * - Or if param is NULL
+	 */
+	if (!dw->sd || !param)
+		return true;
+
+	while (++i < dw->sd_count) {
+		if (!strcmp(dw->sd[i].bus_id, param)) {
+			chan->private = &dw->sd[i];
+			last_dw = NULL;
+			last_bus_id = NULL;
+
+			return true;
+		}
+	}
+
+	last_dw = dw;
+	last_bus_id = param;
+	return false;
+}
+EXPORT_SYMBOL(dw_dma_generic_filter);
+
 /* --------------------- Cyclic DMA API extensions -------------------- */
 
 /**
@@ -1299,6 +1389,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 
 	retval = ERR_PTR(-EINVAL);
 
+	if (unlikely(!is_slave_direction(direction)))
+		goto out_err;
+
+	dwc->direction = direction;
+
 	if (direction == DMA_MEM_TO_DEV)
 		reg_width = __ffs(sconfig->dst_addr_width);
 	else
@@ -1313,8 +1408,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		goto out_err;
 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
 		goto out_err;
-	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
-		goto out_err;
 
 	retval = ERR_PTR(-ENOMEM);
@@ -1372,20 +1465,14 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		desc->lli.ctlhi = (period_len >> reg_width);
 		cdesc->desc[i] = desc;
 
-		if (last) {
+		if (last)
 			last->lli.llp = desc->txd.phys;
-			dma_sync_single_for_device(chan2parent(chan),
-					last->txd.phys, sizeof(last->lli),
-					DMA_TO_DEVICE);
-		}
 
 		last = desc;
 	}
 
 	/* lets make a cyclic list */
 	last->lli.llp = cdesc->desc[0]->txd.phys;
-	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
-			sizeof(last->lli), DMA_TO_DEVICE);
 
 	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
 			"period %zu periods %d\n", (unsigned long long)buf_addr,
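dw_dma_generic_filter() lets a client driver grab a channel by the bus_id string registered in the controller's slave data. A sketch of such a request; the "uart0-tx" id is hypothetical and has to match a dw_dma_slave entry from platform data or device tree:

#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>

static struct dma_chan *example_request_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* walks every channel of every controller until the filter accepts one */
	return dma_request_channel(mask, dw_dma_generic_filter, (void *)"uart0-tx");
}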
@@ -1463,6 +1550,91 @@ static void dw_dma_off(struct dw_dma *dw)
 		dw->chan[i].initialized = false;
 }
 
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+	struct device_node *sn, *cn, *np = pdev->dev.of_node;
+	struct dw_dma_platform_data *pdata;
+	struct dw_dma_slave *sd;
+	u32 tmp, arr[4];
+
+	if (!np) {
+		dev_err(&pdev->dev, "Missing DT data\n");
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
+		return NULL;
+
+	if (of_property_read_bool(np, "is_private"))
+		pdata->is_private = true;
+
+	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+		pdata->chan_allocation_order = (unsigned char)tmp;
+
+	if (!of_property_read_u32(np, "chan_priority", &tmp))
+		pdata->chan_priority = tmp;
+
+	if (!of_property_read_u32(np, "block_size", &tmp))
+		pdata->block_size = tmp;
+
+	if (!of_property_read_u32(np, "nr_masters", &tmp)) {
+		if (tmp > 4)
+			return NULL;
+
+		pdata->nr_masters = tmp;
+	}
+
+	if (!of_property_read_u32_array(np, "data_width", arr,
+				pdata->nr_masters))
+		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+			pdata->data_width[tmp] = arr[tmp];
+
+	/* parse slave data */
+	sn = of_find_node_by_name(np, "slave_info");
+	if (!sn)
+		return pdata;
+
+	/* calculate number of slaves */
+	tmp = of_get_child_count(sn);
+	if (!tmp)
+		return NULL;
+
+	sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
+	if (!sd)
+		return NULL;
+
+	pdata->sd = sd;
+	pdata->sd_count = tmp;
+
+	for_each_child_of_node(sn, cn) {
+		sd->dma_dev = &pdev->dev;
+		of_property_read_string(cn, "bus_id", &sd->bus_id);
+		of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
+		of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
+		if (!of_property_read_u32(cn, "src_master", &tmp))
+			sd->src_master = tmp;
+
+		if (!of_property_read_u32(cn, "dst_master", &tmp))
+			sd->dst_master = tmp;
+		sd++;
+	}
+
+	return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif
+
 static int dw_probe(struct platform_device *pdev)
 {
 	struct dw_dma_platform_data *pdata;
@@ -1478,10 +1650,6 @@ static int dw_probe(struct platform_device *pdev)
 	int			err;
 	int			i;
 
-	pdata = dev_get_platdata(&pdev->dev);
-	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
-		return -EINVAL;
-
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!io)
 		return -EINVAL;
@@ -1494,9 +1662,33 @@ static int dw_probe(struct platform_device *pdev)
 	if (IS_ERR(regs))
 		return PTR_ERR(regs);
 
+	/* Apply default dma_mask if needed */
+	if (!pdev->dev.dma_mask) {
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
 	dw_params = dma_read_byaddr(regs, DW_PARAMS);
 	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
 
+	dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata)
+		pdata = dw_dma_parse_dt(pdev);
+
+	if (!pdata && autocfg) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+
+		/* Fill platform data with the default values */
+		pdata->is_private = true;
+		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
+		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
+	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+		return -EINVAL;
+
 	if (autocfg)
 		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
 	else
@@ -1513,6 +1705,8 @@ static int dw_probe(struct platform_device *pdev)
 	clk_prepare_enable(dw->clk);
 
 	dw->regs = regs;
+	dw->sd = pdata->sd;
+	dw->sd_count = pdata->sd_count;
 
 	/* get hardware configuration parameters */
 	if (autocfg) {
@@ -1544,6 +1738,14 @@ static int dw_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, dw);
 
+	/* create a pool of consistent memory blocks for hardware descriptors */
+	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+					 sizeof(struct dw_desc), 4, 0);
+	if (!dw->desc_pool) {
+		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+		return -ENOMEM;
+	}
+
 	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
 
 	INIT_LIST_HEAD(&dw->dma.channels);
@@ -1575,7 +1777,7 @@ static int dw_probe(struct platform_device *pdev)
 
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 
-		dwc->dw = dw;
+		dwc->direction = DMA_TRANS_NONE;
 
 		/* hardware configuration */
 		if (autocfg) {
@@ -1584,6 +1786,9 @@ static int dw_probe(struct platform_device *pdev)
 			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
 						     DWC_PARAMS);
 
+			dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+					    dwc_params);
+
 			/* Decode maximum block size for given channel. The
 			 * stored 4 bit value represents blocks from 0x00 for 3
 			 * up to 0x0a for 4095. */
@@ -1627,8 +1832,8 @@ static int dw_probe(struct platform_device *pdev)
 
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 
-	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
-			dev_name(&pdev->dev), nr_channels);
+	dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+		 nr_channels);
 
 	dma_async_device_register(&dw->dma);
@@ -1658,7 +1863,7 @@ static void dw_shutdown(struct platform_device *pdev)
 {
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
-	dw_dma_off(platform_get_drvdata(pdev));
+	dw_dma_off(dw);
 	clk_disable_unprepare(dw->clk);
 }
 
@@ -1667,7 +1872,7 @@ static int dw_suspend_noirq(struct device *dev)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_dma	*dw = platform_get_drvdata(pdev);
 
-	dw_dma_off(platform_get_drvdata(pdev));
+	dw_dma_off(dw);
 	clk_disable_unprepare(dw->clk);
 
 	return 0;
@@ -1680,6 +1885,7 @@ static int dw_resume_noirq(struct device *dev)
 
 	clk_prepare_enable(dw->clk);
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
 	return 0;
 }
 
@@ -1700,7 +1906,13 @@ static const struct of_device_id dw_dma_id_table[] = {
 MODULE_DEVICE_TABLE(of, dw_dma_id_table);
 #endif
 
+static const struct platform_device_id dw_dma_ids[] = {
+	{ "INTL9C60", 0 },
+	{ }
+};
+
 static struct platform_driver dw_driver = {
+	.probe		= dw_probe,
 	.remove		= dw_remove,
 	.shutdown	= dw_shutdown,
 	.driver = {
@@ -1708,11 +1920,12 @@ static struct platform_driver dw_driver = {
 		.pm	= &dw_dev_pm_ops,
 		.of_match_table = of_match_ptr(dw_dma_id_table),
 	},
+	.id_table	= dw_dma_ids,
 };
 
 static int __init dw_init(void)
 {
-	return platform_driver_probe(&dw_driver, dw_probe);
+	return platform_driver_register(&dw_driver);
 }
 subsys_initcall(dw_init);
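The registration change at the bottom is what makes the new id_table useful: platform_driver_probe() binds only to devices that already exist at init time, while platform_driver_register() with a .probe member also binds devices enumerated later (for example the INTL9C60 id above, instantiated from ACPI). A minimal skeleton of the late-binding form, with hypothetical names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound\n");
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,	/* allows binding after init */
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
subsys_initcall(example_init);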