From cb9ab2d8e4661c811d5e9a8e687b6f736690c90e Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:04 +0100 Subject: dma40: make init function static Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6e1d46a65d0e..52013ed4b442 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3060,7 +3060,7 @@ static struct platform_driver d40_driver = { }, }; -int __init stedma40_init(void) +static int __init stedma40_init(void) { return platform_driver_probe(&d40_driver, d40_probe); } -- cgit v1.2.3 From 262d2915d4f11e5e78e432ab68f0ee034ef3f75f Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:05 +0100 Subject: dma40: ensure event lines get enabled The controller sometimes fails to register the enable of the event line when both src and dst event lines are used on the same logical channel. Implement the recommended software workaround, which is to retry the write until it works. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 63 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 52013ed4b442..42c88fc28815 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -303,6 +303,11 @@ struct d40_reg_val { unsigned int val; }; +static struct device *chan2dev(struct d40_chan *d40c) +{ + return &d40c->chan.dev->device; +} + static int d40_pool_lli_alloc(struct d40_desc *d40d, int lli_len, bool is_log) { @@ -701,17 +706,46 @@ static void d40_term_all(struct d40_chan *d40c) d40c->busy = false; } +static void __d40_config_set_event(struct d40_chan *d40c, bool enable, + u32 event, int reg) +{ + void __iomem *addr = d40c->base->virtbase + D40_DREG_PCBASE + + d40c->phy_chan->num * D40_DREG_PCDELTA + reg; + int tries; + + if (!enable) { + writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) + | ~D40_EVENTLINE_MASK(event), addr); + return; + } + + /* + * The hardware sometimes doesn't register the enable when src and dst + * event lines are active on the same logical channel. Retry to ensure + * it does. Usually only one retry is sufficient. + */ + tries = 100; + while (--tries) { + writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) + | ~D40_EVENTLINE_MASK(event), addr); + + if (readl(addr) & D40_EVENTLINE_MASK(event)) + break; + } + + if (tries != 99) + dev_dbg(chan2dev(d40c), + "[%s] workaround enable S%cLNK (%d tries)\n", + __func__, reg == D40_CHAN_REG_SSLNK ? 
'S' : 'D', + 100 - tries); + + WARN_ON(!tries); +} + static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) { - u32 val; unsigned long flags; - /* Notice, that disable requires the physical channel to be stopped */ - if (do_enable) - val = D40_ACTIVATE_EVENTLINE; - else - val = D40_DEACTIVATE_EVENTLINE; - spin_lock_irqsave(&d40c->phy_chan->lock, flags); /* Enable event line connected to device (or memcpy) */ @@ -719,20 +753,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); - writel((val << D40_EVENTLINE_POS(event)) | - ~D40_EVENTLINE_MASK(event), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); + __d40_config_set_event(d40c, do_enable, event, + D40_CHAN_REG_SSLNK); } + if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); - writel((val << D40_EVENTLINE_POS(event)) | - ~D40_EVENTLINE_MASK(event), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); + __d40_config_set_event(d40c, do_enable, event, + D40_CHAN_REG_SDLNK); } spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); -- cgit v1.2.3 From 7d83a854a1a44a8f6a699503441403a36c42f66c Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:06 +0100 Subject: dma40: remove "hardware link with previous jobs" code This link in hardware with previous jobs code is: - unused, no clients using or requiring this feature - incomplete, being implemented only for physical channels - broken, only working to perform one link Remove it. This also allows us to get rid of the channel pause in the submit_tx() routine. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 113 +++--------------------------------------------- 1 file changed, 6 insertions(+), 107 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 42c88fc28815..ed2a3ebcd86c 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -94,7 +94,6 @@ struct d40_lli_pool { * during a transfer. * @node: List entry. * @is_in_client_list: true if the client owns this descriptor. - * @is_hw_linked: true if this job will automatically be continued for * the previous one. * * This descriptor is used for both logical and physical transfers. 
@@ -114,7 +113,6 @@ struct d40_desc { struct list_head node; bool is_in_client_list; - bool is_hw_linked; }; /** @@ -548,18 +546,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) return d; } -static struct d40_desc *d40_last_queued(struct d40_chan *d40c) -{ - struct d40_desc *d; - - if (list_empty(&d40c->queue)) - return NULL; - list_for_each_entry(d, &d40c->queue, node) - if (list_is_last(&d->node, &d40c->queue)) - break; - return d; -} - static int d40_psize_2_burst_size(bool is_log, int psize) { if (is_log) { @@ -940,77 +926,6 @@ no_suspend: return res; } -static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) -{ - /* TODO: Write */ -} - -static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) -{ - struct d40_desc *d40d_prev = NULL; - int i; - u32 val; - - if (!list_empty(&d40c->queue)) - d40d_prev = d40_last_queued(d40c); - else if (!list_empty(&d40c->active)) - d40d_prev = d40_first_active_get(d40c); - - if (!d40d_prev) - return; - - /* Here we try to join this job with previous jobs */ - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); - - /* Figure out which link we're currently transmitting */ - for (i = 0; i < d40d_prev->lli_len; i++) - if (val == d40d_prev->lli_phy.src[i].reg_lnk) - break; - - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; - - if (i == (d40d_prev->lli_len - 1) && val > 0) { - /* Change the current one */ - writel(virt_to_phys(d40d->lli_phy.src), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); - writel(virt_to_phys(d40d->lli_phy.dst), - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); - - d40d->is_hw_linked = true; - - } else if (i < d40d_prev->lli_len) { - (void) dma_unmap_single(d40c->base->dev, - virt_to_phys(d40d_prev->lli_phy.src), - d40d_prev->lli_pool.size, - DMA_TO_DEVICE); - - /* Keep the settings */ - val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & - ~D40_SREG_LNK_PHYS_LNK_MASK; - d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = - val | virt_to_phys(d40d->lli_phy.src); - - val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & - ~D40_SREG_LNK_PHYS_LNK_MASK; - d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = - val | virt_to_phys(d40d->lli_phy.dst); - - (void) dma_map_single(d40c->base->dev, - d40d_prev->lli_phy.src, - d40d_prev->lli_pool.size, - DMA_TO_DEVICE); - d40d->is_hw_linked = true; - } -} - static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) { struct d40_chan *d40c = container_of(tx->chan, @@ -1019,8 +934,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); unsigned long flags; - (void) d40_pause(&d40c->chan); - spin_lock_irqsave(&d40c->lock, flags); d40c->chan.cookie++; @@ -1030,17 +943,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) d40d->txd.cookie = d40c->chan.cookie; - if (d40c->log_num == D40_PHY_CHAN) - d40_tx_submit_phy(d40c, d40d); - else - d40_tx_submit_log(d40c, d40d); - d40_desc_queue(d40c, d40d); spin_unlock_irqrestore(&d40c->lock, flags); - (void) d40_resume(&d40c->chan); - return tx->cookie; } @@ -1080,21 +986,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) /* Add to active queue */ 
d40_desc_submit(d40c, d40d); - /* - * If this job is already linked in hw, - * do not submit it. - */ - - if (!d40d->is_hw_linked) { - /* Initiate DMA job */ - d40_desc_load(d40c, d40d); + /* Initiate DMA job */ + d40_desc_load(d40c, d40d); - /* Start dma job */ - err = d40_start(d40c); + /* Start dma job */ + err = d40_start(d40c); - if (err) - return NULL; - } + if (err) + return NULL; } return d40d; -- cgit v1.2.3 From 8ca84687b91322b9eafeaf4da43a21684cd0316e Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:07 +0100 Subject: dma40: use helper for channel registers base The register offset computation for accessing channel registers is copy/pasted in several places. Create a helper function to do it. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 74 ++++++++++++++++++++----------------------------- 1 file changed, 30 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index ed2a3ebcd86c..3d4cea3cff35 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -306,6 +306,12 @@ static struct device *chan2dev(struct d40_chan *d40c) return &d40c->chan.dev->device; } +static void __iomem *chan_base(struct d40_chan *chan) +{ + return chan->base->virtbase + D40_DREG_PCBASE + + chan->phy_chan->num * D40_DREG_PCDELTA; +} + static int d40_pool_lli_alloc(struct d40_desc *d40d, int lli_len, bool is_log) { @@ -695,8 +701,7 @@ static void d40_term_all(struct d40_chan *d40c) static void __d40_config_set_event(struct d40_chan *d40c, bool enable, u32 event, int reg) { - void __iomem *addr = d40c->base->virtbase + D40_DREG_PCBASE - + d40c->phy_chan->num * D40_DREG_PCDELTA + reg; + void __iomem *addr = chan_base(d40c) + reg; int tries; if (!enable) { @@ -755,15 +760,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) static u32 d40_chan_has_events(struct d40_chan *d40c) { + void __iomem *chanbase = chan_base(d40c); u32 val; - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); + val = readl(chanbase + D40_CHAN_REG_SSLNK); + val |= readl(chanbase + D40_CHAN_REG_SDLNK); - val |= readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); return val; } @@ -810,29 +812,17 @@ static void d40_config_write(struct d40_chan *d40c) writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); if (d40c->log_num != D40_PHY_CHAN) { + int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) + & D40_SREG_ELEM_LOG_LIDX_MASK; + void __iomem *chanbase = chan_base(d40c); + /* Set default config for CFG reg */ - writel(d40c->src_def_cfg, - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSCFG); - writel(d40c->dst_def_cfg, - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDCFG); + writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); + writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); /* Set LIDX for lcla */ - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & - D40_SREG_ELEM_LOG_LIDX_MASK, - d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDELT); - - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & - D40_SREG_ELEM_LOG_LIDX_MASK, - d40c->base->virtbase + D40_DREG_PCBASE + - 
d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSELT); - + writel(lidx, chanbase + D40_CHAN_REG_SSELT); + writel(lidx, chanbase + D40_CHAN_REG_SDELT); } } @@ -843,12 +833,12 @@ static u32 d40_residue(struct d40_chan *d40c) if (d40c->log_num != D40_PHY_CHAN) num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) >> D40_MEM_LCSP2_ECNT_POS; - else - num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDELT) & - D40_SREG_ELEM_PHY_ECNT_MASK) >> - D40_SREG_ELEM_PHY_ECNT_POS; + else { + u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); + num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) + >> D40_SREG_ELEM_PHY_ECNT_POS; + } + return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); } @@ -859,10 +849,9 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) if (d40c->log_num != D40_PHY_CHAN) is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; else - is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK) & - D40_SREG_LNK_PHYS_LNK_MASK; + is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) + & D40_SREG_LNK_PHYS_LNK_MASK; + return is_link; } @@ -1550,6 +1539,7 @@ static int d40_free_dma(struct d40_chan *d40c) static bool d40_is_paused(struct d40_chan *d40c) { + void __iomem *chanbase = chan_base(d40c); bool is_paused = false; unsigned long flags; void __iomem *active_reg; @@ -1576,14 +1566,10 @@ static bool d40_is_paused(struct d40_chan *d40c) if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); - status = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SDLNK); + status = readl(chanbase + D40_CHAN_REG_SDLNK); } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); - status = readl(d40c->base->virtbase + D40_DREG_PCBASE + - d40c->phy_chan->num * D40_DREG_PCDELTA + - D40_CHAN_REG_SSLNK); + status = readl(chanbase + D40_CHAN_REG_SSLNK); } else { dev_err(&d40c->chan.dev->device, "[%s] Unknown direction\n", __func__); -- cgit v1.2.3 From 724a8577d80c6f8e9ac680be1cf419eddbd6f2a1 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:08 +0100 Subject: dma40: use helpers for channel type check The somewhat confusing check d40c->log_num == D40_PHY_CHAN and its variants are used in several places to check if a channel is logical or physical. Use appropriately named helpers to do this to make the code more readable. 
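
For illustration only (the hunks below add exactly these helpers; struct d40_chan and
D40_PHY_CHAN are existing definitions in ste_dma40.c), the named predicates and the
open-coded test they replace look like this:

	static bool chan_is_physical(struct d40_chan *chan)
	{
		return chan->log_num == D40_PHY_CHAN;
	}

	static bool chan_is_logical(struct d40_chan *chan)
	{
		return !chan_is_physical(chan);
	}

	/* before: if (d40c->log_num != D40_PHY_CHAN) ... */
	/* after:  if (chan_is_logical(d40c)) ...         */
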
Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 54 +++++++++++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 3d4cea3cff35..0073988cdaf6 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -306,6 +306,16 @@ static struct device *chan2dev(struct d40_chan *d40c) return &d40c->chan.dev->device; } +static bool chan_is_physical(struct d40_chan *chan) +{ + return chan->log_num == D40_PHY_CHAN; +} + +static bool chan_is_logical(struct d40_chan *chan) +{ + return !chan_is_physical(chan); +} + static void __iomem *chan_base(struct d40_chan *chan) { return chan->base->virtbase + D40_DREG_PCBASE + @@ -400,7 +410,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c, int i; int ret = -EINVAL; - if (d40c->log_num == D40_PHY_CHAN) + if (chan_is_physical(d40c)) return 0; spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); @@ -472,7 +482,7 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) { int curr_lcla = -EINVAL, next_lcla; - if (d40c->log_num == D40_PHY_CHAN) { + if (chan_is_physical(d40c)) { d40_phy_lli_write(d40c->base->virtbase, d40c->phy_chan->num, d40d->lli_phy.dst, @@ -788,7 +798,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c) = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, }; - if (d40c->log_num == D40_PHY_CHAN) + if (chan_is_physical(d40c)) return phy_map[d40c->dma_cfg.mode_opt]; else return log_map[d40c->dma_cfg.mode_opt]; @@ -802,7 +812,7 @@ static void d40_config_write(struct d40_chan *d40c) /* Odd addresses are even addresses + 4 */ addr_base = (d40c->phy_chan->num % 2) * 4; /* Setup channel mode to logical or physical */ - var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << + var = ((u32)(chan_is_logical(d40c)) + 1) << D40_CHAN_POS(d40c->phy_chan->num); writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); @@ -811,7 +821,7 @@ static void d40_config_write(struct d40_chan *d40c) writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & D40_SREG_ELEM_LOG_LIDX_MASK; void __iomem *chanbase = chan_base(d40c); @@ -830,7 +840,7 @@ static u32 d40_residue(struct d40_chan *d40c) { u32 num_elt; - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) >> D40_MEM_LCSP2_ECNT_POS; else { @@ -846,7 +856,7 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) { bool is_link; - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; else is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) @@ -869,7 +879,7 @@ static int d40_pause(struct dma_chan *chan) res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); if (res == 0) { - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { d40_config_set_event(d40c, false); /* Resume the other logical channels if any */ if (d40_chan_has_events(d40c)) @@ -895,7 +905,7 @@ static int d40_resume(struct dma_chan *chan) spin_lock_irqsave(&d40c->lock, flags); if (d40c->base->rev == 0) - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); goto no_suspend; @@ -904,7 +914,7 @@ static int d40_resume(struct 
dma_chan *chan) /* If bytes left to transfer or linked tx resume job */ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) d40_config_set_event(d40c, true); res = d40_channel_execute_command(d40c, D40_DMA_RUN); @@ -944,7 +954,7 @@ static int d40_start(struct d40_chan *d40c) if (d40c->base->rev == 0) { int err; - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); if (err) @@ -952,7 +962,7 @@ static int d40_start(struct d40_chan *d40c) } } - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) d40_config_set_event(d40c, true); return d40_channel_execute_command(d40c, D40_DMA_RUN); @@ -1495,7 +1505,7 @@ static int d40_free_dma(struct d40_chan *d40c) return res; } - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { /* Release logical channel, deactivate the event line */ d40_config_set_event(d40c, false); @@ -1548,7 +1558,7 @@ static bool d40_is_paused(struct d40_chan *d40c) spin_lock_irqsave(&d40c->lock, flags); - if (d40c->log_num == D40_PHY_CHAN) { + if (chan_is_physical(d40c)) { if (d40c->phy_chan->num % 2 == 0) active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; else @@ -1638,7 +1648,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_current = 0; d40d->txd.flags = dma_flags; - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -1765,9 +1775,9 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) /* Fill in basic CFG register values */ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, - &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); + &d40c->dst_def_cfg, chan_is_logical(d40c)); - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { d40_log_cfg(&d40c->dma_cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); @@ -1857,7 +1867,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40d->txd.tx_submit = d40_tx_submit; - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { dev_err(&d40c->chan.dev->device, @@ -2093,7 +2103,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, if (d40d == NULL) goto err; - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, direction, dma_flags); else @@ -2103,7 +2113,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, dev_err(&d40c->chan.dev->device, "[%s] Failed to prepare %s slave sg job: %d\n", __func__, - d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); + chan_is_logical(d40c) ? 
"log" : "phy", err); goto err; } @@ -2253,7 +2263,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, return; } - if (d40c->log_num != D40_PHY_CHAN) { + if (chan_is_logical(d40c)) { if (config_maxburst >= 16) psize = STEDMA40_PSIZE_LOG_16; else if (config_maxburst >= 8) @@ -2286,7 +2296,7 @@ static void d40_set_runtime_config(struct dma_chan *chan, cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; /* Fill in register values */ - if (d40c->log_num != D40_PHY_CHAN) + if (chan_is_logical(d40c)) d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); else d40_phy_cfg(cfg, &d40c->src_def_cfg, -- cgit v1.2.3 From 6db5a8ba11bf23d1618e392518f1684cbf2fe031 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:09 +0100 Subject: dma40: use helpers for error functions Almost every use of dev_err in this driver prints the function name. Abstract out wrappers to help with this and reduce code duplication. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 201 ++++++++++++++++++------------------------------ 1 file changed, 74 insertions(+), 127 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0073988cdaf6..ab3ca6af7dbd 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -322,6 +322,12 @@ static void __iomem *chan_base(struct d40_chan *chan) chan->phy_chan->num * D40_DREG_PCDELTA; } +#define d40_err(dev, format, arg...) \ + dev_err(dev, "[%s] " format, __func__, ## arg) + +#define chan_err(d40c, format, arg...) \ + d40_err(chan2dev(d40c), format, ## arg) + static int d40_pool_lli_alloc(struct d40_desc *d40d, int lli_len, bool is_log) { @@ -673,9 +679,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, } if (i == D40_SUSPEND_MAX_IT) { - dev_err(&d40c->chan.dev->device, - "[%s]: unable to suspend the chl %d (log: %d) status %x\n", - __func__, d40c->phy_chan->num, d40c->log_num, + chan_err(d40c, + "unable to suspend the chl %d (log: %d) status %x\n", + d40c->phy_chan->num, d40c->log_num, status); dump_stack(); ret = -EBUSY; @@ -1143,9 +1149,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) if (!il[row].is_error) dma_tc_handle(d40c); else - dev_err(base->dev, - "[%s] IRQ chan: %ld offset %d idx %d\n", - __func__, chan, il[row].offset, idx); + d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", + chan, il[row].offset, idx); spin_unlock(&d40c->lock); } @@ -1164,8 +1169,7 @@ static int d40_validate_conf(struct d40_chan *d40c, bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; if (!conf->dir) { - dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", - __func__); + chan_err(d40c, "Invalid direction.\n"); res = -EINVAL; } @@ -1173,46 +1177,40 @@ static int d40_validate_conf(struct d40_chan *d40c, d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && d40c->runtime_addr == 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Invalid TX channel address (%d)\n", - __func__, conf->dst_dev_type); + chan_err(d40c, "Invalid TX channel address (%d)\n", + conf->dst_dev_type); res = -EINVAL; } if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && d40c->runtime_addr == 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Invalid RX channel address (%d)\n", - __func__, conf->src_dev_type); + chan_err(d40c, "Invalid RX channel address (%d)\n", + conf->src_dev_type); res = -EINVAL; } if (conf->dir == STEDMA40_MEM_TO_PERIPH 
&& dst_event_group == STEDMA40_DEV_DST_MEMORY) { - dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", - __func__); + chan_err(d40c, "Invalid dst\n"); res = -EINVAL; } if (conf->dir == STEDMA40_PERIPH_TO_MEM && src_event_group == STEDMA40_DEV_SRC_MEMORY) { - dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", - __func__); + chan_err(d40c, "Invalid src\n"); res = -EINVAL; } if (src_event_group == STEDMA40_DEV_SRC_MEMORY && dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { - dev_err(&d40c->chan.dev->device, - "[%s] No event line\n", __func__); + chan_err(d40c, "No event line\n"); res = -EINVAL; } if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && (src_event_group != dst_event_group)) { - dev_err(&d40c->chan.dev->device, - "[%s] Invalid event group\n", __func__); + chan_err(d40c, "Invalid event group\n"); res = -EINVAL; } @@ -1221,9 +1219,7 @@ static int d40_validate_conf(struct d40_chan *d40c, * DMAC HW supports it. Will be added to this driver, * in case any dma client requires it. */ - dev_err(&d40c->chan.dev->device, - "[%s] periph to periph not supported\n", - __func__); + chan_err(d40c, "periph to periph not supported\n"); res = -EINVAL; } @@ -1236,9 +1232,7 @@ static int d40_validate_conf(struct d40_chan *d40c, * src (burst x width) == dst (burst x width) */ - dev_err(&d40c->chan.dev->device, - "[%s] src (burst x width) != dst (burst x width)\n", - __func__); + chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); res = -EINVAL; } @@ -1441,8 +1435,7 @@ static int d40_config_memcpy(struct d40_chan *d40c) dma_has_cap(DMA_SLAVE, cap)) { d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; } else { - dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", - __func__); + chan_err(d40c, "No memcpy\n"); return -EINVAL; } @@ -1473,15 +1466,13 @@ static int d40_free_dma(struct d40_chan *d40c) } if (phy == NULL) { - dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", - __func__); + chan_err(d40c, "phy == null\n"); return -EINVAL; } if (phy->allocated_src == D40_ALLOC_FREE && phy->allocated_dst == D40_ALLOC_FREE) { - dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", - __func__); + chan_err(d40c, "channel already free\n"); return -EINVAL; } @@ -1493,15 +1484,13 @@ static int d40_free_dma(struct d40_chan *d40c) event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); is_src = true; } else { - dev_err(&d40c->chan.dev->device, - "[%s] Unknown direction\n", __func__); + chan_err(d40c, "Unknown direction\n"); return -EINVAL; } res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); if (res) { - dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", - __func__); + chan_err(d40c, "suspend failed\n"); return res; } @@ -1521,9 +1510,8 @@ static int d40_free_dma(struct d40_chan *d40c) res = d40_channel_execute_command(d40c, D40_DMA_RUN); if (res) { - dev_err(&d40c->chan.dev->device, - "[%s] Executing RUN command\n", - __func__); + chan_err(d40c, + "Executing RUN command\n"); return res; } } @@ -1536,8 +1524,7 @@ static int d40_free_dma(struct d40_chan *d40c) /* Release physical channel */ res = d40_channel_execute_command(d40c, D40_DMA_STOP); if (res) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to stop channel\n", __func__); + chan_err(d40c, "Failed to stop channel\n"); return res; } d40c->phy_chan = NULL; @@ -1581,8 +1568,7 @@ static bool d40_is_paused(struct d40_chan *d40c) event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); status = readl(chanbase + D40_CHAN_REG_SSLNK); } else { - dev_err(&d40c->chan.dev->device, - "[%s] Unknown direction\n", 
__func__); + chan_err(d40c, "Unknown direction\n"); goto _exit; } @@ -1625,8 +1611,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Unallocated channel.\n", __func__); + chan_err(d40c, "Unallocated channel.\n"); return ERR_PTR(-EINVAL); } @@ -1640,8 +1625,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); goto err; } @@ -1651,8 +1635,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } @@ -1671,8 +1654,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40c->dma_cfg.src_info.data_width); } else { if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } @@ -1758,9 +1740,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) if (!d40c->configured) { err = d40_config_memcpy(d40c); if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to configure memcpy channel\n", - __func__); + chan_err(d40c, "Failed to configure memcpy channel\n"); goto fail; } } @@ -1768,8 +1748,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) err = d40_allocate_channel(d40c); if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to allocate channel\n", __func__); + chan_err(d40c, "Failed to allocate channel\n"); goto fail; } @@ -1810,8 +1789,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Cannot free unallocated channel\n", __func__); + chan_err(d40c, "Cannot free unallocated channel\n"); return; } @@ -1821,8 +1799,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) err = d40_free_dma(d40c); if (err) - dev_err(&d40c->chan.dev->device, - "[%s] Failed to free channel\n", __func__); + chan_err(d40c, "Failed to free channel\n"); spin_unlock_irqrestore(&d40c->lock, flags); } @@ -1838,8 +1815,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Channel is not allocated.\n", __func__); + chan_err(d40c, "Channel is not allocated.\n"); return ERR_PTR(-EINVAL); } @@ -1847,8 +1823,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40d = d40_desc_get(d40c); if (d40d == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Descriptor is NULL\n", __func__); + chan_err(d40c, "Descriptor is NULL\n"); goto err; } @@ -1857,8 +1832,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); goto err; } @@ -1870,8 +1844,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) 
< 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } d40d->lli_current = 0; @@ -1897,8 +1870,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, } else { if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); goto err; } @@ -1966,14 +1938,12 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); return -EINVAL; } if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2022,14 +1992,12 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, d40c->dma_cfg.src_info.data_width, d40c->dma_cfg.dst_info.data_width); if (d40d->lli_len < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Unaligned size\n", __func__); + chan_err(d40c, "Unaligned size\n"); return -EINVAL; } if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { - dev_err(&d40c->chan.dev->device, - "[%s] Out of memory\n", __func__); + chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2092,8 +2060,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, int err; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Cannot prepare unallocated channel\n", __func__); + chan_err(d40c, "Cannot prepare unallocated channel\n"); return ERR_PTR(-EINVAL); } @@ -2110,9 +2077,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, direction, dma_flags); if (err) { - dev_err(&d40c->chan.dev->device, - "[%s] Failed to prepare %s slave sg job: %d\n", - __func__, + chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", chan_is_logical(d40c) ? 
"log" : "phy", err); goto err; } @@ -2143,9 +2108,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, int ret; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Cannot read status of unallocated channel\n", - __func__); + chan_err(d40c, "Cannot read status of unallocated channel\n"); return -EINVAL; } @@ -2169,8 +2132,7 @@ static void d40_issue_pending(struct dma_chan *chan) unsigned long flags; if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Channel is not allocated!\n", __func__); + chan_err(d40c, "Channel is not allocated!\n"); return; } @@ -2321,8 +2283,7 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); if (d40c->phy_chan == NULL) { - dev_err(&d40c->chan.dev->device, - "[%s] Channel is not allocated!\n", __func__); + chan_err(d40c, "Channel is not allocated!\n"); return -EINVAL; } @@ -2404,9 +2365,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, err = dma_async_device_register(&base->dma_slave); if (err) { - dev_err(base->dev, - "[%s] Failed to register slave channels\n", - __func__); + d40_err(base->dev, "Failed to register slave channels\n"); goto failure1; } @@ -2435,9 +2394,8 @@ static int __init d40_dmaengine_init(struct d40_base *base, err = dma_async_device_register(&base->dma_memcpy); if (err) { - dev_err(base->dev, - "[%s] Failed to regsiter memcpy only channels\n", - __func__); + d40_err(base->dev, + "Failed to regsiter memcpy only channels\n"); goto failure2; } @@ -2462,9 +2420,8 @@ static int __init d40_dmaengine_init(struct d40_base *base, err = dma_async_device_register(&base->dma_both); if (err) { - dev_err(base->dev, - "[%s] Failed to register logical and physical capable channels\n", - __func__); + d40_err(base->dev, + "Failed to register logical and physical capable channels\n"); goto failure3; } return 0; @@ -2566,8 +2523,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "[%s] No matching clock found\n", - __func__); + d40_err(&pdev->dev, "No matching clock found\n"); goto failure; } @@ -2590,9 +2546,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { if (dma_id_regs[i].val != readl(virtbase + dma_id_regs[i].reg)) { - dev_err(&pdev->dev, - "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", - __func__, + d40_err(&pdev->dev, + "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", dma_id_regs[i].val, dma_id_regs[i].reg, readl(virtbase + dma_id_regs[i].reg)); @@ -2605,9 +2560,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != D40_HW_DESIGNER) { - dev_err(&pdev->dev, - "[%s] Unknown designer! Got %x wanted %x\n", - __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK, + d40_err(&pdev->dev, "Unknown designer! 
Got %x wanted %x\n", + val & D40_DREG_PERIPHID2_DESIGNER_MASK, D40_HW_DESIGNER); goto failure; } @@ -2637,7 +2591,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) sizeof(struct d40_chan), GFP_KERNEL); if (base == NULL) { - dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); + d40_err(&pdev->dev, "Out of memory\n"); goto failure; } @@ -2809,9 +2763,8 @@ static int __init d40_lcla_allocate(struct d40_base *base) base->lcla_pool.pages); if (!page_list[i]) { - dev_err(base->dev, - "[%s] Failed to allocate %d pages.\n", - __func__, base->lcla_pool.pages); + d40_err(base->dev, "Failed to allocate %d pages.\n", + base->lcla_pool.pages); for (j = 0; j < i; j++) free_pages(page_list[j], base->lcla_pool.pages); @@ -2881,9 +2834,7 @@ static int __init d40_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); if (!res) { ret = -ENOENT; - dev_err(&pdev->dev, - "[%s] No \"lcpa\" memory resource\n", - __func__); + d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); goto failure; } base->lcpa_size = resource_size(res); @@ -2892,9 +2843,9 @@ static int __init d40_probe(struct platform_device *pdev) if (request_mem_region(res->start, resource_size(res), D40_NAME " I/O lcpa") == NULL) { ret = -EBUSY; - dev_err(&pdev->dev, - "[%s] Failed to request LCPA region 0x%x-0x%x\n", - __func__, res->start, res->end); + d40_err(&pdev->dev, + "Failed to request LCPA region 0x%x-0x%x\n", + res->start, res->end); goto failure; } @@ -2910,16 +2861,13 @@ static int __init d40_probe(struct platform_device *pdev) base->lcpa_base = ioremap(res->start, resource_size(res)); if (!base->lcpa_base) { ret = -ENOMEM; - dev_err(&pdev->dev, - "[%s] Failed to ioremap LCPA region\n", - __func__); + d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); goto failure; } ret = d40_lcla_allocate(base); if (ret) { - dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", - __func__); + d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); goto failure; } @@ -2928,9 +2876,8 @@ static int __init d40_probe(struct platform_device *pdev) base->irq = platform_get_irq(pdev, 0); ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); - if (ret) { - dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); + d40_err(&pdev->dev, "No IRQ defined\n"); goto failure; } @@ -2973,7 +2920,7 @@ failure: kfree(base); } - dev_err(&pdev->dev, "[%s] probe failed\n", __func__); + d40_err(&pdev->dev, "probe failed\n"); return ret; } -- cgit v1.2.3 From 4d5949009e585b2bcf09dc4de625351f987a1e6d Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:10 +0100 Subject: dma40: fix comment to refer to SOCs rather than boards And add DB8500v2 information. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index ab3ca6af7dbd..0faae662ad15 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2497,9 +2497,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, /* * D40_DREG_PERIPHID2 Depends on HW revision: - * MOP500/HREF ED has 0x0008, + * DB8500ed has 0x0008, * ? 
has 0x0018, - * HREF V1 has 0x0028 + * DB8500v1 has 0x0028 + * DB8500v2 has 0x0038 */ { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, -- cgit v1.2.3 From ac2c0a387194f45c759572b3462d1bf92ec92f00 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:11 +0100 Subject: dma40: allow realtime and priority for event lines DB8500v2's DMA40 (revision 3) allows setting event lines as high priority and real time. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- arch/arm/plat-nomadik/include/plat/ste_dma40.h | 3 +++ drivers/dma/ste_dma40.c | 34 ++++++++++++++++++++++++++ drivers/dma/ste_dma40_ll.h | 16 ++++++++++++ 3 files changed, 53 insertions(+) (limited to 'drivers') diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index 4d6dd4c39b75..8358f857a29e 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -104,6 +104,8 @@ struct stedma40_half_channel_info { * * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH * @high_priority: true if high-priority + * @realtime: true if realtime mode is to be enabled. Only available on DMA40 + * version 3+, i.e DB8500v2+ * @mode: channel mode: physical, logical, or operation * @mode_opt: options for the chosen channel mode * @src_dev_type: Src device type @@ -119,6 +121,7 @@ struct stedma40_half_channel_info { struct stedma40_chan_cfg { enum stedma40_xfer_dir dir; bool high_priority; + bool realtime; enum stedma40_mode mode; enum stedma40_mode_opt mode_opt; int src_dev_type; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0faae662ad15..ce5516221581 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1724,6 +1724,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data) } EXPORT_SYMBOL(stedma40_filter); +static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) +{ + bool realtime = d40c->dma_cfg.realtime; + bool highprio = d40c->dma_cfg.high_priority; + u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; + u32 rtreg = realtime ? 
D40_DREG_RSEG1 : D40_DREG_RCEG1; + u32 event = D40_TYPE_TO_EVENT(dev_type); + u32 group = D40_TYPE_TO_GROUP(dev_type); + u32 bit = 1 << event; + + /* Destination event lines are stored in the upper halfword */ + if (!src) + bit <<= 16; + + writel(bit, d40c->base->virtbase + prioreg + group * 4); + writel(bit, d40c->base->virtbase + rtreg + group * 4); +} + +static void d40_set_prio_realtime(struct d40_chan *d40c) +{ + if (d40c->base->rev < 3) + return; + + if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); + + if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) + __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); +} + /* DMA ENGINE functions */ static int d40_alloc_chan_resources(struct dma_chan *chan) { @@ -1756,6 +1788,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg, chan_is_logical(d40c)); + d40_set_prio_realtime(d40c); + if (chan_is_logical(d40c)) { d40_log_cfg(&d40c->dma_cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 9cc43495bea2..e93f394187a3 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -163,6 +163,22 @@ #define D40_DREG_LCEIS1 0x0B4 #define D40_DREG_LCEIS2 0x0B8 #define D40_DREG_LCEIS3 0x0BC +#define D40_DREG_PSEG1 0x110 +#define D40_DREG_PSEG2 0x114 +#define D40_DREG_PSEG3 0x118 +#define D40_DREG_PSEG4 0x11C +#define D40_DREG_PCEG1 0x120 +#define D40_DREG_PCEG2 0x124 +#define D40_DREG_PCEG3 0x128 +#define D40_DREG_PCEG4 0x12C +#define D40_DREG_RSEG1 0x130 +#define D40_DREG_RSEG2 0x134 +#define D40_DREG_RSEG3 0x138 +#define D40_DREG_RSEG4 0x13C +#define D40_DREG_RCEG1 0x140 +#define D40_DREG_RCEG2 0x144 +#define D40_DREG_RCEG3 0x148 +#define D40_DREG_RCEG4 0x14C #define D40_DREG_STFU 0xFC8 #define D40_DREG_ICFG 0xFCC #define D40_DREG_PERIPHID0 0xFE0 -- cgit v1.2.3 From 594ece4dc0e8ac222945ef2048430600ad3c7644 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:12 +0100 Subject: dma40: remove unnecessary ALIGN()s ALIGN(x * y, y) == x * y ALIGN(aligned + x * y, y) == aligned + x * y Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index ce5516221581..d32a9ac86084 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -344,7 +344,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); d40d->lli_pool.base = NULL; } else { - d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); + d40d->lli_pool.size = lli_len * 2 * align; base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); d40d->lli_pool.base = base; @@ -356,13 +356,11 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, if (is_log) { d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, align); - d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, - align); + d40d->lli_log.dst = d40d->lli_log.src + lli_len; } else { d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, align); - d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, - align); + d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; } 
return 0; -- cgit v1.2.3 From 7fe8be5a74eb058b0b48970caef83e0215f55944 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:13 +0100 Subject: dma40: use sg_dma_address() instead of sg_phys() The address to use for DMA should be taken from sg_dma_address() and not sg_phys(). Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 0b096a38322d..6f03f580c492 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -272,7 +272,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, if (target) dst = target; else - dst = sg_phys(current_sg); + dst = sg_dma_address(current_sg); l_phys = ALIGN(lli_phys + (lli - lli_sg) * sizeof(struct d40_phy_lli), D40_LLI_ALIGN); @@ -416,7 +416,7 @@ int d40_log_sg_to_dev(struct scatterlist *sg, if (direction == DMA_TO_DEVICE) { lli_src = d40_log_buf_to_lli(lli_src, - sg_phys(current_sg), + sg_dma_address(current_sg), sg_dma_len(current_sg), lcsp->lcsp1, src_data_width, dst_data_width, @@ -431,7 +431,7 @@ int d40_log_sg_to_dev(struct scatterlist *sg, } else { lli_dst = d40_log_buf_to_lli(lli_dst, - sg_phys(current_sg), + sg_dma_address(current_sg), sg_dma_len(current_sg), lcsp->lcsp3, dst_data_width, src_data_width, @@ -491,7 +491,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg, for_each_sg(sg, current_sg, sg_len, i) { total_size += sg_dma_len(current_sg); lli = d40_log_buf_to_lli(lli, - sg_phys(current_sg), + sg_dma_address(current_sg), sg_dma_len(current_sg), lcsp13, data_width1, data_width2, true); -- cgit v1.2.3 From 026cbc424a162e495ad29e91d354fb8fc2da2657 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:14 +0100 Subject: dma40: fix DMA API usage for LCLA Map the buffer once and use dma_sync*() appropriately instead of mapping the buffer over and over without unmapping it. 
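
A minimal sketch of the mapping discipline this change adopts (illustrative only: the
helper names lcla_map()/lcla_push()/lcla_unmap() are hypothetical, while the
dma_map_single()/dma_sync_single_range_for_device()/dma_unmap_single() calls are the
ones the patch below applies to the LCLA pool). The buffer is mapped once at setup,
each CPU-side descriptor update is made visible to the device with a ranged sync, and
the single mapping is released at teardown:

	#include <linux/dma-mapping.h>

	static dma_addr_t lcla_map(struct device *dev, void *buf, size_t size)
	{
		dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

		/* 0 is used as the "not mapped" marker, as in the patch below */
		return dma_mapping_error(dev, addr) ? 0 : addr;
	}

	static void lcla_push(struct device *dev, dma_addr_t addr,
			      unsigned long offset, size_t len)
	{
		/* CPU wrote descriptors at buf + offset; flush them to the device */
		dma_sync_single_range_for_device(dev, addr, offset, len,
						 DMA_TO_DEVICE);
	}

	static void lcla_unmap(struct device *dev, dma_addr_t addr, size_t size)
	{
		if (addr)
			dma_unmap_single(dev, addr, size, DMA_TO_DEVICE);
	}
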
Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d32a9ac86084..f08e5c49c5d2 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -128,6 +128,7 @@ struct d40_desc { */ struct d40_lcla_pool { void *base; + dma_addr_t dma_addr; void *base_unaligned; int pages; spinlock_t lock; @@ -504,25 +505,25 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) d40d->lli_current++; for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { - struct d40_log_lli *lcla; + unsigned int lcla_offset = d40c->phy_chan->num * 1024 + + 8 * curr_lcla * 2; + struct d40_lcla_pool *pool = &d40c->base->lcla_pool; + struct d40_log_lli *lcla = pool->base + lcla_offset; if (d40d->lli_current + 1 < d40d->lli_len) next_lcla = d40_lcla_alloc_one(d40c, d40d); else next_lcla = -EINVAL; - lcla = d40c->base->lcla_pool.base + - d40c->phy_chan->num * 1024 + - 8 * curr_lcla * 2; - d40_log_lli_lcla_write(lcla, &d40d->lli_log.dst[d40d->lli_current], &d40d->lli_log.src[d40d->lli_current], next_lcla); - (void) dma_map_single(d40c->base->dev, lcla, - 2 * sizeof(struct d40_log_lli), - DMA_TO_DEVICE); + dma_sync_single_range_for_device(d40c->base->dev, + pool->dma_addr, lcla_offset, + 2 * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); curr_lcla = next_lcla; @@ -2771,6 +2772,7 @@ static void __init d40_hw_init(struct d40_base *base) static int __init d40_lcla_allocate(struct d40_base *base) { + struct d40_lcla_pool *pool = &base->lcla_pool; unsigned long *page_list; int i, j; int ret = 0; @@ -2835,6 +2837,15 @@ static int __init d40_lcla_allocate(struct d40_base *base) LCLA_ALIGNMENT); } + pool->dma_addr = dma_map_single(base->dev, pool->base, + SZ_1K * base->num_phy_chans, + DMA_TO_DEVICE); + if (dma_mapping_error(base->dev, pool->dma_addr)) { + pool->dma_addr = 0; + ret = -ENOMEM; + goto failure; + } + writel(virt_to_phys(base->lcla_pool.base), base->virtbase + D40_DREG_LCLA); failure: @@ -2929,6 +2940,12 @@ failure: kmem_cache_destroy(base->desc_slab); if (base->virtbase) iounmap(base->virtbase); + + if (base->lcla_pool.dma_addr) + dma_unmap_single(base->dev, base->lcla_pool.dma_addr, + SZ_1K * base->num_phy_chans, + DMA_TO_DEVICE); + if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) free_pages((unsigned long)base->lcla_pool.base, base->lcla_pool.pages); -- cgit v1.2.3 From b00f938c8cf5ba8e7a692519548a256aa3ea1203 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:15 +0100 Subject: dma40: fix DMA API usage for LLIs Map and unmap the LLIs and use dma_sync_single_for_device() appropriately instead of mapping and never unmapping them. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 58 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index f08e5c49c5d2..b5856864d48d 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -68,6 +68,7 @@ enum d40_command { * @base: Pointer to memory area when the pre_alloc_lli's are not large * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if * pre_alloc_lli is used. 
+ * @dma_addr: DMA address, if mapped * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. * @pre_alloc_lli: Pre allocated area for the most common case of transfers, * one buffer to one buffer. @@ -75,6 +76,7 @@ enum d40_command { struct d40_lli_pool { void *base; int size; + dma_addr_t dma_addr; /* Space for dst and src, plus an extra for padding */ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; }; @@ -329,7 +331,7 @@ static void __iomem *chan_base(struct d40_chan *chan) #define chan_err(d40c, format, arg...) \ d40_err(chan2dev(d40c), format, ## arg) -static int d40_pool_lli_alloc(struct d40_desc *d40d, +static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, int lli_len, bool is_log) { u32 align; @@ -358,17 +360,36 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, align); d40d->lli_log.dst = d40d->lli_log.src + lli_len; + + d40d->lli_pool.dma_addr = 0; } else { d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, align); d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; + + d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, + d40d->lli_phy.src, + d40d->lli_pool.size, + DMA_TO_DEVICE); + + if (dma_mapping_error(d40c->base->dev, + d40d->lli_pool.dma_addr)) { + kfree(d40d->lli_pool.base); + d40d->lli_pool.base = NULL; + d40d->lli_pool.dma_addr = 0; + return -ENOMEM; + } } return 0; } -static void d40_pool_lli_free(struct d40_desc *d40d) +static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) { + if (d40d->lli_pool.dma_addr) + dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); + kfree(d40d->lli_pool.base); d40d->lli_pool.base = NULL; d40d->lli_pool.size = 0; @@ -454,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) list_for_each_entry_safe(d, _d, &d40c->client, node) if (async_tx_test_ack(&d->txd)) { - d40_pool_lli_free(d); + d40_pool_lli_free(d40c, d); d40_desc_remove(d); desc = d; memset(desc, 0, sizeof(*desc)); @@ -474,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) { + d40_pool_lli_free(d40c, d40d); d40_lcla_free_all(d40c, d40d); kmem_cache_free(d40c->base->desc_slab, d40d); } @@ -1063,7 +1085,7 @@ static void dma_tasklet(unsigned long data) callback_param = d40d->txd.callback_param; if (async_tx_test_ack(&d40d->txd)) { - d40_pool_lli_free(d40d); + d40_pool_lli_free(d40c, d40d); d40_desc_remove(d40d); d40_desc_free(d40c, d40d); } else { @@ -1459,7 +1481,7 @@ static int d40_free_dma(struct d40_chan *d40c) /* Release client owned descriptors */ if (!list_empty(&d40c->client)) list_for_each_entry_safe(d, _d, &d40c->client, node) { - d40_pool_lli_free(d); + d40_pool_lli_free(d40c, d); d40_desc_remove(d); d40_desc_free(d40c, d); } @@ -1633,7 +1655,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, if (chan_is_logical(d40c)) { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1652,7 +1674,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40c->dma_cfg.dst_info.data_width, d40c->dma_cfg.src_info.data_width); } else { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1683,8 +1705,9 @@ 
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, if (res < 0) goto err; - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, - d40d->lli_pool.size, DMA_TO_DEVICE); + dma_sync_single_for_device(d40c->base->dev, + d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); } dma_async_tx_descriptor_init(&d40d->txd, chan); @@ -1876,7 +1899,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, if (chan_is_logical(d40c)) { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40c,d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1902,7 +1925,7 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, } else { - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); goto err; } @@ -1931,8 +1954,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, false) == NULL) goto err; - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, - d40d->lli_pool.size, DMA_TO_DEVICE); + dma_sync_single_for_device(d40c->base->dev, + d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); } spin_unlock_irqrestore(&d40c->lock, flags); @@ -1975,7 +1999,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, return -EINVAL; } - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2029,7 +2053,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, return -EINVAL; } - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { + if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); return -ENOMEM; } @@ -2075,8 +2099,8 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, if (res < 0) return res; - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, - d40d->lli_pool.size, DMA_TO_DEVICE); + dma_sync_single_for_device(d40c->base->dev, d40d->lli_pool.dma_addr, + d40d->lli_pool.size, DMA_TO_DEVICE); return 0; } -- cgit v1.2.3 From d924abad7fa9a78d70b20552bf27fe4f7a19a2fb Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:16 +0100 Subject: dma40: remove unnecessary casts Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index b5856864d48d..bd72269dac08 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -357,14 +357,12 @@ static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, } if (is_log) { - d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, - align); + d40d->lli_log.src = PTR_ALIGN(base, align); d40d->lli_log.dst = d40d->lli_log.src + lli_len; d40d->lli_pool.dma_addr = 0; } else { - d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, - align); + d40d->lli_phy.src = PTR_ALIGN(base, align); d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, -- cgit v1.2.3 From 95944c6ef5b5214508273992416adb836b63c73f Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:17 +0100 Subject: dma40: implement prep_memcpy as a wrapper around memcpy_sg To simplify the 
code. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 107 ++++----------------------------------------- drivers/dma/ste_dma40_ll.c | 2 +- drivers/dma/ste_dma40_ll.h | 11 ----- 3 files changed, 10 insertions(+), 110 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index bd72269dac08..0a20179349b0 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1863,108 +1863,19 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, size_t size, unsigned long dma_flags) { - struct d40_desc *d40d; - struct d40_chan *d40c = container_of(chan, struct d40_chan, - chan); - unsigned long flags; - - if (d40c->phy_chan == NULL) { - chan_err(d40c, "Channel is not allocated.\n"); - return ERR_PTR(-EINVAL); - } - - spin_lock_irqsave(&d40c->lock, flags); - d40d = d40_desc_get(d40c); - - if (d40d == NULL) { - chan_err(d40c, "Descriptor is NULL\n"); - goto err; - } - - d40d->txd.flags = dma_flags; - d40d->lli_len = d40_size_2_dmalen(size, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - goto err; - } - - - dma_async_tx_descriptor_init(&d40d->txd, chan); - - d40d->txd.tx_submit = d40_tx_submit; - - if (chan_is_logical(d40c)) { - - if (d40_pool_lli_alloc(d40c,d40d, d40d->lli_len, true) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - d40d->lli_current = 0; - - if (d40_log_buf_to_lli(d40d->lli_log.src, - src, - size, - d40c->log_def.lcsp1, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - true) == NULL) - goto err; - - if (d40_log_buf_to_lli(d40d->lli_log.dst, - dst, - size, - d40c->log_def.lcsp3, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - true) == NULL) - goto err; - - } else { - - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - - if (d40_phy_buf_to_lli(d40d->lli_phy.src, - src, - size, - d40c->dma_cfg.src_info.psize, - 0, - d40c->src_def_cfg, - true, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - false) == NULL) - goto err; + struct scatterlist dst_sg; + struct scatterlist src_sg; - if (d40_phy_buf_to_lli(d40d->lli_phy.dst, - dst, - size, - d40c->dma_cfg.dst_info.psize, - 0, - d40c->dst_def_cfg, - true, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - false) == NULL) - goto err; + sg_init_table(&dst_sg, 1); + sg_init_table(&src_sg, 1); - dma_sync_single_for_device(d40c->base->dev, - d40d->lli_pool.dma_addr, - d40d->lli_pool.size, DMA_TO_DEVICE); - } + sg_dma_address(&dst_sg) = dst; + sg_dma_address(&src_sg) = src; - spin_unlock_irqrestore(&d40c->lock, flags); - return &d40d->txd; + sg_dma_len(&dst_sg) = size; + sg_dma_len(&src_sg) = size; -err: - if (d40d) - d40_desc_free(d40c, d40d); - spin_unlock_irqrestore(&d40c->lock, flags); - return NULL; + return stedma40_memcpy_sg(chan, &dst_sg, &src_sg, 1, dma_flags); } static struct dma_async_tx_descriptor * diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 6f03f580c492..552c5972c75e 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -198,7 +198,7 @@ static int d40_seg_size(int size, int data_width1, int data_width2) return seg_max; } -struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, +static struct d40_phy_lli 
*d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, int psize, diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index e93f394187a3..a5d71714ebf7 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -312,17 +312,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, u32 data_width2, int psize); -struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, - dma_addr_t data, - u32 data_size, - int psize, - dma_addr_t next_lli, - u32 reg_cfg, - bool term_int, - u32 data_width1, - u32 data_width2, - bool is_device); - void d40_phy_lli_write(void __iomem *virtbase, u32 phy_chan_num, struct d40_phy_lli *lli_dst, -- cgit v1.2.3 From 5f81158f90db4bc8a79e91736aa3afce8e590e46 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:18 +0100 Subject: dma40: combine desc init functions The desc init code can be shared between the mem and slave prep routines. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 76 +++++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0a20179349b0..5259a9832435 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1617,6 +1617,35 @@ static u32 stedma40_residue(struct dma_chan *chan) return bytes_left; } +static struct d40_desc * +d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, + unsigned int sg_len, unsigned long dma_flags) +{ + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + struct d40_desc *desc; + + desc = d40_desc_get(chan); + if (!desc) + return NULL; + + desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, + cfg->dst_info.data_width); + if (desc->lli_len < 0) { + chan_err(chan, "Unaligned size\n"); + d40_desc_free(chan, desc); + + return NULL; + } + + desc->lli_current = 0; + desc->txd.flags = dma_flags; + desc->txd.tx_submit = d40_tx_submit; + + dma_async_tx_descriptor_init(&desc->txd, &chan->chan); + + return desc; +} + struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, struct scatterlist *sgl_dst, struct scatterlist *sgl_src, @@ -1635,22 +1664,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, } spin_lock_irqsave(&d40c->lock, flags); - d40d = d40_desc_get(d40c); - if (d40d == NULL) + d40d = d40_prep_desc(d40c, sgl_dst, sgl_len, dma_flags); + if (!d40d) goto err; - d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - goto err; - } - - d40d->lli_current = 0; - d40d->txd.flags = dma_flags; - if (chan_is_logical(d40c)) { if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { @@ -1708,10 +1726,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40d->lli_pool.size, DMA_TO_DEVICE); } - dma_async_tx_descriptor_init(&d40d->txd, chan); - - d40d->txd.tx_submit = d40_tx_submit; - spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; @@ -1900,21 +1914,11 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, dma_addr_t dev_addr = 0; int total_size; - d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - return -EINVAL; - } - if 
(d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { chan_err(d40c, "Out of memory\n"); return -ENOMEM; } - d40d->lli_current = 0; - if (direction == DMA_FROM_DEVICE) if (d40c->runtime_addr) dev_addr = d40c->runtime_addr; @@ -1954,21 +1958,11 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, dma_addr_t dst_dev_addr; int res; - d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - if (d40d->lli_len < 0) { - chan_err(d40c, "Unaligned size\n"); - return -EINVAL; - } - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { chan_err(d40c, "Out of memory\n"); return -ENOMEM; } - d40d->lli_current = 0; - if (direction == DMA_FROM_DEVICE) { dst_dev_addr = 0; if (d40c->runtime_addr) @@ -2031,8 +2025,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, } spin_lock_irqsave(&d40c->lock, flags); - d40d = d40_desc_get(d40c); + d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags); if (d40d == NULL) goto err; @@ -2048,12 +2042,6 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, goto err; } - d40d->txd.flags = dma_flags; - - dma_async_tx_descriptor_init(&d40d->txd, chan); - - d40d->txd.tx_submit = d40_tx_submit; - spin_unlock_irqrestore(&d40c->lock, flags); return &d40d->txd; -- cgit v1.2.3 From dbd887880320b6a56811bb38ff4ad888728c3a91 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:19 +0100 Subject: dma40: combine duplicated d40_pool_lli_alloc() calls Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 5259a9832435..495f9eb0a4b6 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -332,8 +332,9 @@ static void __iomem *chan_base(struct d40_chan *chan) d40_err(chan2dev(d40c), format, ## arg) static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, - int lli_len, bool is_log) + int lli_len) { + bool is_log = chan_is_logical(d40c); u32 align; void *base; @@ -1623,6 +1624,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, { struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct d40_desc *desc; + int ret; desc = d40_desc_get(chan); if (!desc) @@ -1632,11 +1634,16 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, cfg->dst_info.data_width); if (desc->lli_len < 0) { chan_err(chan, "Unaligned size\n"); - d40_desc_free(chan, desc); + goto err; + } - return NULL; + ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); + if (ret < 0) { + chan_err(chan, "Could not allocate lli\n"); + goto err; } + desc->lli_current = 0; desc->txd.flags = dma_flags; desc->txd.tx_submit = d40_tx_submit; @@ -1644,6 +1651,10 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, dma_async_tx_descriptor_init(&desc->txd, &chan->chan); return desc; + +err: + d40_desc_free(chan, desc); + return NULL; } struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, @@ -1670,12 +1681,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, goto err; if (chan_is_logical(d40c)) { - - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - (void) d40_log_sg_to_lli(sgl_src, sgl_len, d40d->lli_log.src, @@ -1690,11 
+1695,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, d40c->dma_cfg.dst_info.data_width, d40c->dma_cfg.src_info.data_width); } else { - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { - chan_err(d40c, "Out of memory\n"); - goto err; - } - res = d40_phy_sg_to_lli(sgl_src, sgl_len, 0, @@ -1914,11 +1914,6 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, dma_addr_t dev_addr = 0; int total_size; - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) { - chan_err(d40c, "Out of memory\n"); - return -ENOMEM; - } - if (direction == DMA_FROM_DEVICE) if (d40c->runtime_addr) dev_addr = d40c->runtime_addr; @@ -1958,11 +1953,6 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, dma_addr_t dst_dev_addr; int res; - if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) { - chan_err(d40c, "Out of memory\n"); - return -ENOMEM; - } - if (direction == DMA_FROM_DEVICE) { dst_dev_addr = 0; if (d40c->runtime_addr) -- cgit v1.2.3 From 00ac0341486ffe212f45ff1fe0780d12a36fffde Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:20 +0100 Subject: dma40: remove duplicated dev addr code Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 66 ++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 495f9eb0a4b6..65b5aad1fc4c 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1909,25 +1909,10 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, - unsigned long dma_flags) + dma_addr_t dev_addr) { - dma_addr_t dev_addr = 0; int total_size; - if (direction == DMA_FROM_DEVICE) - if (d40c->runtime_addr) - dev_addr = d40c->runtime_addr; - else - dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; - else if (direction == DMA_TO_DEVICE) - if (d40c->runtime_addr) - dev_addr = d40c->runtime_addr; - else - dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; - - else - return -EINVAL; - total_size = d40_log_sg_to_dev(sgl, sg_len, &d40d->lli_log, &d40c->log_def, @@ -1947,27 +1932,12 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, struct scatterlist *sgl, unsigned int sgl_len, enum dma_data_direction direction, - unsigned long dma_flags) + dma_addr_t dev_addr) { - dma_addr_t src_dev_addr; - dma_addr_t dst_dev_addr; + dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; + dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; int res; - if (direction == DMA_FROM_DEVICE) { - dst_dev_addr = 0; - if (d40c->runtime_addr) - src_dev_addr = d40c->runtime_addr; - else - src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; - } else if (direction == DMA_TO_DEVICE) { - if (d40c->runtime_addr) - dst_dev_addr = d40c->runtime_addr; - else - dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; - src_dev_addr = 0; - } else - return -EINVAL; - res = d40_phy_sg_to_lli(sgl, sgl_len, src_dev_addr, @@ -1997,6 +1967,24 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, return 0; } +static dma_addr_t +d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) +{ + struct stedma40_platform_data *plat = chan->base->plat_data; + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + dma_addr_t addr; + + if (chan->runtime_addr) + return chan->runtime_addr; + + if (direction == DMA_FROM_DEVICE) + addr = plat->dev_rx[cfg->src_dev_type]; + else if (direction == DMA_TO_DEVICE) + addr = plat->dev_tx[cfg->dst_dev_type]; + + return addr; +} + static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, @@ -2006,6 +1994,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + dma_addr_t dev_addr; unsigned long flags; int err; @@ -2014,18 +2003,23 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, return ERR_PTR(-EINVAL); } + if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) + return NULL; + spin_lock_irqsave(&d40c->lock, flags); d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags); if (d40d == NULL) goto err; + dev_addr = d40_get_dev_addr(d40c, direction); + if (chan_is_logical(d40c)) err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, - direction, dma_flags); + direction, dev_addr); else err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, - direction, dma_flags); + direction, dev_addr); if (err) { chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", chan_is_logical(d40c) ? 
"log" : "phy", err); -- cgit v1.2.3 From 3e3a0763e78b520dac5fde569c42664863336d94 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:21 +0100 Subject: dma40: combine mem and slave sg-to-lli functions Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 194 +++++++++++++++++++++--------------------------- 1 file changed, 84 insertions(+), 110 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 65b5aad1fc4c..8c6abc23db80 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1618,6 +1618,81 @@ static u32 stedma40_residue(struct dma_chan *chan) return bytes_left; } +static int +d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, + struct scatterlist *sg_src, struct scatterlist *sg_dst, + unsigned int sg_len, enum dma_data_direction direction, + dma_addr_t dev_addr) +{ + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + struct stedma40_half_channel_info *src_info = &cfg->src_info; + struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + + if (direction == DMA_NONE) { + /* memcpy */ + (void) d40_log_sg_to_lli(sg_src, sg_len, + desc->lli_log.src, + chan->log_def.lcsp1, + src_info->data_width, + dst_info->data_width); + + (void) d40_log_sg_to_lli(sg_dst, sg_len, + desc->lli_log.dst, + chan->log_def.lcsp3, + dst_info->data_width, + src_info->data_width); + } else { + unsigned int total_size; + + total_size = d40_log_sg_to_dev(sg_src, sg_len, + &desc->lli_log, + &chan->log_def, + src_info->data_width, + dst_info->data_width, + direction, dev_addr); + if (total_size < 0) + return -EINVAL; + } + + return 0; +} + +static int +d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, + struct scatterlist *sg_src, struct scatterlist *sg_dst, + unsigned int sg_len, enum dma_data_direction direction, + dma_addr_t dev_addr) +{ + dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; + dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? dev_addr : 0; + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + struct stedma40_half_channel_info *src_info = &cfg->src_info; + struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + int ret; + + ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, + desc->lli_phy.src, + virt_to_phys(desc->lli_phy.src), + chan->src_def_cfg, + src_info->data_width, + dst_info->data_width, + src_info->psize); + + ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, + desc->lli_phy.dst, + virt_to_phys(desc->lli_phy.dst), + chan->dst_def_cfg, + dst_info->data_width, + src_info->data_width, + dst_info->psize); + + dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, + desc->lli_pool.size, DMA_TO_DEVICE); + + return ret < 0 ? 
ret : 0; +} + + static struct d40_desc * d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, unsigned int sg_len, unsigned long dma_flags) @@ -1663,7 +1738,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, unsigned int sgl_len, unsigned long dma_flags) { - int res; struct d40_desc *d40d; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); @@ -1681,49 +1755,11 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, goto err; if (chan_is_logical(d40c)) { - (void) d40_log_sg_to_lli(sgl_src, - sgl_len, - d40d->lli_log.src, - d40c->log_def.lcsp1, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width); - - (void) d40_log_sg_to_lli(sgl_dst, - sgl_len, - d40d->lli_log.dst, - d40c->log_def.lcsp3, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width); + d40_prep_sg_log(d40c, d40d, sgl_src, sgl_dst, + sgl_len, DMA_NONE, 0); } else { - res = d40_phy_sg_to_lli(sgl_src, - sgl_len, - 0, - d40d->lli_phy.src, - virt_to_phys(d40d->lli_phy.src), - d40c->src_def_cfg, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.psize); - - if (res < 0) - goto err; - - res = d40_phy_sg_to_lli(sgl_dst, - sgl_len, - 0, - d40d->lli_phy.dst, - virt_to_phys(d40d->lli_phy.dst), - d40c->dst_def_cfg, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.psize); - - if (res < 0) - goto err; - - dma_sync_single_for_device(d40c->base->dev, - d40d->lli_pool.dma_addr, - d40d->lli_pool.size, DMA_TO_DEVICE); + d40_prep_sg_phy(d40c, d40d, sgl_src, sgl_dst, + sgl_len, DMA_NONE, 0); } spin_unlock_irqrestore(&d40c->lock, flags); @@ -1904,69 +1940,6 @@ d40_prep_sg(struct dma_chan *chan, return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); } -static int d40_prep_slave_sg_log(struct d40_desc *d40d, - struct d40_chan *d40c, - struct scatterlist *sgl, - unsigned int sg_len, - enum dma_data_direction direction, - dma_addr_t dev_addr) -{ - int total_size; - - total_size = d40_log_sg_to_dev(sgl, sg_len, - &d40d->lli_log, - &d40c->log_def, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - direction, - dev_addr); - - if (total_size < 0) - return -EINVAL; - - return 0; -} - -static int d40_prep_slave_sg_phy(struct d40_desc *d40d, - struct d40_chan *d40c, - struct scatterlist *sgl, - unsigned int sgl_len, - enum dma_data_direction direction, - dma_addr_t dev_addr) -{ - dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; - dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; - int res; - - res = d40_phy_sg_to_lli(sgl, - sgl_len, - src_dev_addr, - d40d->lli_phy.src, - virt_to_phys(d40d->lli_phy.src), - d40c->src_def_cfg, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.psize); - if (res < 0) - return res; - - res = d40_phy_sg_to_lli(sgl, - sgl_len, - dst_dev_addr, - d40d->lli_phy.dst, - virt_to_phys(d40d->lli_phy.dst), - d40c->dst_def_cfg, - d40c->dma_cfg.dst_info.data_width, - d40c->dma_cfg.src_info.data_width, - d40c->dma_cfg.dst_info.psize); - if (res < 0) - return res; - - dma_sync_single_for_device(d40c->base->dev, d40d->lli_pool.dma_addr, - d40d->lli_pool.size, DMA_TO_DEVICE); - return 0; -} - static dma_addr_t d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) { @@ -2015,11 +1988,12 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, dev_addr = d40_get_dev_addr(d40c, direction); if (chan_is_logical(d40c)) - err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, - direction, dev_addr); + err = d40_prep_sg_log(d40c, d40d, sgl, NULL, + sg_len, direction, dev_addr); else - err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, - direction, dev_addr); + err = d40_prep_sg_phy(d40c, d40d, sgl, NULL, + sg_len, direction, dev_addr); + if (err) { chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", chan_is_logical(d40c) ? "log" : "phy", err); -- cgit v1.2.3 From 10a946b3a4e1ad665a81981cbe33c3d3903cd7da Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:22 +0100 Subject: dma40: remove export of stedma40_memcpy_sg The dmaengine framework has the API for this now. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- arch/arm/plat-nomadik/include/plat/ste_dma40.h | 19 ------------------- drivers/dma/ste_dma40.c | 2 +- 2 files changed, 1 insertion(+), 20 deletions(-) (limited to 'drivers') diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index 8358f857a29e..c44886062f8e 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -171,25 +171,6 @@ struct stedma40_platform_data { bool stedma40_filter(struct dma_chan *chan, void *data); -/** - * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from - * scattergatter lists. - * - * @chan: dmaengine handle - * @sgl_dst: Destination scatter list - * @sgl_src: Source scatter list - * @sgl_len: The length of each scatterlist. Both lists must be of equal length - * and each element must match the corresponding element in the other scatter - * list. - * @flags: is actually enum dma_ctrl_flags. 
See dmaengine.h - */ - -struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, - struct scatterlist *sgl_dst, - struct scatterlist *sgl_src, - unsigned int sgl_len, - unsigned long flags); - /** * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave * (=device) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8c6abc23db80..0f5d61720ab8 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1733,6 +1733,7 @@ err: } struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, +static struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, struct scatterlist *sgl_dst, struct scatterlist *sgl_src, unsigned int sgl_len, @@ -1771,7 +1772,6 @@ err: spin_unlock_irqrestore(&d40c->lock, flags); return NULL; } -EXPORT_SYMBOL(stedma40_memcpy_sg); bool stedma40_filter(struct dma_chan *chan, void *data) { -- cgit v1.2.3 From cade1d30b2e071a687011c2a38c03ed7187ec501 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:23 +0100 Subject: dma40: combine mem and slave prep_sg functions Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 156 +++++++++++++++++++----------------------------- 1 file changed, 62 insertions(+), 94 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 0f5d61720ab8..4e9d6c5a7134 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1732,44 +1732,70 @@ err: return NULL; } -struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, -static struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, - struct scatterlist *sgl_dst, - struct scatterlist *sgl_src, - unsigned int sgl_len, - unsigned long dma_flags) +static dma_addr_t +d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) { - struct d40_desc *d40d; - struct d40_chan *d40c = container_of(chan, struct d40_chan, - chan); + struct stedma40_platform_data *plat = chan->base->plat_data; + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + dma_addr_t addr; + + if (chan->runtime_addr) + return chan->runtime_addr; + + if (direction == DMA_FROM_DEVICE) + addr = plat->dev_rx[cfg->src_dev_type]; + else if (direction == DMA_TO_DEVICE) + addr = plat->dev_tx[cfg->dst_dev_type]; + + return addr; +} + +static struct dma_async_tx_descriptor * +d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, + struct scatterlist *sg_dst, unsigned int sg_len, + enum dma_data_direction direction, unsigned long dma_flags) +{ + struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); + dma_addr_t dev_addr = 0; + struct d40_desc *desc; unsigned long flags; + int ret; - if (d40c->phy_chan == NULL) { - chan_err(d40c, "Unallocated channel.\n"); - return ERR_PTR(-EINVAL); + if (!chan->phy_chan) { + chan_err(chan, "Cannot prepare unallocated channel\n"); + return NULL; } - spin_lock_irqsave(&d40c->lock, flags); + spin_lock_irqsave(&chan->lock, flags); - d40d = d40_prep_desc(d40c, sgl_dst, sgl_len, dma_flags); - if (!d40d) + desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); + if (desc == NULL) goto err; - if (chan_is_logical(d40c)) { - d40_prep_sg_log(d40c, d40d, sgl_src, sgl_dst, - sgl_len, DMA_NONE, 0); - } else { - d40_prep_sg_phy(d40c, d40d, sgl_src, sgl_dst, - sgl_len, DMA_NONE, 0); + if (direction != DMA_NONE) + dev_addr = d40_get_dev_addr(chan, direction); + + if (chan_is_logical(chan)) + 
ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, + sg_len, direction, dev_addr); + else + ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, + sg_len, direction, dev_addr); + + if (ret) { + chan_err(chan, "Failed to prepare %s sg job: %d\n", + chan_is_logical(chan) ? "log" : "phy", ret); + goto err; } - spin_unlock_irqrestore(&d40c->lock, flags); + spin_unlock_irqrestore(&chan->lock, flags); + + return &desc->txd; - return &d40d->txd; err: - if (d40d) - d40_desc_free(d40c, d40d); - spin_unlock_irqrestore(&d40c->lock, flags); + if (desc) + d40_desc_free(chan, desc); + spin_unlock_irqrestore(&chan->lock, flags); return NULL; } @@ -1925,37 +1951,19 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, sg_dma_len(&dst_sg) = size; sg_dma_len(&src_sg) = size; - return stedma40_memcpy_sg(chan, &dst_sg, &src_sg, 1, dma_flags); + return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); } static struct dma_async_tx_descriptor * -d40_prep_sg(struct dma_chan *chan, - struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long dma_flags) +d40_prep_memcpy_sg(struct dma_chan *chan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long dma_flags) { if (dst_nents != src_nents) return NULL; - return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); -} - -static dma_addr_t -d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) -{ - struct stedma40_platform_data *plat = chan->base->plat_data; - struct stedma40_chan_cfg *cfg = &chan->dma_cfg; - dma_addr_t addr; - - if (chan->runtime_addr) - return chan->runtime_addr; - - if (direction == DMA_FROM_DEVICE) - addr = plat->dev_rx[cfg->src_dev_type]; - else if (direction == DMA_TO_DEVICE) - addr = plat->dev_tx[cfg->dst_dev_type]; - - return addr; + return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); } static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, @@ -1964,50 +1972,10 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, enum dma_data_direction direction, unsigned long dma_flags) { - struct d40_desc *d40d; - struct d40_chan *d40c = container_of(chan, struct d40_chan, - chan); - dma_addr_t dev_addr; - unsigned long flags; - int err; - - if (d40c->phy_chan == NULL) { - chan_err(d40c, "Cannot prepare unallocated channel\n"); - return ERR_PTR(-EINVAL); - } - if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) return NULL; - spin_lock_irqsave(&d40c->lock, flags); - - d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags); - if (d40d == NULL) - goto err; - - dev_addr = d40_get_dev_addr(d40c, direction); - - if (chan_is_logical(d40c)) - err = d40_prep_sg_log(d40c, d40d, sgl, NULL, - sg_len, direction, dev_addr); - else - err = d40_prep_sg_phy(d40c, d40d, sgl, NULL, - sg_len, direction, dev_addr); - - if (err) { - chan_err(d40c, "Failed to prepare %s slave sg job: %d\n", - chan_is_logical(d40c) ? 
"log" : "phy", err); - goto err; - } - - spin_unlock_irqrestore(&d40c->lock, flags); - return &d40d->txd; - -err: - if (d40d) - d40_desc_free(d40c, d40d); - spin_unlock_irqrestore(&d40c->lock, flags); - return NULL; + return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); } static enum dma_status d40_tx_status(struct dma_chan *chan, @@ -2267,7 +2235,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; base->dma_slave.device_free_chan_resources = d40_free_chan_resources; base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_sg; + base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; base->dma_slave.device_tx_status = d40_tx_status; base->dma_slave.device_issue_pending = d40_issue_pending; @@ -2291,7 +2259,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_sg; + base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; base->dma_memcpy.device_tx_status = d40_tx_status; base->dma_memcpy.device_issue_pending = d40_issue_pending; @@ -2322,7 +2290,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; base->dma_both.device_free_chan_resources = d40_free_chan_resources; base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_sg; + base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; base->dma_both.device_tx_status = d40_tx_status; base->dma_both.device_issue_pending = d40_issue_pending; -- cgit v1.2.3 From 1c4b0927feab41346b0be971e0287aaf46eba8e0 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:24 +0100 Subject: dma40: move lli_load to main source file These register writes are better placed in the main source file rather than ll.c. 
Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 22 ++++++++++++++++++---- drivers/dma/ste_dma40_ll.c | 26 -------------------------- drivers/dma/ste_dma40_ll.h | 5 ----- 3 files changed, 18 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4e9d6c5a7134..6a7a00d5d682 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -504,15 +504,29 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) list_add_tail(&desc->node, &d40c->active); } +static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) +{ + struct d40_phy_lli *lli_dst = desc->lli_phy.dst; + struct d40_phy_lli *lli_src = desc->lli_phy.src; + void __iomem *base = chan_base(chan); + + writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); + writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); + writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); + writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); + + writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); + writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); + writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); + writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); +} + static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) { int curr_lcla = -EINVAL, next_lcla; if (chan_is_physical(d40c)) { - d40_phy_lli_write(d40c->base->virtbase, - d40c->phy_chan->num, - d40d->lli_phy.dst, - d40d->lli_phy.src); + d40_phy_lli_load(d40c, d40d); d40d->lli_current = d40d->lli_len; } else { diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 552c5972c75e..fd7525162596 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -295,32 +295,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, } -void d40_phy_lli_write(void __iomem *virtbase, - u32 phy_chan_num, - struct d40_phy_lli *lli_dst, - struct d40_phy_lli *lli_src) -{ - - writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG); - writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); - writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR); - writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK); - - writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG); - writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); - writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR); - writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE + - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK); - -} - /* DMA logical lli operations */ static void d40_log_lli_link(struct d40_log_lli *lli_dst, diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index a5d71714ebf7..46578a661b28 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -312,11 +312,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, u32 data_width2, int psize); -void d40_phy_lli_write(void __iomem *virtbase, - u32 phy_chan_num, - struct d40_phy_lli *lli_dst, - struct d40_phy_lli *lli_src); - /* Logical channels */ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, -- cgit v1.2.3 From 
e24b36bdf873b4a64545fd66da13877214d235cf Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:25 +0100 Subject: dma40: combine duplicated code in log_sg_to_dev Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 52 ++++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index fd7525162596..fa6c3ab93fae 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -385,40 +385,34 @@ int d40_log_sg_to_dev(struct scatterlist *sg, struct d40_log_lli *lli_dst = lli->dst; for_each_sg(sg, current_sg, sg_len, i) { - total_size += sg_dma_len(current_sg); + dma_addr_t sg_addr = sg_dma_address(current_sg); + unsigned int len = sg_dma_len(current_sg); + dma_addr_t src; + dma_addr_t dst; + + total_size += len; if (direction == DMA_TO_DEVICE) { - lli_src = - d40_log_buf_to_lli(lli_src, - sg_dma_address(current_sg), - sg_dma_len(current_sg), - lcsp->lcsp1, src_data_width, - dst_data_width, - true); - lli_dst = - d40_log_buf_to_lli(lli_dst, - dev_addr, - sg_dma_len(current_sg), - lcsp->lcsp3, dst_data_width, - src_data_width, - false); + src = sg_addr; + dst = dev_addr; } else { - lli_dst = - d40_log_buf_to_lli(lli_dst, - sg_dma_address(current_sg), - sg_dma_len(current_sg), - lcsp->lcsp3, dst_data_width, - src_data_width, - true); - lli_src = - d40_log_buf_to_lli(lli_src, - dev_addr, - sg_dma_len(current_sg), - lcsp->lcsp1, src_data_width, - dst_data_width, - false); + src = dev_addr; + dst = sg_addr; } + + lli_src = d40_log_buf_to_lli(lli_src, src, len, + lcsp->lcsp1, + src_data_width, + dst_data_width, + src == sg_addr); + + lli_dst = d40_log_buf_to_lli(lli_dst, dst, len, + lcsp->lcsp3, + dst_data_width, + src_data_width, + dst == sg_addr); } + return total_size; } -- cgit v1.2.3 From 5ed04b8575cb22920b1333aeb55121339449048f Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:26 +0100 Subject: dma40: unify d40_log_sg_to_lli funcs for mem and slave Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 41 ++++++++++++------------------ drivers/dma/ste_dma40_ll.c | 63 +++++++++------------------------------------- drivers/dma/ste_dma40_ll.h | 10 +------- 3 files changed, 29 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6a7a00d5d682..c597dba713b0 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1638,37 +1638,28 @@ d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, unsigned int sg_len, enum dma_data_direction direction, dma_addr_t dev_addr) { + dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; + dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + int ret; - if (direction == DMA_NONE) { - /* memcpy */ - (void) d40_log_sg_to_lli(sg_src, sg_len, - desc->lli_log.src, - chan->log_def.lcsp1, - src_info->data_width, - dst_info->data_width); - - (void) d40_log_sg_to_lli(sg_dst, sg_len, - desc->lli_log.dst, - chan->log_def.lcsp3, - dst_info->data_width, - src_info->data_width); - } else { - unsigned int total_size; + ret = d40_log_sg_to_lli(sg_src, sg_len, + src_dev_addr, + desc->lli_log.src, + chan->log_def.lcsp1, + src_info->data_width, + dst_info->data_width); - total_size = d40_log_sg_to_dev(sg_src, sg_len, - &desc->lli_log, - &chan->log_def, - src_info->data_width, - dst_info->data_width, - direction, dev_addr); - if (total_size < 0) - return -EINVAL; - } + ret = d40_log_sg_to_lli(sg_dst, sg_len, + dst_dev_addr, + desc->lli_log.dst, + chan->log_def.lcsp3, + dst_info->data_width, + src_info->data_width); - return 0; + return ret < 0 ? ret : 0; } static int diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index fa6c3ab93fae..9935c6dbcfe0 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -369,53 +369,6 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, } -int d40_log_sg_to_dev(struct scatterlist *sg, - int sg_len, - struct d40_log_lli_bidir *lli, - struct d40_def_lcsp *lcsp, - u32 src_data_width, - u32 dst_data_width, - enum dma_data_direction direction, - dma_addr_t dev_addr) -{ - int total_size = 0; - struct scatterlist *current_sg = sg; - int i; - struct d40_log_lli *lli_src = lli->src; - struct d40_log_lli *lli_dst = lli->dst; - - for_each_sg(sg, current_sg, sg_len, i) { - dma_addr_t sg_addr = sg_dma_address(current_sg); - unsigned int len = sg_dma_len(current_sg); - dma_addr_t src; - dma_addr_t dst; - - total_size += len; - - if (direction == DMA_TO_DEVICE) { - src = sg_addr; - dst = dev_addr; - } else { - src = dev_addr; - dst = sg_addr; - } - - lli_src = d40_log_buf_to_lli(lli_src, src, len, - lcsp->lcsp1, - src_data_width, - dst_data_width, - src == sg_addr); - - lli_dst = d40_log_buf_to_lli(lli_dst, dst, len, - lcsp->lcsp3, - dst_data_width, - src_data_width, - dst == sg_addr); - } - - return total_size; -} - struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, dma_addr_t addr, int size, @@ -447,6 +400,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, + dma_addr_t dev_addr, struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ u32 data_width1, u32 data_width2) @@ -455,14 +409,21 @@ int d40_log_sg_to_lli(struct scatterlist *sg, struct scatterlist *current_sg = sg; int i; struct d40_log_lli *lli = lli_sg; + bool autoinc = !dev_addr; for_each_sg(sg, current_sg, sg_len, i) { + dma_addr_t sg_addr = sg_dma_address(current_sg); + unsigned int len = sg_dma_len(current_sg); + dma_addr_t addr = dev_addr ?: sg_addr; + total_size += sg_dma_len(current_sg); - lli = d40_log_buf_to_lli(lli, - sg_dma_address(current_sg), - sg_dma_len(current_sg), + + lli = d40_log_buf_to_lli(lli, addr, len, lcsp13, - data_width1, data_width2, true); + data_width1, + data_width2, + autoinc); } + return total_size; } diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 46578a661b28..867f23f6e098 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -321,17 +321,9 @@ struct d40_log_lli 
*d40_log_buf_to_lli(struct d40_log_lli *lli_sg, u32 data_width1, u32 data_width2, bool addr_inc); -int d40_log_sg_to_dev(struct scatterlist *sg, - int sg_len, - struct d40_log_lli_bidir *lli, - struct d40_def_lcsp *lcsp, - u32 src_data_width, - u32 dst_data_width, - enum dma_data_direction direction, - dma_addr_t dev_addr); - int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, + dma_addr_t dev_addr, struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ u32 data_width1, u32 data_width2); -- cgit v1.2.3 From cc31b6f7949efd46c5f13d0758cf7b0bcb71fae2 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:27 +0100 Subject: dma40: pass the info pointer all the way to reduce argument count Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 8 ++------ drivers/dma/ste_dma40_ll.c | 42 ++++++++++++++++++------------------------ drivers/dma/ste_dma40_ll.h | 5 ++--- 3 files changed, 22 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c597dba713b0..3c61c582303d 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1679,17 +1679,13 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, desc->lli_phy.src, virt_to_phys(desc->lli_phy.src), chan->src_def_cfg, - src_info->data_width, - dst_info->data_width, - src_info->psize); + src_info, dst_info); ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, desc->lli_phy.dst, virt_to_phys(desc->lli_phy.dst), chan->dst_def_cfg, - dst_info->data_width, - src_info->data_width, - dst_info->psize); + dst_info, src_info); dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, desc->lli_pool.size, DMA_TO_DEVICE); diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 9935c6dbcfe0..509a13049d7e 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -125,13 +125,14 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, static int d40_phy_fill_lli(struct d40_phy_lli *lli, dma_addr_t data, u32 data_size, - int psize, dma_addr_t next_lli, u32 reg_cfg, bool term_int, - u32 data_width, - bool is_device) + bool is_device, + struct stedma40_half_channel_info *info) { + unsigned int data_width = info->data_width; + int psize = info->psize; int num_elems; if (psize == STEDMA40_PSIZE_PHY_1) @@ -198,16 +199,11 @@ static int d40_seg_size(int size, int data_width1, int data_width2) return seg_max; } -static struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, - dma_addr_t addr, - u32 size, - int psize, - dma_addr_t lli_phys, - u32 reg_cfg, - bool term_int, - u32 data_width1, - u32 data_width2, - bool is_device) +static struct d40_phy_lli * +d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, + dma_addr_t lli_phys, u32 reg_cfg, bool term_int, + bool is_device, struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo) { int err; dma_addr_t next = lli_phys; @@ -215,7 +211,8 @@ static struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, int size_seg = 0; do { - size_seg = d40_seg_size(size_rest, data_width1, data_width2); + size_seg = d40_seg_size(size_rest, info->data_width, + otherinfo->data_width); size_rest -= size_seg; if (term_int && size_rest == 0) @@ -227,12 +224,11 @@ static struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, err = d40_phy_fill_lli(lli, addr, size_seg, - psize, next, reg_cfg, !next, - data_width1, - 
is_device); + is_device, + info); if (err) goto err; @@ -254,9 +250,8 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, struct d40_phy_lli *lli_sg, dma_addr_t lli_phys, u32 reg_cfg, - u32 data_width1, - u32 data_width2, - int psize) + struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo) { int total_size = 0; int i; @@ -280,13 +275,12 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, lli = d40_phy_buf_to_lli(lli, dst, sg_dma_len(current_sg), - psize, l_phys, reg_cfg, sg_len - 1 == i, - data_width1, - data_width2, - target == dst); + target == dst, + info, + otherinfo); if (lli == NULL) return -EINVAL; } diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 867f23f6e098..cd94afd9c2f2 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -308,9 +308,8 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, struct d40_phy_lli *lli, dma_addr_t lli_phys, u32 reg_cfg, - u32 data_width1, - u32 data_width2, - int psize); + struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo); /* Logical channels */ -- cgit v1.2.3 From 822c567639971628ceba2c53531670d595e3164d Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:28 +0100 Subject: dma40: unify src/dst addr check Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 3c61c582303d..d9dfda2c65b5 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1635,11 +1635,9 @@ static u32 stedma40_residue(struct dma_chan *chan) static int d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, struct scatterlist *sg_src, struct scatterlist *sg_dst, - unsigned int sg_len, enum dma_data_direction direction, - dma_addr_t dev_addr) + unsigned int sg_len, dma_addr_t src_dev_addr, + dma_addr_t dst_dev_addr) { - dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; - dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? dev_addr : 0; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; @@ -1665,11 +1663,9 @@ d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, static int d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, struct scatterlist *sg_src, struct scatterlist *sg_dst, - unsigned int sg_len, enum dma_data_direction direction, - dma_addr_t dev_addr) + unsigned int sg_len, dma_addr_t src_dev_addr, + dma_addr_t dst_dev_addr) { - dma_addr_t src_dev_addr = direction == DMA_FROM_DEVICE ? dev_addr : 0; - dma_addr_t dst_dev_addr = direction == DMA_TO_DEVICE ? 
dev_addr : 0; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; @@ -1757,7 +1753,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, enum dma_data_direction direction, unsigned long dma_flags) { struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); - dma_addr_t dev_addr = 0; + dma_addr_t src_dev_addr = 0; + dma_addr_t dst_dev_addr = 0; struct d40_desc *desc; unsigned long flags; int ret; @@ -1773,15 +1770,21 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (desc == NULL) goto err; - if (direction != DMA_NONE) - dev_addr = d40_get_dev_addr(chan, direction); + if (direction != DMA_NONE) { + dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); + + if (direction == DMA_FROM_DEVICE) + src_dev_addr = dev_addr; + else if (direction == DMA_TO_DEVICE) + dst_dev_addr = dev_addr; + } if (chan_is_logical(chan)) ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, - sg_len, direction, dev_addr); + sg_len, src_dev_addr, dst_dev_addr); else ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, - sg_len, direction, dev_addr); + sg_len, src_dev_addr, dst_dev_addr); if (ret) { chan_err(chan, "Failed to prepare %s sg job: %d\n", -- cgit v1.2.3 From 1f7622ca55b1f5875e32140b4781759f800aded3 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:29 +0100 Subject: dma40: make d40_log_buf_to_lli static Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 2 +- drivers/dma/ste_dma40_ll.h | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 509a13049d7e..876aad2c838a 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -363,7 +363,7 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, } -struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, +static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, dma_addr_t addr, int size, u32 lcsp13, /* src or dst*/ diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index cd94afd9c2f2..4626c8874374 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -313,13 +313,6 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, /* Logical channels */ -struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, - dma_addr_t addr, - int size, - u32 lcsp13, /* src or dst*/ - u32 data_width1, u32 data_width2, - bool addr_inc); - int d40_log_sg_to_lli(struct scatterlist *sg, int sg_len, dma_addr_t dev_addr, -- cgit v1.2.3 From 7f933bed96e9872131014ea2bdd5b012e43fc316 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:30 +0100 Subject: dma40: use flags to reduce parameter count Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40_ll.c | 84 ++++++++++++++++++++++++++-------------------- drivers/dma/ste_dma40_ll.h | 5 +++ 2 files changed, 52 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 876aad2c838a..88b9e371be2f 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -127,10 +127,11 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, u32 data_size, dma_addr_t next_lli, u32 
reg_cfg, - bool term_int, - bool is_device, - struct stedma40_half_channel_info *info) + struct stedma40_half_channel_info *info, + unsigned int flags) { + bool addr_inc = flags & LLI_ADDR_INC; + bool term_int = flags & LLI_TERM_INT; unsigned int data_width = info->data_width; int psize = info->psize; int num_elems; @@ -155,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli, * Distance to next element sized entry. * Usually the size of the element unless you want gaps. */ - if (!is_device) + if (addr_inc) lli->reg_elt |= (0x1 << data_width) << D40_SREG_ELEM_PHY_EIDX_POS; @@ -201,40 +202,45 @@ static int d40_seg_size(int size, int data_width1, int data_width2) static struct d40_phy_lli * d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, - dma_addr_t lli_phys, u32 reg_cfg, bool term_int, - bool is_device, struct stedma40_half_channel_info *info, - struct stedma40_half_channel_info *otherinfo) + dma_addr_t lli_phys, u32 reg_cfg, + struct stedma40_half_channel_info *info, + struct stedma40_half_channel_info *otherinfo, + unsigned long flags) { + bool addr_inc = flags & LLI_ADDR_INC; + bool term_int = flags & LLI_TERM_INT; int err; dma_addr_t next = lli_phys; int size_rest = size; int size_seg = 0; + /* + * This piece may be split up based on d40_seg_size(); we only want the + * term int on the last part. + */ + if (term_int) + flags &= ~LLI_TERM_INT; + do { size_seg = d40_seg_size(size_rest, info->data_width, otherinfo->data_width); size_rest -= size_seg; - if (term_int && size_rest == 0) + if (term_int && size_rest == 0) { next = 0; - else + flags |= LLI_TERM_INT; + } else next = ALIGN(next + sizeof(struct d40_phy_lli), D40_LLI_ALIGN); - err = d40_phy_fill_lli(lli, - addr, - size_seg, - next, - reg_cfg, - !next, - is_device, - info); + err = d40_phy_fill_lli(lli, addr, size_seg, next, + reg_cfg, info, flags); if (err) goto err; lli++; - if (!is_device) + if (addr_inc) addr += size_seg; } while (size_rest); @@ -256,31 +262,29 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, int total_size = 0; int i; struct scatterlist *current_sg = sg; - dma_addr_t dst; struct d40_phy_lli *lli = lli_sg; dma_addr_t l_phys = lli_phys; + unsigned long flags = 0; + + if (!target) + flags |= LLI_ADDR_INC; for_each_sg(sg, current_sg, sg_len, i) { + dma_addr_t sg_addr = sg_dma_address(current_sg); + unsigned int len = sg_dma_len(current_sg); + dma_addr_t dst = target ?: sg_addr; total_size += sg_dma_len(current_sg); - if (target) - dst = target; - else - dst = sg_dma_address(current_sg); + if (i == sg_len - 1) + flags |= LLI_TERM_INT; l_phys = ALIGN(lli_phys + (lli - lli_sg) * sizeof(struct d40_phy_lli), D40_LLI_ALIGN); - lli = d40_phy_buf_to_lli(lli, - dst, - sg_dma_len(current_sg), - l_phys, - reg_cfg, - sg_len - 1 == i, - target == dst, - info, - otherinfo); + lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, + reg_cfg, info, otherinfo, flags); + if (lli == NULL) return -EINVAL; } @@ -343,8 +347,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli, dma_addr_t data, u32 data_size, u32 reg_cfg, u32 data_width, - bool addr_inc) + unsigned int flags) { + bool addr_inc = flags & LLI_ADDR_INC; + lli->lcsp13 = reg_cfg; /* The number of elements to transfer */ @@ -369,8 +375,9 @@ static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, u32 lcsp13, /* src or dst*/ u32 data_width1, u32 data_width2, - bool addr_inc) + unsigned int flags) { + bool addr_inc = flags & LLI_ADDR_INC; struct d40_log_lli *lli = lli_sg; int size_rest = size; int size_seg = 0; @@ -383,7 +390,7 
@@ static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, addr, size_seg, lcsp13, data_width1, - addr_inc); + flags); if (addr_inc) addr += size_seg; lli++; @@ -403,7 +410,10 @@ int d40_log_sg_to_lli(struct scatterlist *sg, struct scatterlist *current_sg = sg; int i; struct d40_log_lli *lli = lli_sg; - bool autoinc = !dev_addr; + unsigned long flags = 0; + + if (!dev_addr) + flags |= LLI_ADDR_INC; for_each_sg(sg, current_sg, sg_len, i) { dma_addr_t sg_addr = sg_dma_address(current_sg); @@ -416,7 +426,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg, lcsp13, data_width1, data_width2, - autoinc); + flags); } return total_size; diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 4626c8874374..59e72f0cc901 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -293,6 +293,11 @@ struct d40_def_lcsp { /* Physical channels */ +enum d40_lli_flags { + LLI_ADDR_INC = 1 << 0, + LLI_TERM_INT = 1 << 1, +}; + void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg, -- cgit v1.2.3 From e65889c75ccb5b64dfb60f32e2d9448446cabcc7 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:31 +0100 Subject: dma40: extract lcla code into separate function Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 92 +++++++++++++++++++++++++++---------------------- 1 file changed, 50 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d9dfda2c65b5..4ec96ac1e41a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -521,54 +521,62 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); } -static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) -{ - int curr_lcla = -EINVAL, next_lcla; - - if (chan_is_physical(d40c)) { - d40_phy_lli_load(d40c, d40d); - d40d->lli_current = d40d->lli_len; - } else { - - if ((d40d->lli_len - d40d->lli_current) > 1) - curr_lcla = d40_lcla_alloc_one(d40c, d40d); - - d40_log_lli_lcpa_write(d40c->lcpa, - &d40d->lli_log.dst[d40d->lli_current], - &d40d->lli_log.src[d40d->lli_current], - curr_lcla); - - d40d->lli_current++; - for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { - unsigned int lcla_offset = d40c->phy_chan->num * 1024 + - 8 * curr_lcla * 2; - struct d40_lcla_pool *pool = &d40c->base->lcla_pool; - struct d40_log_lli *lcla = pool->base + lcla_offset; - - if (d40d->lli_current + 1 < d40d->lli_len) - next_lcla = d40_lcla_alloc_one(d40c, d40d); - else - next_lcla = -EINVAL; - - d40_log_lli_lcla_write(lcla, - &d40d->lli_log.dst[d40d->lli_current], - &d40d->lli_log.src[d40d->lli_current], - next_lcla); +static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) +{ + struct d40_lcla_pool *pool = &chan->base->lcla_pool; + struct d40_log_lli_bidir *lli = &desc->lli_log; + int lli_current = desc->lli_current; + int lli_len = desc->lli_len; + int curr_lcla = -EINVAL; + + if (lli_len - lli_current > 1) + curr_lcla = d40_lcla_alloc_one(chan, desc); + + d40_log_lli_lcpa_write(chan->lcpa, + &lli->dst[lli_current], + &lli->src[lli_current], + curr_lcla); + + lli_current++; + for (; lli_current < lli_len; lli_current++) { + unsigned int lcla_offset = chan->phy_chan->num * 1024 + + 8 * curr_lcla * 2; + struct d40_log_lli *lcla = pool->base + lcla_offset; + int next_lcla; + + if (lli_current + 1 
< lli_len) + next_lcla = d40_lcla_alloc_one(chan, desc); + else + next_lcla = -EINVAL; - dma_sync_single_range_for_device(d40c->base->dev, - pool->dma_addr, lcla_offset, - 2 * sizeof(struct d40_log_lli), - DMA_TO_DEVICE); + d40_log_lli_lcla_write(lcla, + &lli->dst[lli_current], + &lli->src[lli_current], + next_lcla); - curr_lcla = next_lcla; + dma_sync_single_range_for_device(chan->base->dev, + pool->dma_addr, lcla_offset, + 2 * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); - if (curr_lcla == -EINVAL) { - d40d->lli_current++; - break; - } + curr_lcla = next_lcla; + if (curr_lcla == -EINVAL) { + lli_current++; + break; } } + + desc->lli_current = lli_current; +} + +static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) +{ + if (chan_is_physical(d40c)) { + d40_phy_lli_load(d40c, d40d); + d40d->lli_current = d40d->lli_len; + } else + d40_log_lli_to_lcxa(d40c, d40d); } static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) -- cgit v1.2.3 From 6045f0bb2818393a44e835454db96709cb5b3d80 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:32 +0100 Subject: dma40: handle failure to allocate first LCLA Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4ec96ac1e41a..b8cce85a9558 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -538,6 +538,10 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) curr_lcla); lli_current++; + + if (curr_lcla < 0) + goto out; + for (; lli_current < lli_len; lli_current++) { unsigned int lcla_offset = chan->phy_chan->num * 1024 + 8 * curr_lcla * 2; @@ -567,6 +571,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) } } +out: desc->lli_current = lli_current; } -- cgit v1.2.3 From 7ad74a7cf6f6355fd3f4c15afe63460fc4ec3f57 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:33 +0100 Subject: dma40: fix DMA_SG capability and channels The DMA_SG cap is enabled on the wrong channel, and the pointers are repeatedly set incorrectly. Fix it and combine the ops settings to a common function. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 71 +++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index b8cce85a9558..929fd8f45a2d 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2238,6 +2238,32 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, } } +static void d40_ops_init(struct d40_base *base, struct dma_device *dev) +{ + if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) + dev->device_prep_slave_sg = d40_prep_slave_sg; + + if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { + dev->device_prep_dma_memcpy = d40_prep_memcpy; + + /* + * This controller can only access address at even + * 32bit boundaries, i.e. 
2^2 + */ + dev->copy_align = 2; + } + + if (dma_has_cap(DMA_SG, dev->cap_mask)) + dev->device_prep_dma_sg = d40_prep_memcpy_sg; + + dev->device_alloc_chan_resources = d40_alloc_chan_resources; + dev->device_free_chan_resources = d40_free_chan_resources; + dev->device_issue_pending = d40_issue_pending; + dev->device_tx_status = d40_tx_status; + dev->device_control = d40_control; + dev->dev = base->dev; +} + static int __init d40_dmaengine_init(struct d40_base *base, int num_reserved_chans) { @@ -2249,15 +2275,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_slave.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); - base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; - base->dma_slave.device_free_chan_resources = d40_free_chan_resources; - base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; - base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; - base->dma_slave.device_tx_status = d40_tx_status; - base->dma_slave.device_issue_pending = d40_issue_pending; - base->dma_slave.device_control = d40_control; - base->dma_slave.dev = base->dev; + d40_ops_init(base, &base->dma_slave); err = dma_async_device_register(&base->dma_slave); @@ -2271,22 +2289,9 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_memcpy.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); - dma_cap_set(DMA_SG, base->dma_slave.cap_mask); - - base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; - base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; - base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; - base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; - base->dma_memcpy.device_tx_status = d40_tx_status; - base->dma_memcpy.device_issue_pending = d40_issue_pending; - base->dma_memcpy.device_control = d40_control; - base->dma_memcpy.dev = base->dev; - /* - * This controller can only access address at even - * 32bit boundaries, i.e. 
2^2 - */ - base->dma_memcpy.copy_align = 2; + dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); + + d40_ops_init(base, &base->dma_memcpy); err = dma_async_device_register(&base->dma_memcpy); @@ -2302,18 +2307,10 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_zero(base->dma_both.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); - dma_cap_set(DMA_SG, base->dma_slave.cap_mask); - - base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; - base->dma_both.device_free_chan_resources = d40_free_chan_resources; - base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; - base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg; - base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; - base->dma_both.device_tx_status = d40_tx_status; - base->dma_both.device_issue_pending = d40_issue_pending; - base->dma_both.device_control = d40_control; - base->dma_both.dev = base->dev; - base->dma_both.copy_align = 2; + dma_cap_set(DMA_SG, base->dma_both.cap_mask); + + d40_ops_init(base, &base->dma_both); + err = dma_async_device_register(&base->dma_both); if (err) { -- cgit v1.2.3 From 86eb5fb61125e4646c9447a1f2ce130817dab34e Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:34 +0100 Subject: dma40: stop ongoing transfers in DMA_TERMINATE_ALL The current implementation of DMA_TERMINATE_ALL leaves ongoing transfers running. Fix it. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 929fd8f45a2d..8fd0bb94e777 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -918,10 +918,8 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) return is_link; } -static int d40_pause(struct dma_chan *chan) +static int d40_pause(struct d40_chan *d40c) { - struct d40_chan *d40c = - container_of(chan, struct d40_chan, chan); int res = 0; unsigned long flags; @@ -945,10 +943,8 @@ static int d40_pause(struct dma_chan *chan) return res; } -static int d40_resume(struct dma_chan *chan) +static int d40_resume(struct d40_chan *d40c) { - struct d40_chan *d40c = - container_of(chan, struct d40_chan, chan); int res = 0; unsigned long flags; @@ -978,6 +974,22 @@ no_suspend: return res; } +static int d40_terminate_all(struct d40_chan *chan) +{ + unsigned long flags; + int ret = 0; + + ret = d40_pause(chan); + if (!ret && chan_is_physical(chan)) + ret = d40_channel_execute_command(chan, D40_DMA_STOP); + + spin_lock_irqsave(&chan->lock, flags); + d40_term_all(chan); + spin_unlock_irqrestore(&chan->lock, flags); + + return ret; +} + static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) { struct d40_chan *d40c = container_of(tx->chan, @@ -2176,7 +2188,6 @@ static void d40_set_runtime_config(struct dma_chan *chan, static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { - unsigned long flags; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); if (d40c->phy_chan == NULL) { @@ -2186,14 +2197,11 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, switch (cmd) { case DMA_TERMINATE_ALL: - spin_lock_irqsave(&d40c->lock, flags); - d40_term_all(d40c); - spin_unlock_irqrestore(&d40c->lock, flags); - return 0; + return d40_terminate_all(d40c); case 
DMA_PAUSE: - return d40_pause(chan); + return d40_pause(d40c); case DMA_RESUME: - return d40_resume(chan); + return d40_resume(d40c); case DMA_SLAVE_CONFIG: d40_set_runtime_config(chan, (struct dma_slave_config *) arg); -- cgit v1.2.3 From 0c842b551063c5f7382ac9b457992f3b34972801 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 25 Jan 2011 11:18:35 +0100 Subject: dma40: cyclic xfer support Support cyclic transfers, which are useful for ALSA drivers. Acked-by: Per Forlin Acked-by: Jonas Aaberg Signed-off-by: Rabin Vincent Signed-off-by: Linus Walleij Signed-off-by: Dan Williams --- drivers/dma/ste_dma40.c | 172 ++++++++++++++++++++++++++++++++++++--------- drivers/dma/ste_dma40_ll.c | 35 +++++---- drivers/dma/ste_dma40_ll.h | 9 ++- 3 files changed, 167 insertions(+), 49 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8fd0bb94e777..af955de035f4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -115,6 +115,7 @@ struct d40_desc { struct list_head node; bool is_in_client_list; + bool cyclic; }; /** @@ -527,17 +528,45 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) struct d40_log_lli_bidir *lli = &desc->lli_log; int lli_current = desc->lli_current; int lli_len = desc->lli_len; + bool cyclic = desc->cyclic; int curr_lcla = -EINVAL; + int first_lcla = 0; + bool linkback; - if (lli_len - lli_current > 1) + /* + * We may have partially running cyclic transfers, in case we did't get + * enough LCLA entries. + */ + linkback = cyclic && lli_current == 0; + + /* + * For linkback, we need one LCLA even with only one link, because we + * can't link back to the one in LCPA space + */ + if (linkback || (lli_len - lli_current > 1)) { curr_lcla = d40_lcla_alloc_one(chan, desc); + first_lcla = curr_lcla; + } + + /* + * For linkback, we normally load the LCPA in the loop since we need to + * link it to the second LCLA and not the first. However, if we + * couldn't even get a first LCLA, then we have to run in LCPA and + * reload manually. + */ + if (!linkback || curr_lcla == -EINVAL) { + unsigned int flags = 0; - d40_log_lli_lcpa_write(chan->lcpa, - &lli->dst[lli_current], - &lli->src[lli_current], - curr_lcla); + if (curr_lcla == -EINVAL) + flags |= LLI_TERM_INT; - lli_current++; + d40_log_lli_lcpa_write(chan->lcpa, + &lli->dst[lli_current], + &lli->src[lli_current], + curr_lcla, + flags); + lli_current++; + } if (curr_lcla < 0) goto out; @@ -546,17 +575,33 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) unsigned int lcla_offset = chan->phy_chan->num * 1024 + 8 * curr_lcla * 2; struct d40_log_lli *lcla = pool->base + lcla_offset; + unsigned int flags = 0; int next_lcla; if (lli_current + 1 < lli_len) next_lcla = d40_lcla_alloc_one(chan, desc); else - next_lcla = -EINVAL; + next_lcla = linkback ? first_lcla : -EINVAL; + + if (cyclic || next_lcla == -EINVAL) + flags |= LLI_TERM_INT; + if (linkback && curr_lcla == first_lcla) { + /* First link goes in both LCPA and LCLA */ + d40_log_lli_lcpa_write(chan->lcpa, + &lli->dst[lli_current], + &lli->src[lli_current], + next_lcla, flags); + } + + /* + * One unused LCLA in the cyclic case if the very first + * next_lcla fails... 
+ */ d40_log_lli_lcla_write(lcla, &lli->dst[lli_current], &lli->src[lli_current], - next_lcla); + next_lcla, flags); dma_sync_single_range_for_device(chan->base->dev, pool->dma_addr, lcla_offset, @@ -565,7 +610,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) curr_lcla = next_lcla; - if (curr_lcla == -EINVAL) { + if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { lli_current++; break; } @@ -1074,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c) if (d40d == NULL) return; - d40_lcla_free_all(d40c, d40d); + if (d40d->cyclic) { + /* + * If this was a paritially loaded list, we need to reloaded + * it, and only when the list is completed. We need to check + * for done because the interrupt will hit for every link, and + * not just the last one. + */ + if (d40d->lli_current < d40d->lli_len + && !d40_tx_is_linked(d40c) + && !d40_residue(d40c)) { + d40_lcla_free_all(d40c, d40d); + d40_desc_load(d40c, d40d); + (void) d40_start(d40c); - if (d40d->lli_current < d40d->lli_len) { - d40_desc_load(d40c, d40d); - /* Start dma job */ - (void) d40_start(d40c); - return; - } + if (d40d->lli_current == d40d->lli_len) + d40d->lli_current = 0; + } + } else { + d40_lcla_free_all(d40c, d40d); - if (d40_queue_start(d40c) == NULL) - d40c->busy = false; + if (d40d->lli_current < d40d->lli_len) { + d40_desc_load(d40c, d40d); + /* Start dma job */ + (void) d40_start(d40c); + return; + } + + if (d40_queue_start(d40c) == NULL) + d40c->busy = false; + } d40c->pending_tx++; tasklet_schedule(&d40c->tasklet); @@ -1103,11 +1167,11 @@ static void dma_tasklet(unsigned long data) /* Get first active entry from list */ d40d = d40_first_active_get(d40c); - if (d40d == NULL) goto err; - d40c->completed = d40d->txd.cookie; + if (!d40d->cyclic) + d40c->completed = d40d->txd.cookie; /* * If terminating a channel pending_tx is set to zero. 
@@ -1122,16 +1186,18 @@ static void dma_tasklet(unsigned long data) callback = d40d->txd.callback; callback_param = d40d->txd.callback_param; - if (async_tx_test_ack(&d40d->txd)) { - d40_pool_lli_free(d40c, d40d); - d40_desc_remove(d40d); - d40_desc_free(d40c, d40d); - } else { - if (!d40d->is_in_client_list) { + if (!d40d->cyclic) { + if (async_tx_test_ack(&d40d->txd)) { + d40_pool_lli_free(d40c, d40d); d40_desc_remove(d40d); - d40_lcla_free_all(d40c, d40d); - list_add_tail(&d40d->node, &d40c->client); - d40d->is_in_client_list = true; + d40_desc_free(d40c, d40d); + } else { + if (!d40d->is_in_client_list) { + d40_desc_remove(d40d); + d40_lcla_free_all(d40c, d40d); + list_add_tail(&d40d->node, &d40c->client); + d40d->is_in_client_list = true; + } } } @@ -1694,19 +1760,23 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, struct stedma40_chan_cfg *cfg = &chan->dma_cfg; struct stedma40_half_channel_info *src_info = &cfg->src_info; struct stedma40_half_channel_info *dst_info = &cfg->dst_info; + unsigned long flags = 0; int ret; + if (desc->cyclic) + flags |= LLI_CYCLIC | LLI_TERM_INT; + ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, desc->lli_phy.src, virt_to_phys(desc->lli_phy.src), chan->src_def_cfg, - src_info, dst_info); + src_info, dst_info, flags); ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, desc->lli_phy.dst, virt_to_phys(desc->lli_phy.dst), chan->dst_def_cfg, - dst_info, src_info); + dst_info, src_info, flags); dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, desc->lli_pool.size, DMA_TO_DEVICE); @@ -1789,12 +1859,16 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, return NULL; } + spin_lock_irqsave(&chan->lock, flags); desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); if (desc == NULL) goto err; + if (sg_next(&sg_src[sg_len - 1]) == sg_src) + desc->cyclic = true; + if (direction != DMA_NONE) { dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); @@ -2007,6 +2081,36 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); } +static struct dma_async_tx_descriptor * +dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, + size_t buf_len, size_t period_len, + enum dma_data_direction direction) +{ + unsigned int periods = buf_len / period_len; + struct dma_async_tx_descriptor *txd; + struct scatterlist *sg; + int i; + + sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); + for (i = 0; i < periods; i++) { + sg_dma_address(&sg[i]) = dma_addr; + sg_dma_len(&sg[i]) = period_len; + dma_addr += period_len; + } + + sg[periods].offset = 0; + sg[periods].length = 0; + sg[periods].page_link = + ((unsigned long)sg | 0x01) & ~0x02; + + txd = d40_prep_sg(chan, sg, sg, periods, direction, + DMA_PREP_INTERRUPT); + + kfree(sg); + + return txd; +} + static enum dma_status d40_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) @@ -2264,6 +2368,9 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev) if (dma_has_cap(DMA_SG, dev->cap_mask)) dev->device_prep_dma_sg = d40_prep_memcpy_sg; + if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) + dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; + dev->device_alloc_chan_resources = d40_alloc_chan_resources; dev->device_free_chan_resources = d40_free_chan_resources; dev->device_issue_pending = d40_issue_pending; @@ -2282,6 +2389,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, 
dma_cap_zero(base->dma_slave.cap_mask); dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); + dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); d40_ops_init(base, &base->dma_slave); @@ -2316,9 +2424,9 @@ static int __init d40_dmaengine_init(struct d40_base *base, dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); dma_cap_set(DMA_SG, base->dma_both.cap_mask); + dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); d40_ops_init(base, &base->dma_both); - err = dma_async_device_register(&base->dma_both); if (err) { diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 88b9e371be2f..cad9e1daedff 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -202,13 +202,15 @@ static int d40_seg_size(int size, int data_width1, int data_width2) static struct d40_phy_lli * d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, - dma_addr_t lli_phys, u32 reg_cfg, + dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg, struct stedma40_half_channel_info *info, struct stedma40_half_channel_info *otherinfo, unsigned long flags) { + bool lastlink = flags & LLI_LAST_LINK; bool addr_inc = flags & LLI_ADDR_INC; bool term_int = flags & LLI_TERM_INT; + bool cyclic = flags & LLI_CYCLIC; int err; dma_addr_t next = lli_phys; int size_rest = size; @@ -226,10 +228,12 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, otherinfo->data_width); size_rest -= size_seg; - if (term_int && size_rest == 0) { - next = 0; + if (size_rest == 0 && term_int) flags |= LLI_TERM_INT; - } else + + if (size_rest == 0 && lastlink) + next = cyclic ? first_phys : 0; + else next = ALIGN(next + sizeof(struct d40_phy_lli), D40_LLI_ALIGN); @@ -257,14 +261,14 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, dma_addr_t lli_phys, u32 reg_cfg, struct stedma40_half_channel_info *info, - struct stedma40_half_channel_info *otherinfo) + struct stedma40_half_channel_info *otherinfo, + unsigned long flags) { int total_size = 0; int i; struct scatterlist *current_sg = sg; struct d40_phy_lli *lli = lli_sg; dma_addr_t l_phys = lli_phys; - unsigned long flags = 0; if (!target) flags |= LLI_ADDR_INC; @@ -277,12 +281,12 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, total_size += sg_dma_len(current_sg); if (i == sg_len - 1) - flags |= LLI_TERM_INT; + flags |= LLI_TERM_INT | LLI_LAST_LINK; l_phys = ALIGN(lli_phys + (lli - lli_sg) * sizeof(struct d40_phy_lli), D40_LLI_ALIGN); - lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, + lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys, reg_cfg, info, otherinfo, flags); if (lli == NULL) @@ -297,15 +301,18 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, static void d40_log_lli_link(struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next) + int next, unsigned int flags) { + bool interrupt = flags & LLI_TERM_INT; u32 slos = 0; u32 dlos = 0; if (next != -EINVAL) { slos = next * 2; dlos = next * 2 + 1; - } else { + } + + if (interrupt) { lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; } @@ -320,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst, void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next) + int next, unsigned int flags) { - d40_log_lli_link(lli_dst, lli_src, next); + d40_log_lli_link(lli_dst, lli_src, next, flags); writel(lli_src->lcsp02, &lcpa[0].lcsp0); writel(lli_src->lcsp13, &lcpa[0].lcsp1); @@ -333,9 +340,9 @@ void 
d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, void d40_log_lli_lcla_write(struct d40_log_lli *lcla, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next) + int next, unsigned int flags) { - d40_log_lli_link(lli_dst, lli_src, next); + d40_log_lli_link(lli_dst, lli_src, next, flags); writel(lli_src->lcsp02, &lcla[0].lcsp02); writel(lli_src->lcsp13, &lcla[0].lcsp13); diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 59e72f0cc901..195ee65ee7f3 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -296,6 +296,8 @@ struct d40_def_lcsp { enum d40_lli_flags { LLI_ADDR_INC = 1 << 0, LLI_TERM_INT = 1 << 1, + LLI_CYCLIC = 1 << 2, + LLI_LAST_LINK = 1 << 3, }; void d40_phy_cfg(struct stedma40_chan_cfg *cfg, @@ -314,7 +316,8 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, dma_addr_t lli_phys, u32 reg_cfg, struct stedma40_half_channel_info *info, - struct stedma40_half_channel_info *otherinfo); + struct stedma40_half_channel_info *otherinfo, + unsigned long flags); /* Logical channels */ @@ -328,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg, void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next); + int next, unsigned int flags); void d40_log_lli_lcla_write(struct d40_log_lli *lcla, struct d40_log_lli *lli_dst, struct d40_log_lli *lli_src, - int next); + int next, unsigned int flags); #endif /* STE_DMA40_LLI_H */ -- cgit v1.2.3
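
For reference, a minimal client-side sketch of how the cyclic support and the fixed
DMA_TERMINATE_ALL behaviour added above might be exercised, e.g. from an ALSA platform
driver. This is illustrative only and not part of the patch series: the buffer, its DMA
mapping and the stedma40_chan_cfg are assumed to come from the caller/platform data, and
the header path and use of stedma40_filter() as the channel filter reflect the platform
conventions of this kernel generation rather than anything mandated by these patches.

/*
 * Hypothetical usage sketch (not from the patches above): request a slave
 * channel capable of cyclic transfers, start a mem-to-device cyclic job,
 * and later stop it with DMA_TERMINATE_ALL.
 */
#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>

static void period_done(void *data)
{
	/* Invoked once per period; cyclic links set LLI_TERM_INT. */
}

static int start_cyclic_tx(struct stedma40_chan_cfg *cfg, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* stedma40_filter() matches a channel against the passed chan_cfg. */
	chan = dma_request_channel(mask, stedma40_filter, cfg);
	if (!chan)
		return -ENODEV;

	/* buf_len must be an integer multiple of period_len. */
	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len, DMA_TO_DEVICE);
	if (!desc) {
		dma_release_channel(chan);
		return -EBUSY;
	}

	desc->callback = period_done;
	desc->callback_param = NULL;
	desc->tx_submit(desc);
	dma_async_issue_pending(chan);

	/*
	 * ... later: with the fix above, DMA_TERMINATE_ALL now pauses/stops
	 * the ongoing transfer instead of only flushing the queued jobs.
	 */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	dma_release_channel(chan);
	return 0;
}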