author     Linus Torvalds <torvalds@linux-foundation.org>    2013-07-07 20:11:43 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-07-07 20:11:43 +0200
commit     d2b4a646717153a1a180b64d4a8464054dbd700e (patch)
tree       a019907da37389f59ddb429c7d10de178514af1e /drivers
parent     Merge branch 'cpuinit-delete' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
parent     DMA: shdma: add DT support (diff)

Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine updates from Vinod Koul:
 "Once you have some time from extended weekend celebrations please
  consider pulling the following to get:

   - Various fixes and PCI driver for dw_dmac by Andy
   - DT binding for imx-dma by Markus & imx-sdma by Shawn
   - DT fixes for dmaengine by Lars
   - jz4740 dmac driver by Lars
   - and various fixes across the drivers"

What "extended weekend celebrations"? I'm in the merge window, who has
time for extended celebrations..

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (40 commits)
  DMA: shdma: add DT support
  DMA: shdma: shdma_chan_filter() has to be in shdma-base.h
  DMA: shdma: (cosmetic) don't re-calculate a pointer
  dmaengine: at_hdmac: prepare clk before calling enable
  dmaengine/trivial: at_hdmac: add curly brackets to if/else expressions
  dmaengine: at_hdmac: remove unsuded atc_cleanup_descriptors()
  dmaengine: at_hdmac: add FIFO configuration parameter to DMA DT binding
  ARM: at91: dt: add header to define at_hdmac configuration
  MIPS: jz4740: Correct clock gate bit for DMA controller
  MIPS: jz4740: Remove custom DMA API
  MIPS: jz4740: Register jz4740 DMA device
  dma: Add a jz4740 dmaengine driver
  MIPS: jz4740: Acquire and enable DMA controller clock
  dma: mmp_tdma: disable irq when disabling dma channel
  dmaengine: PL08x: Avoid collisions with get_signal() macro
  dmaengine: dw: select DW_DMAC_BIG_ENDIAN_IO automagically
  dma: dw: add PCI part of the driver
  dma: dw: split driver to library part and platform code
  dma: move dw_dmac driver to an own directory
  dw_dmac: don't check resource with devm_ioremap_resource
  ...
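Much of this pull is about letting client drivers pick up slave DMA channels
from the device tree (or ACPI) instead of platform filter callbacks. Below is
a minimal sketch of that consumer pattern, not code from this series; the
device pointer, the "rx" channel name and the FIFO address are illustrative
placeholders:

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>

    /* Request an "rx" channel described by the client's "dmas"/"dma-names"
     * properties, configure it and queue one device-to-memory transfer. */
    static int example_start_rx(struct device *dev, dma_addr_t fifo,
                                dma_addr_t buf, size_t len)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_DEV_TO_MEM,
                    .src_addr       = fifo,  /* peripheral FIFO address */
                    .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .src_maxburst   = 4,
            };
            struct dma_async_tx_descriptor *desc;
            struct dma_chan *chan;

            chan = dma_request_slave_channel(dev, "rx");
            if (!chan)
                    return -ENODEV;

            if (dmaengine_slave_config(chan, &cfg))
                    goto err;

            desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
                                               DMA_PREP_INTERRUPT);
            if (!desc)
                    goto err;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;

    err:
            dma_release_channel(chan);
            return -EINVAL;
    }

dma_request_slave_channel() resolves the channel through the of_dma/acpi_dma
translation hooks that several of the drivers touched here register.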
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/Kconfig                                               26
-rw-r--r--  drivers/dma/Makefile                                               3
-rw-r--r--  drivers/dma/amba-pl08x.c                                           8
-rw-r--r--  drivers/dma/at_hdmac.c                                           211
-rw-r--r--  drivers/dma/at_hdmac_regs.h                                        5
-rw-r--r--  drivers/dma/dma-jz4740.c                                         617
-rw-r--r--  drivers/dma/dw/Kconfig                                            29
-rw-r--r--  drivers/dma/dw/Makefile                                            8
-rw-r--r--  drivers/dma/dw/core.c (renamed from drivers/dma/dw_dmac.c)       320
-rw-r--r--  drivers/dma/dw/internal.h                                         70
-rw-r--r--  drivers/dma/dw/pci.c                                             101
-rw-r--r--  drivers/dma/dw/platform.c                                        317
-rw-r--r--  drivers/dma/dw/regs.h (renamed from drivers/dma/dw_dmac_regs.h)    7
-rw-r--r--  drivers/dma/fsldma.c                                               5
-rw-r--r--  drivers/dma/imx-dma.c                                             77
-rw-r--r--  drivers/dma/imx-sdma.c                                            40
-rw-r--r--  drivers/dma/mmp_tdma.c                                             4
-rw-r--r--  drivers/dma/mxs-dma.c                                              2
-rw-r--r--  drivers/dma/of-dma.c                                              17
-rw-r--r--  drivers/dma/pl330.c                                               29
-rw-r--r--  drivers/dma/ppc4xx/adma.c                                          5
-rw-r--r--  drivers/dma/sh/Makefile                                            2
-rw-r--r--  drivers/dma/sh/shdma-base.c                                       26
-rw-r--r--  drivers/dma/sh/shdma-of.c                                         82
-rw-r--r--  drivers/dma/sh/shdma.c                                            33
-rw-r--r--  drivers/dma/sirf-dma.c                                            17
-rw-r--r--  drivers/dma/tegra20-apb-dma.c                                      3
-rw-r--r--  drivers/dma/timb_dma.c                                             2
28 files changed, 1633 insertions, 433 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3215a3cb3de8..6825957c97fb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -79,25 +79,7 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.
-config DW_DMAC
- tristate "Synopsys DesignWare AHB DMA support"
- depends on GENERIC_HARDIRQS
- select DMA_ENGINE
- default y if CPU_AT32AP7000
- help
- Support the Synopsys DesignWare AHB DMA controller. This
- can be integrated in chips such as the Atmel AT32ap7000.
-
-config DW_DMAC_BIG_ENDIAN_IO
- bool "Use big endian I/O register access"
- default y if AVR32
- depends on DW_DMAC
- help
- Say yes here to use big endian I/O access when reading and writing
- to the DMA controller registers. This is needed on some platforms,
- like the Atmel AVR32 architecture.
-
- If unsure, use the default setting.
+source "drivers/dma/dw/Kconfig"
config AT_HDMAC
tristate "Atmel AHB DMA support"
@@ -312,6 +294,12 @@ config MMP_PDMA
help
Support the MMP PDMA engine for PXA and MMP platfrom.
+config DMA_JZ4740
+ tristate "JZ4740 DMA support"
+ depends on MACH_JZ4740
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df591f95..5e0f2ef85614 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
-obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
@@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8bad254a498d..06fe45c74de5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -299,8 +299,8 @@ static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
const struct pl08x_platform_data *pd = plchan->host->pd;
int ret;
- if (plchan->mux_use++ == 0 && pd->get_signal) {
- ret = pd->get_signal(plchan->cd);
+ if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
+ ret = pd->get_xfer_signal(plchan->cd);
if (ret < 0) {
plchan->mux_use = 0;
return ret;
@@ -318,8 +318,8 @@ static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
if (plchan->signal >= 0) {
WARN_ON(plchan->mux_use == 0);
- if (--plchan->mux_use == 0 && pd->put_signal) {
- pd->put_signal(plchan->cd, plchan->signal);
+ if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
+ pd->put_xfer_signal(plchan->cd, plchan->signal);
plchan->signal = -1;
}
}
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e923cda930f9..c787f38a186a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -14,6 +14,7 @@
* found on AT91SAM9263.
*/
+#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -54,6 +55,7 @@ MODULE_PARM_DESC(init_nr_desc_per_channel,
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
+static void atc_issue_pending(struct dma_chan *chan);
/*----------------------------------------------------------------------*/
@@ -230,6 +232,95 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
vdbg_dump_regs(atchan);
}
+/*
+ * atc_get_current_descriptors -
+ * locate the descriptor which equal to physical address in DSCR
+ * @atchan: the channel we want to start
+ * @dscr_addr: physical descriptor address in DSCR
+ */
+static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
+ u32 dscr_addr)
+{
+ struct at_desc *desc, *_desc, *child, *desc_cur = NULL;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
+ if (desc->lli.dscr == dscr_addr) {
+ desc_cur = desc;
+ break;
+ }
+
+ list_for_each_entry(child, &desc->tx_list, desc_node) {
+ if (child->lli.dscr == dscr_addr) {
+ desc_cur = child;
+ break;
+ }
+ }
+ }
+
+ return desc_cur;
+}
+
+/*
+ * atc_get_bytes_left -
+ * Get the number of bytes residue in dma buffer,
+ * @chan: the channel we want to start
+ */
+static int atc_get_bytes_left(struct dma_chan *chan)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma *atdma = to_at_dma(chan->device);
+ int chan_id = atchan->chan_common.chan_id;
+ struct at_desc *desc_first = atc_first_active(atchan);
+ struct at_desc *desc_cur;
+ int ret = 0, count = 0;
+
+ /*
+ * Initialize necessary values in the first time.
+ * remain_desc record remain desc length.
+ */
+ if (atchan->remain_desc == 0)
+ /* First descriptor embedds the transaction length */
+ atchan->remain_desc = desc_first->len;
+
+ /*
+ * This happens when current descriptor transfer complete.
+ * The residual buffer size should reduce current descriptor length.
+ */
+ if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
+ clear_bit(ATC_IS_BTC, &atchan->status);
+ desc_cur = atc_get_current_descriptors(atchan,
+ channel_readl(atchan, DSCR));
+ if (!desc_cur) {
+ ret = -EINVAL;
+ goto out;
+ }
+ atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
+ << (desc_first->tx_width);
+ if (atchan->remain_desc < 0) {
+ ret = -EINVAL;
+ goto out;
+ } else {
+ ret = atchan->remain_desc;
+ }
+ } else {
+ /*
+ * Get residual bytes when current
+ * descriptor transfer in progress.
+ */
+ count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
+ << (desc_first->tx_width);
+ ret = atchan->remain_desc - count;
+ }
+ /*
+ * Check fifo empty.
+ */
+ if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
+ atc_issue_pending(chan);
+
+out:
+ return ret;
+}
+
/**
* atc_chain_complete - finish work for one transaction chain
* @atchan: channel we work on
@@ -327,37 +418,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)
}
/**
- * atc_cleanup_descriptors - cleanup up finished descriptors in active_list
- * @atchan: channel to be cleaned up
- *
- * Called with atchan->lock held and bh disabled
- */
-static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
-{
- struct at_desc *desc, *_desc;
- struct at_desc *child;
-
- dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
-
- list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
- if (!(desc->lli.ctrla & ATC_DONE))
- /* This one is currently in progress */
- return;
-
- list_for_each_entry(child, &desc->tx_list, desc_node)
- if (!(child->lli.ctrla & ATC_DONE))
- /* Currently in progress */
- return;
-
- /*
- * No descriptors so far seem to be in progress, i.e.
- * this chain must be done.
- */
- atc_chain_complete(atchan, desc);
- }
-}
-
-/**
* atc_advance_work - at the end of a transaction, move forward
* @atchan: channel where the transaction ended
*
@@ -496,6 +556,8 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
/* Give information to tasklet */
set_bit(ATC_IS_ERROR, &atchan->status);
}
+ if (pending & AT_DMA_BTC(i))
+ set_bit(ATC_IS_BTC, &atchan->status);
tasklet_schedule(&atchan->tasklet);
ret = IRQ_HANDLED;
}
@@ -615,6 +677,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->len = len;
+ first->tx_width = src_width;
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
@@ -761,6 +824,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->len = total_len;
+ first->tx_width = reg_width;
/* first link descriptor of list is responsible of flags */
first->txd.flags = flags; /* client is in control of this ack */
@@ -919,6 +983,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->len = buf_len;
+ first->tx_width = reg_width;
return &first->txd;
@@ -1032,34 +1097,36 @@ atc_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
unsigned long flags;
enum dma_status ret;
-
- spin_lock_irqsave(&atchan->lock, flags);
+ int bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- atc_cleanup_descriptors(atchan);
+ if (ret == DMA_SUCCESS)
+ return ret;
+ /*
+ * There's no point calculating the residue if there's
+ * no txstate to store the value.
+ */
+ if (!txstate)
+ return DMA_ERROR;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ spin_lock_irqsave(&atchan->lock, flags);
- last_complete = chan->completed_cookie;
- last_used = chan->cookie;
+ /* Get number of bytes left in the active transactions */
+ bytes = atc_get_bytes_left(chan);
spin_unlock_irqrestore(&atchan->lock, flags);
- if (ret != DMA_SUCCESS)
- dma_set_residue(txstate, atc_first_active(atchan)->len);
-
- if (atc_chan_is_paused(atchan))
- ret = DMA_PAUSED;
+ if (unlikely(bytes < 0)) {
+ dev_vdbg(chan2dev(chan), "get residual bytes error\n");
+ return DMA_ERROR;
+ } else {
+ dma_set_residue(txstate, bytes);
+ }
- dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
- ret, cookie, last_complete ? last_complete : 0,
- last_used ? last_used : 0);
+ dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
+ ret, cookie, bytes);
return ret;
}
@@ -1120,7 +1187,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
*/
BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
- /* if cfg configuration specified take it instad of default */
+ /* if cfg configuration specified take it instead of default */
if (atslave->cfg)
cfg = atslave->cfg;
}
@@ -1143,6 +1210,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i;
+ atchan->remain_desc = 0;
list_splice(&tmp_list, &atchan->free_list);
dma_cookie_init(chan);
spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1185,6 +1253,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
list_splice_init(&atchan->free_list, &list);
atchan->descs_allocated = 0;
atchan->status = 0;
+ atchan->remain_desc = 0;
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1223,14 +1292,31 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
if (!atslave)
return NULL;
+
+ atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
/*
* We can fill both SRC_PER and DST_PER, one of these fields will be
* ignored depending on DMA transfer direction.
*/
- per_id = dma_spec->args[1];
- atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
- | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
- | ATC_SRC_PER(per_id);
+ per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
+ atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
+ | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
+ /*
+ * We have to translate the value we get from the device tree since
+ * the half FIFO configuration value had to be 0 to keep backward
+ * compatibility.
+ */
+ switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
+ case AT91_DMA_CFG_FIFOCFG_ALAP:
+ atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
+ break;
+ case AT91_DMA_CFG_FIFOCFG_ASAP:
+ atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
+ break;
+ case AT91_DMA_CFG_FIFOCFG_HALF:
+ default:
+ atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
+ }
atslave->dma_dev = &dmac_pdev->dev;
chan = dma_request_channel(mask, at_dma_filter, atslave);
@@ -1374,7 +1460,9 @@ static int __init at_dma_probe(struct platform_device *pdev)
err = PTR_ERR(atdma->clk);
goto err_clk;
}
- clk_enable(atdma->clk);
+ err = clk_prepare_enable(atdma->clk);
+ if (err)
+ goto err_clk_prepare;
/* force dma off, just in case */
at_dma_off(atdma);
@@ -1472,10 +1560,10 @@ err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
- platform_set_drvdata(pdev, NULL);
free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
+err_clk_prepare:
clk_put(atdma->clk);
err_clk:
iounmap(atdma->regs);
@@ -1497,7 +1585,6 @@ static int at_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&atdma->dma_common);
dma_pool_destroy(atdma->dma_desc_pool);
- platform_set_drvdata(pdev, NULL);
free_irq(platform_get_irq(pdev, 0), atdma);
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
@@ -1512,7 +1599,7 @@ static int at_dma_remove(struct platform_device *pdev)
list_del(&chan->device_node);
}
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
clk_put(atdma->clk);
iounmap(atdma->regs);
@@ -1531,7 +1618,7 @@ static void at_dma_shutdown(struct platform_device *pdev)
struct at_dma *atdma = platform_get_drvdata(pdev);
at_dma_off(platform_get_drvdata(pdev));
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
@@ -1588,7 +1675,7 @@ static int at_dma_suspend_noirq(struct device *dev)
/* disable DMA controller */
at_dma_off(atdma);
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
return 0;
}
@@ -1618,7 +1705,7 @@ static int at_dma_resume_noirq(struct device *dev)
struct dma_chan *chan, *_chan;
/* bring back DMA controller */
- clk_enable(atdma->clk);
+ clk_prepare_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE);
/* clear any pending interrupt */
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index c604d26fd4d3..f31d647acdfa 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -182,6 +182,7 @@ struct at_lli {
* @txd: support for the async_tx api
* @desc_node: node on the channed descriptors list
* @len: total transaction bytecount
+ * @tx_width: transfer width
*/
struct at_desc {
/* FIRST values the hardware uses */
@@ -192,6 +193,7 @@ struct at_desc {
struct dma_async_tx_descriptor txd;
struct list_head desc_node;
size_t len;
+ u32 tx_width;
};
static inline struct at_desc *
@@ -211,6 +213,7 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
enum atc_status {
ATC_IS_ERROR = 0,
ATC_IS_PAUSED = 1,
+ ATC_IS_BTC = 2,
ATC_IS_CYCLIC = 24,
};
@@ -228,6 +231,7 @@ enum atc_status {
* @save_cfg: configuration register that is saved on suspend/resume cycle
* @save_dscr: for cyclic operations, preserve next descriptor address in
* the cyclic list on suspend/resume cycle
+ * @remain_desc: to save remain desc length
* @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
* @lock: serializes enqueue/dequeue operations to descriptors lists
* @active_list: list of descriptors dmaengine is being running on
@@ -246,6 +250,7 @@ struct at_dma_chan {
struct tasklet_struct tasklet;
u32 save_cfg;
u32 save_dscr;
+ u32 remain_desc;
struct dma_slave_config dma_sconfig;
spinlock_t lock;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
new file mode 100644
index 000000000000..b0c0c8268d42
--- /dev/null
+++ b/drivers/dma/dma-jz4740.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 DMAC support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+
+#include <asm/mach-jz4740/dma.h>
+
+#include "virt-dma.h"
+
+#define JZ_DMA_NR_CHANS 6
+
+#define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20)
+#define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20)
+#define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20)
+#define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20)
+#define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20)
+#define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20)
+#define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20)
+
+#define JZ_REG_DMA_CTRL 0x300
+#define JZ_REG_DMA_IRQ 0x304
+#define JZ_REG_DMA_DOORBELL 0x308
+#define JZ_REG_DMA_DOORBELL_SET 0x30C
+
+#define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31)
+#define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6)
+#define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4)
+#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3)
+#define JZ_DMA_STATUS_CTRL_HALT BIT(2)
+#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1)
+#define JZ_DMA_STATUS_CTRL_ENABLE BIT(0)
+
+#define JZ_DMA_CMD_SRC_INC BIT(23)
+#define JZ_DMA_CMD_DST_INC BIT(22)
+#define JZ_DMA_CMD_RDIL_MASK (0xf << 16)
+#define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14)
+#define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12)
+#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8)
+#define JZ_DMA_CMD_BLOCK_MODE BIT(7)
+#define JZ_DMA_CMD_DESC_VALID BIT(4)
+#define JZ_DMA_CMD_DESC_VALID_MODE BIT(3)
+#define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2)
+#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1)
+#define JZ_DMA_CMD_LINK_ENABLE BIT(0)
+
+#define JZ_DMA_CMD_FLAGS_OFFSET 22
+#define JZ_DMA_CMD_RDIL_OFFSET 16
+#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14
+#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12
+#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8
+#define JZ_DMA_CMD_MODE_OFFSET 7
+
+#define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8)
+#define JZ_DMA_CTRL_HALT BIT(3)
+#define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2)
+#define JZ_DMA_CTRL_ENABLE BIT(0)
+
+enum jz4740_dma_width {
+ JZ4740_DMA_WIDTH_32BIT = 0,
+ JZ4740_DMA_WIDTH_8BIT = 1,
+ JZ4740_DMA_WIDTH_16BIT = 2,
+};
+
+enum jz4740_dma_transfer_size {
+ JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
+ JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
+ JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
+ JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
+ JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
+};
+
+enum jz4740_dma_flags {
+ JZ4740_DMA_SRC_AUTOINC = 0x2,
+ JZ4740_DMA_DST_AUTOINC = 0x1,
+};
+
+enum jz4740_dma_mode {
+ JZ4740_DMA_MODE_SINGLE = 0,
+ JZ4740_DMA_MODE_BLOCK = 1,
+};
+
+struct jz4740_dma_sg {
+ dma_addr_t addr;
+ unsigned int len;
+};
+
+struct jz4740_dma_desc {
+ struct virt_dma_desc vdesc;
+
+ enum dma_transfer_direction direction;
+ bool cyclic;
+
+ unsigned int num_sgs;
+ struct jz4740_dma_sg sg[];
+};
+
+struct jz4740_dmaengine_chan {
+ struct virt_dma_chan vchan;
+ unsigned int id;
+
+ dma_addr_t fifo_addr;
+ unsigned int transfer_shift;
+
+ struct jz4740_dma_desc *desc;
+ unsigned int next_sg;
+};
+
+struct jz4740_dma_dev {
+ struct dma_device ddev;
+ void __iomem *base;
+ struct clk *clk;
+
+ struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
+};
+
+static struct jz4740_dma_dev *jz4740_dma_chan_get_dev(
+ struct jz4740_dmaengine_chan *chan)
+{
+ return container_of(chan->vchan.chan.device, struct jz4740_dma_dev,
+ ddev);
+}
+
+static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
+}
+
+static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct jz4740_dma_desc, vdesc);
+}
+
+static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev,
+ unsigned int reg)
+{
+ return readl(dmadev->base + reg);
+}
+
+static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev,
+ unsigned reg, uint32_t val)
+{
+ writel(val, dmadev->base + reg);
+}
+
+static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev,
+ unsigned int reg, uint32_t val, uint32_t mask)
+{
+ uint32_t tmp;
+
+ tmp = jz4740_dma_read(dmadev, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ jz4740_dma_write(dmadev, reg, tmp);
+}
+
+static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
+{
+ return kzalloc(sizeof(struct jz4740_dma_desc) +
+ sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
+static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return JZ4740_DMA_WIDTH_8BIT;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return JZ4740_DMA_WIDTH_16BIT;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return JZ4740_DMA_WIDTH_32BIT;
+ default:
+ return JZ4740_DMA_WIDTH_32BIT;
+ }
+}
+
+static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
+{
+ if (maxburst <= 1)
+ return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
+ else if (maxburst <= 3)
+ return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
+ else if (maxburst <= 15)
+ return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
+ else if (maxburst <= 31)
+ return JZ4740_DMA_TRANSFER_SIZE_16BYTE;
+
+ return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
+}
+
+static int jz4740_dma_slave_config(struct dma_chan *c,
+ const struct dma_slave_config *config)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ enum jz4740_dma_width src_width;
+ enum jz4740_dma_width dst_width;
+ enum jz4740_dma_transfer_size transfer_size;
+ enum jz4740_dma_flags flags;
+ uint32_t cmd;
+
+ switch (config->direction) {
+ case DMA_MEM_TO_DEV:
+ flags = JZ4740_DMA_SRC_AUTOINC;
+ transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
+ chan->fifo_addr = config->dst_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ flags = JZ4740_DMA_DST_AUTOINC;
+ transfer_size = jz4740_dma_maxburst(config->src_maxburst);
+ chan->fifo_addr = config->src_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ src_width = jz4740_dma_width(config->src_addr_width);
+ dst_width = jz4740_dma_width(config->dst_addr_width);
+
+ switch (transfer_size) {
+ case JZ4740_DMA_TRANSFER_SIZE_2BYTE:
+ chan->transfer_shift = 1;
+ break;
+ case JZ4740_DMA_TRANSFER_SIZE_4BYTE:
+ chan->transfer_shift = 2;
+ break;
+ case JZ4740_DMA_TRANSFER_SIZE_16BYTE:
+ chan->transfer_shift = 4;
+ break;
+ case JZ4740_DMA_TRANSFER_SIZE_32BYTE:
+ chan->transfer_shift = 5;
+ break;
+ default:
+ chan->transfer_shift = 0;
+ break;
+ }
+
+ cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET;
+ cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET;
+ cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET;
+ cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET;
+ cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET;
+ cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE;
+
+ jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id),
+ config->slave_id);
+
+ return 0;
+}
+
+static int jz4740_dma_terminate_all(struct dma_chan *c)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
+ JZ_DMA_STATUS_CTRL_ENABLE);
+ chan->desc = NULL;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+
+ return 0;
+}
+
+static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ return jz4740_dma_slave_config(chan, config);
+ case DMA_TERMINATE_ALL:
+ return jz4740_dma_terminate_all(chan);
+ default:
+ return -ENOSYS;
+ }
+}
+
+static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
+{
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ dma_addr_t src_addr, dst_addr;
+ struct virt_dma_desc *vdesc;
+ struct jz4740_dma_sg *sg;
+
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
+ JZ_DMA_STATUS_CTRL_ENABLE);
+
+ if (!chan->desc) {
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc)
+ return 0;
+ chan->desc = to_jz4740_dma_desc(vdesc);
+ chan->next_sg = 0;
+ }
+
+ if (chan->next_sg == chan->desc->num_sgs)
+ chan->next_sg = 0;
+
+ sg = &chan->desc->sg[chan->next_sg];
+
+ if (chan->desc->direction == DMA_MEM_TO_DEV) {
+ src_addr = sg->addr;
+ dst_addr = chan->fifo_addr;
+ } else {
+ src_addr = chan->fifo_addr;
+ dst_addr = sg->addr;
+ }
+ jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id),
+ sg->len >> chan->transfer_shift);
+
+ chan->next_sg++;
+
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id),
+ JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE,
+ JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC |
+ JZ_DMA_STATUS_CTRL_ENABLE);
+
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL,
+ JZ_DMA_CTRL_ENABLE,
+ JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE);
+
+ return 0;
+}
+
+static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
+{
+ spin_lock(&chan->vchan.lock);
+ if (chan->desc) {
+ if (chan->desc && chan->desc->cyclic) {
+ vchan_cyclic_callback(&chan->desc->vdesc);
+ } else {
+ if (chan->next_sg == chan->desc->num_sgs) {
+ chan->desc = NULL;
+ vchan_cookie_complete(&chan->desc->vdesc);
+ }
+ }
+ }
+ jz4740_dma_start_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
+}
+
+static irqreturn_t jz4740_dma_irq(int irq, void *devid)
+{
+ struct jz4740_dma_dev *dmadev = devid;
+ uint32_t irq_status;
+ unsigned int i;
+
+ irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ);
+
+ for (i = 0; i < 6; ++i) {
+ if (irq_status & (1 << i)) {
+ jz4740_dma_write_mask(dmadev,
+ JZ_REG_DMA_STATUS_CTRL(i), 0,
+ JZ_DMA_STATUS_CTRL_ENABLE |
+ JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
+
+ jz4740_dma_chan_irq(&dmadev->chan[i]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void jz4740_dma_issue_pending(struct dma_chan *c)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+ jz4740_dma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
+ struct dma_chan *c, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ desc = jz4740_dma_alloc_desc(sg_len);
+ if (!desc)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg[i].addr = sg_dma_address(sg);
+ desc->sg[i].len = sg_dma_len(sg);
+ }
+
+ desc->num_sgs = sg_len;
+ desc->direction = direction;
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
+ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_desc *desc;
+ unsigned int num_periods, i;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+
+ desc = jz4740_dma_alloc_desc(num_periods);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < num_periods; i++) {
+ desc->sg[i].addr = buf_addr;
+ desc->sg[i].len = period_len;
+ buf_addr += period_len;
+ }
+
+ desc->num_sgs = num_periods;
+ desc->direction = direction;
+ desc->cyclic = true;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
+ struct jz4740_dma_desc *desc, unsigned int next_sg)
+{
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ unsigned int residue, count;
+ unsigned int i;
+
+ residue = 0;
+
+ for (i = next_sg; i < desc->num_sgs; i++)
+ residue += desc->sg[i].len;
+
+ if (next_sg != 0) {
+ count = jz4740_dma_read(dmadev,
+ JZ_REG_DMA_TRANSFER_COUNT(chan->id));
+ residue += count << chan->transfer_shift;
+ }
+
+ return residue;
+}
+
+static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(c, cookie, state);
+ if (status == DMA_SUCCESS || !state)
+ return status;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&chan->vchan, cookie);
+ if (cookie == chan->desc->vdesc.tx.cookie) {
+ state->residue = jz4740_dma_desc_residue(chan, chan->desc,
+ chan->next_sg);
+ } else if (vdesc) {
+ state->residue = jz4740_dma_desc_residue(chan,
+ to_jz4740_dma_desc(vdesc), 0);
+ } else {
+ state->residue = 0;
+ }
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return status;
+}
+
+static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
+{
+ return 0;
+}
+
+static void jz4740_dma_free_chan_resources(struct dma_chan *c)
+{
+ vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
+}
+
+static int jz4740_dma_probe(struct platform_device *pdev)
+{
+ struct jz4740_dmaengine_chan *chan;
+ struct jz4740_dma_dev *dmadev;
+ struct dma_device *dd;
+ unsigned int i;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+ if (!dmadev)
+ return -EINVAL;
+
+ dd = &dmadev->ddev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dmadev->base))
+ return PTR_ERR(dmadev->base);
+
+ dmadev->clk = clk_get(&pdev->dev, "dma");
+ if (IS_ERR(dmadev->clk))
+ return PTR_ERR(dmadev->clk);
+
+ clk_prepare_enable(dmadev->clk);
+
+ dma_cap_set(DMA_SLAVE, dd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+ dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
+ dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
+ dd->device_tx_status = jz4740_dma_tx_status;
+ dd->device_issue_pending = jz4740_dma_issue_pending;
+ dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
+ dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
+ dd->device_control = jz4740_dma_control;
+ dd->dev = &pdev->dev;
+ dd->chancnt = JZ_DMA_NR_CHANS;
+ INIT_LIST_HEAD(&dd->channels);
+
+ for (i = 0; i < dd->chancnt; i++) {
+ chan = &dmadev->chan[i];
+ chan->id = i;
+ chan->vchan.desc_free = jz4740_dma_desc_free;
+ vchan_init(&chan->vchan, dd);
+ }
+
+ ret = dma_async_device_register(dd);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
+ if (ret)
+ goto err_unregister;
+
+ platform_set_drvdata(pdev, dmadev);
+
+ return 0;
+
+err_unregister:
+ dma_async_device_unregister(dd);
+ return ret;
+}
+
+static int jz4740_dma_remove(struct platform_device *pdev)
+{
+ struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ free_irq(irq, dmadev);
+ dma_async_device_unregister(&dmadev->ddev);
+ clk_disable_unprepare(dmadev->clk);
+
+ return 0;
+}
+
+static struct platform_driver jz4740_dma_driver = {
+ .probe = jz4740_dma_probe,
+ .remove = jz4740_dma_remove,
+ .driver = {
+ .name = "jz4740-dma",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(jz4740_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("JZ4740 DMA driver");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
new file mode 100644
index 000000000000..dde13248b681
--- /dev/null
+++ b/drivers/dma/dw/Kconfig
@@ -0,0 +1,29 @@
+#
+# DMA engine configuration for dw
+#
+
+config DW_DMAC_CORE
+ tristate "Synopsys DesignWare AHB DMA support"
+ depends on GENERIC_HARDIRQS
+ select DMA_ENGINE
+
+config DW_DMAC
+ tristate "Synopsys DesignWare AHB DMA platform driver"
+ select DW_DMAC_CORE
+ select DW_DMAC_BIG_ENDIAN_IO if AVR32
+ default y if CPU_AT32AP7000
+ help
+ Support the Synopsys DesignWare AHB DMA controller. This
+ can be integrated in chips such as the Atmel AT32ap7000.
+
+config DW_DMAC_PCI
+ tristate "Synopsys DesignWare AHB DMA PCI driver"
+ depends on PCI
+ select DW_DMAC_CORE
+ help
+ Support the Synopsys DesignWare AHB DMA controller on the
+ platfroms that enumerate it as a PCI device. For example,
+ Intel Medfield has integrated this GPDMA controller.
+
+config DW_DMAC_BIG_ENDIAN_IO
+ bool
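With this split, both DW_DMAC (platform) and DW_DMAC_PCI select DW_DMAC_CORE,
so a hypothetical configuration that only wants the PCI-enumerated controller
(e.g. on Medfield) can enable just the glue driver and the core library is
pulled in automatically:

    CONFIG_DW_DMAC_PCI=m
    # selected implicitly by the line above:
    CONFIG_DW_DMAC_CORE=m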
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
new file mode 100644
index 000000000000..3eebd1ce2c6b
--- /dev/null
+++ b/drivers/dma/dw/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
+dw_dmac_core-objs := core.o
+
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+dw_dmac-objs := platform.o
+
+obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
+dw_dmac_pci-objs := pci.o
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw/core.c
index 2e5deaa82b60..eea479c12173 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw/core.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,17 +20,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <linux/acpi_dma.h>
-#include "dw_dmac_regs.h"
-#include "dmaengine.h"
+#include "../dmaengine.h"
+#include "internal.h"
/*
* This supports the Synopsys "DesignWare AHB Central DMA Controller",
@@ -41,16 +37,6 @@
* which does not support descriptor writeback.
*/
-static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
-{
- return slave ? slave->dst_master : 0;
-}
-
-static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
-{
- return slave ? slave->src_master : 1;
-}
-
static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -556,14 +542,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* --------------------- Cyclic DMA API extensions -------------------- */
-inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);
-inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
return channel_readl(dwc, DAR);
@@ -1225,99 +1211,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-/*----------------------------------------------------------------------*/
-
-struct dw_dma_of_filter_args {
- struct dw_dma *dw;
- unsigned int req;
- unsigned int src;
- unsigned int dst;
-};
-
-static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_of_filter_args *fargs = param;
-
- /* Ensure the device matches our channel */
- if (chan->device != &fargs->dw->dma)
- return false;
-
- dwc->request_line = fargs->req;
- dwc->src_master = fargs->src;
- dwc->dst_master = fargs->dst;
-
- return true;
-}
-
-static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct dw_dma *dw = ofdma->of_dma_data;
- struct dw_dma_of_filter_args fargs = {
- .dw = dw,
- };
- dma_cap_mask_t cap;
-
- if (dma_spec->args_count != 3)
- return NULL;
-
- fargs.req = dma_spec->args[0];
- fargs.src = dma_spec->args[1];
- fargs.dst = dma_spec->args[2];
-
- if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
- fargs.src >= dw->nr_masters ||
- fargs.dst >= dw->nr_masters))
- return NULL;
-
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
-
- /* TODO: there should be a simpler way to do this */
- return dma_request_channel(cap, dw_dma_of_filter, &fargs);
-}
-
-#ifdef CONFIG_ACPI
-static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct acpi_dma_spec *dma_spec = param;
-
- if (chan->device->dev != dma_spec->dev ||
- chan->chan_id != dma_spec->chan_id)
- return false;
-
- dwc->request_line = dma_spec->slave_id;
- dwc->src_master = dwc_get_sms(NULL);
- dwc->dst_master = dwc_get_dms(NULL);
-
- return true;
-}
-
-static void dw_dma_acpi_controller_register(struct dw_dma *dw)
-{
- struct device *dev = dw->dma.dev;
- struct acpi_dma_filter_info *info;
- int ret;
-
- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
- dma_cap_zero(info->dma_cap);
- dma_cap_set(DMA_SLAVE, info->dma_cap);
- info->filter_fn = dw_dma_acpi_filter;
-
- ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
- info);
- if (ret)
- dev_err(dev, "could not register acpi_dma_controller\n");
-}
-#else /* !CONFIG_ACPI */
-static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
-#endif /* !CONFIG_ACPI */
-
/* --------------------- Cyclic DMA API extensions -------------------- */
/**
@@ -1598,104 +1491,24 @@ static void dw_dma_off(struct dw_dma *dw)
dw->chan[i].initialized = false;
}
-#ifdef CONFIG_OF
-static struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
- struct device_node *np = pdev->dev.of_node;
- struct dw_dma_platform_data *pdata;
- u32 tmp, arr[4];
-
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return NULL;
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
- return NULL;
-
- if (of_property_read_bool(np, "is_private"))
- pdata->is_private = true;
-
- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
- pdata->chan_allocation_order = (unsigned char)tmp;
-
- if (!of_property_read_u32(np, "chan_priority", &tmp))
- pdata->chan_priority = tmp;
-
- if (!of_property_read_u32(np, "block_size", &tmp))
- pdata->block_size = tmp;
-
- if (!of_property_read_u32(np, "dma-masters", &tmp)) {
- if (tmp > 4)
- return NULL;
-
- pdata->nr_masters = tmp;
- }
-
- if (!of_property_read_u32_array(np, "data_width", arr,
- pdata->nr_masters))
- for (tmp = 0; tmp < pdata->nr_masters; tmp++)
- pdata->data_width[tmp] = arr[tmp];
-
- return pdata;
-}
-#else
-static inline struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
-
-static int dw_probe(struct platform_device *pdev)
-{
- struct dw_dma_platform_data *pdata;
- struct resource *io;
struct dw_dma *dw;
size_t size;
- void __iomem *regs;
bool autocfg;
unsigned int dw_params;
unsigned int nr_channels;
unsigned int max_blk_size = 0;
- int irq;
int err;
int i;
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- return -EINVAL;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- regs = devm_ioremap_resource(&pdev->dev, io);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
-
- /* Apply default dma_mask if needed */
- if (!pdev->dev.dma_mask) {
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- }
-
- dw_params = dma_read_byaddr(regs, DW_PARAMS);
+ dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
- dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata)
- pdata = dw_dma_parse_dt(pdev);
+ dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
if (!pdata && autocfg) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -1712,16 +1525,17 @@ static int dw_probe(struct platform_device *pdev)
nr_channels = pdata->nr_channels;
size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
- dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
if (!dw)
return -ENOMEM;
- dw->clk = devm_clk_get(&pdev->dev, "hclk");
+ dw->clk = devm_clk_get(chip->dev, "hclk");
if (IS_ERR(dw->clk))
return PTR_ERR(dw->clk);
clk_prepare_enable(dw->clk);
- dw->regs = regs;
+ dw->regs = chip->regs;
+ chip->dw = dw;
/* Get hardware configuration parameters */
if (autocfg) {
@@ -1746,18 +1560,16 @@ static int dw_probe(struct platform_device *pdev)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
+ err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
"dw_dmac", dw);
if (err)
return err;
- platform_set_drvdata(pdev, dw);
-
/* Create a pool of consistent memory blocks for hardware descriptors */
- dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
- dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ dev_err(chip->dev, "No memory for descriptors dma pool\n");
return -ENOMEM;
}
@@ -1798,12 +1610,12 @@ static int dw_probe(struct platform_device *pdev)
/* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
+ void __iomem *addr = chip->regs + r * sizeof(u32);
- dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
- DWC_PARAMS);
+ dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
- dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
- dwc_params);
+ dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+ dwc_params);
/* Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
@@ -1834,7 +1646,7 @@ static int dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
if (pdata->is_private)
dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
- dw->dma.dev = &pdev->dev;
+ dw->dma.dev = chip->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1848,32 +1660,20 @@ static int dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
- dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+ dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
nr_channels);
dma_async_device_register(&dw->dma);
- if (pdev->dev.of_node) {
- err = of_dma_controller_register(pdev->dev.of_node,
- dw_dma_of_xlate, dw);
- if (err)
- dev_err(&pdev->dev,
- "could not register of_dma_controller\n");
- }
-
- if (ACPI_HANDLE(&pdev->dev))
- dw_dma_acpi_controller_register(dw);
-
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_probe);
-static int dw_remove(struct platform_device *pdev)
+int dw_dma_remove(struct dw_dma_chip *chip)
{
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
struct dw_dma_chan *dwc, *_dwc;
- if (pdev->dev.of_node)
- of_dma_controller_free(pdev->dev.of_node);
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
@@ -1887,86 +1687,44 @@ static int dw_remove(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_remove);
-static void dw_shutdown(struct platform_device *pdev)
+void dw_dma_shutdown(struct dw_dma_chip *chip)
{
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
}
+EXPORT_SYMBOL_GPL(dw_dma_shutdown);
+
+#ifdef CONFIG_PM_SLEEP
-static int dw_suspend_noirq(struct device *dev)
+int dw_dma_suspend(struct dw_dma_chip *chip)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_suspend);
-static int dw_resume_noirq(struct device *dev)
+int dw_dma_resume(struct dw_dma_chip *chip)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_resume);
-static const struct dev_pm_ops dw_dev_pm_ops = {
- .suspend_noirq = dw_suspend_noirq,
- .resume_noirq = dw_resume_noirq,
- .freeze_noirq = dw_suspend_noirq,
- .thaw_noirq = dw_resume_noirq,
- .restore_noirq = dw_resume_noirq,
- .poweroff_noirq = dw_suspend_noirq,
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id dw_dma_of_id_table[] = {
- { .compatible = "snps,dma-spear1340" },
- {}
-};
-MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id dw_dma_acpi_id_table[] = {
- { "INTL9C60", 0 },
- { }
-};
-#endif
-
-static struct platform_driver dw_driver = {
- .probe = dw_probe,
- .remove = dw_remove,
- .shutdown = dw_shutdown,
- .driver = {
- .name = "dw_dmac",
- .pm = &dw_dev_pm_ops,
- .of_match_table = of_match_ptr(dw_dma_of_id_table),
- .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
- },
-};
-
-static int __init dw_init(void)
-{
- return platform_driver_register(&dw_driver);
-}
-subsys_initcall(dw_init);
-
-static void __exit dw_exit(void)
-{
- platform_driver_unregister(&dw_driver);
-}
-module_exit(dw_exit);
+#endif /* CONFIG_PM_SLEEP */
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
new file mode 100644
index 000000000000..32667f9e0dda
--- /dev/null
+++ b/drivers/dma/dw/internal.h
@@ -0,0 +1,70 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DW_DMAC_INTERNAL_H
+#define _DW_DMAC_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/dw_dmac.h>
+
+#include "regs.h"
+
+/**
+ * struct dw_dma_chip - representation of DesignWare DMA controller hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @dw: struct dw_dma that is filed by dw_dma_probe()
+ */
+struct dw_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ struct dw_dma *dw;
+};
+
+/* Export to the platform drivers */
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_remove(struct dw_dma_chip *chip);
+
+void dw_dma_shutdown(struct dw_dma_chip *chip);
+
+#ifdef CONFIG_PM_SLEEP
+
+int dw_dma_suspend(struct dw_dma_chip *chip);
+int dw_dma_resume(struct dw_dma_chip *chip);
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * dwc_get_dms - get destination master
+ * @slave: pointer to the custom slave configuration
+ *
+ * Returns destination master in the custom slave configuration if defined, or
+ * default value otherwise.
+ */
+static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+{
+ return slave ? slave->dst_master : 0;
+}
+
+/**
+ * dwc_get_sms - get source master
+ * @slave: pointer to the custom slave configuration
+ *
+ * Returns source master in the custom slave configuration if defined, or
+ * default value otherwise.
+ */
+static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+{
+ return slave ? slave->src_master : 1;
+}
+
+#endif /* _DW_DMAC_INTERNAL_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
new file mode 100644
index 000000000000..e89fc24b8293
--- /dev/null
+++ b/drivers/dma/dw/pci.c
@@ -0,0 +1,101 @@
+/*
+ * PCI driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+
+#include "internal.h"
+
+static struct dw_dma_platform_data dw_pci_pdata = {
+ .is_private = 1,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+};
+
+static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct dw_dma_chip *chip;
+ struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regs = pcim_iomap_table(pdev)[0];
+ chip->irq = pdev->irq;
+
+ ret = dw_dma_probe(chip, pdata);
+ if (ret)
+ return ret;
+
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+}
+
+static void dw_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_dma_chip *chip = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = dw_dma_remove(chip);
+ if (ret)
+ dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
+ /* Medfield */
+ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata },
+
+ /* BayTrail */
+ { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
+
+static struct pci_driver dw_pci_driver = {
+ .name = "dw_dmac_pci",
+ .id_table = dw_pci_id_table,
+ .probe = dw_pci_probe,
+ .remove = dw_pci_remove,
+};
+
+module_pci_driver(dw_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller PCI driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
new file mode 100644
index 000000000000..6c9449cffae8
--- /dev/null
+++ b/drivers/dma/dw/platform.c
@@ -0,0 +1,317 @@
+/*
+ * Platform driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Some parts of this driver are derived from the original dw_dmac.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+#include "internal.h"
+
+struct dw_dma_of_filter_args {
+ struct dw_dma *dw;
+ unsigned int req;
+ unsigned int src;
+ unsigned int dst;
+};
+
+static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_of_filter_args *fargs = param;
+
+ /* Ensure the device matches our channel */
+ if (chan->device != &fargs->dw->dma)
+ return false;
+
+ dwc->request_line = fargs->req;
+ dwc->src_master = fargs->src;
+ dwc->dst_master = fargs->dst;
+
+ return true;
+}
+
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_dma *dw = ofdma->of_dma_data;
+ struct dw_dma_of_filter_args fargs = {
+ .dw = dw,
+ };
+ dma_cap_mask_t cap;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ fargs.req = dma_spec->args[0];
+ fargs.src = dma_spec->args[1];
+ fargs.dst = dma_spec->args[2];
+
+ if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+ fargs.src >= dw->nr_masters ||
+ fargs.dst >= dw->nr_masters))
+ return NULL;
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ /* TODO: there should be a simpler way to do this */
+ return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+}
+
+#ifdef CONFIG_ACPI
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct acpi_dma_spec *dma_spec = param;
+
+ if (chan->device->dev != dma_spec->dev ||
+ chan->chan_id != dma_spec->chan_id)
+ return false;
+
+ dwc->request_line = dma_spec->slave_id;
+ dwc->src_master = dwc_get_sms(NULL);
+ dwc->dst_master = dwc_get_dms(NULL);
+
+ return true;
+}
+
+static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ struct acpi_dma_filter_info *info;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dma_cap_zero(info->dma_cap);
+ dma_cap_set(DMA_SLAVE, info->dma_cap);
+ info->filter_fn = dw_dma_acpi_filter;
+
+ ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+ info);
+ if (ret)
+ dev_err(dev, "could not register acpi_dma_controller\n");
+}
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct dw_dma_platform_data *pdata;
+ u32 tmp, arr[4];
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
+ return NULL;
+
+ if (of_property_read_bool(np, "is_private"))
+ pdata->is_private = true;
+
+ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+ pdata->chan_allocation_order = (unsigned char)tmp;
+
+ if (!of_property_read_u32(np, "chan_priority", &tmp))
+ pdata->chan_priority = tmp;
+
+ if (!of_property_read_u32(np, "block_size", &tmp))
+ pdata->block_size = tmp;
+
+ if (!of_property_read_u32(np, "dma-masters", &tmp)) {
+ if (tmp > 4)
+ return NULL;
+
+ pdata->nr_masters = tmp;
+ }
+
+ if (!of_property_read_u32_array(np, "data_width", arr,
+ pdata->nr_masters))
+ for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+ pdata->data_width[tmp] = arr[tmp];
+
+ return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
+static int dw_probe(struct platform_device *pdev)
+{
+ struct dw_dma_chip *chip;
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+ struct dw_dma_platform_data *pdata;
+ int err;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ /* Apply default dma_mask if needed */
+ if (!dev->dma_mask) {
+ dev->dma_mask = &dev->coherent_dma_mask;
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ }
+
+ pdata = dev_get_platdata(dev);
+ if (!pdata)
+ pdata = dw_dma_parse_dt(pdev);
+
+ chip->dev = dev;
+
+ err = dw_dma_probe(chip, pdata);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, chip);
+
+ if (pdev->dev.of_node) {
+ err = of_dma_controller_register(pdev->dev.of_node,
+ dw_dma_of_xlate, chip->dw);
+ if (err)
+ dev_err(&pdev->dev,
+ "could not register of_dma_controller\n");
+ }
+
+ if (ACPI_HANDLE(&pdev->dev))
+ dw_dma_acpi_controller_register(chip->dw);
+
+ return 0;
+}
+
+static int dw_remove(struct platform_device *pdev)
+{
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return dw_dma_remove(chip);
+}
+
+static void dw_shutdown(struct platform_device *pdev)
+{
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ dw_dma_shutdown(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_of_id_table[] = {
+ { .compatible = "snps,dma-spear1340" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+ { "INTL9C60", 0 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ return dw_dma_suspend(chip);
+}
+
+static int dw_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ return dw_dma_resume(chip);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define dw_suspend_noirq NULL
+#define dw_resume_noirq NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_dev_pm_ops = {
+ .suspend_noirq = dw_suspend_noirq,
+ .resume_noirq = dw_resume_noirq,
+ .freeze_noirq = dw_suspend_noirq,
+ .thaw_noirq = dw_resume_noirq,
+ .restore_noirq = dw_resume_noirq,
+ .poweroff_noirq = dw_suspend_noirq,
+};
+
+static struct platform_driver dw_driver = {
+ .probe = dw_probe,
+ .remove = dw_remove,
+ .shutdown = dw_shutdown,
+ .driver = {
+ .name = "dw_dmac",
+ .pm = &dw_dev_pm_ops,
+ .of_match_table = of_match_ptr(dw_dma_of_id_table),
+ .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+ },
+};
+
+static int __init dw_init(void)
+{
+ return platform_driver_register(&dw_driver);
+}
+subsys_initcall(dw_init);
+
+static void __exit dw_exit(void)
+{
+ platform_driver_unregister(&dw_driver);
+}
+module_exit(dw_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
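
With dw_dma_of_xlate() registered, a DT-based client no longer supplies its own
filter function: it requests a channel by name, and the three specifier cells
(request line, source master, destination master) are applied by dw_dma_of_filter()
above. A hedged consumer-side sketch; the device, the "tx" name and the FIFO
address are illustrative only:

static struct dma_chan *example_request_tx(struct device *dev, dma_addr_t fifo)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,
        };
        struct dma_chan *chan;

        /* Resolved through the client node's "dmas"/"dma-names" properties */
        chan = dma_request_slave_channel(dev, "tx");
        if (!chan)
                return ERR_PTR(-ENODEV);

        if (dmaengine_slave_config(chan, &cfg)) {
                dma_release_channel(chan);
                return ERR_PTR(-EINVAL);
        }

        return chan;
}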
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw/regs.h
index 9d417200bd57..deb4274f80f4 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw/regs.h
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
@@ -100,6 +101,12 @@ struct dw_dma_regs {
u32 DW_PARAMS;
};
+/*
+ * Big-endian I/O accessors are used when reading from and writing to the
+ * DMA controller registers. This is needed on some platforms, such as the
+ * Atmel AVR32 architecture.
+ */
+
#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
#define dma_readl_native ioread32be
#define dma_writel_native iowrite32be
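
For context, the little-endian fallback and the per-register wrappers that sit on
top of these definitions look roughly as follows (a sketch based on the existing
dw_dmac_regs.h helpers, not a literal copy of the header):

#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
#define dma_readl_native        ioread32be
#define dma_writel_native       iowrite32be
#else
#define dma_readl_native        readl
#define dma_writel_native       writel
#endif

/* Per-register helpers then work regardless of the controller's endianness: */
#define channel_readl(dwc, name) \
        dma_readl_native(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
        dma_writel_native((val), &(__dwc_regs(dwc)->name))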
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 4fc2980556ad..49e8fbdb8983 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1368,7 +1368,7 @@ static int fsldma_of_probe(struct platform_device *op)
dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
- dev_set_drvdata(&op->dev, fdev);
+ platform_set_drvdata(op, fdev);
/*
* We cannot use of_platform_bus_probe() because there is no
@@ -1417,7 +1417,7 @@ static int fsldma_of_remove(struct platform_device *op)
struct fsldma_device *fdev;
unsigned int i;
- fdev = dev_get_drvdata(&op->dev);
+ fdev = platform_get_drvdata(op);
dma_async_device_unregister(&fdev->common);
fsldma_free_irqs(fdev);
@@ -1428,7 +1428,6 @@ static int fsldma_of_remove(struct platform_device *op)
}
iounmap(fdev->regs);
- dev_set_drvdata(&op->dev, NULL);
kfree(fdev);
return 0;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f28583370d00..ff2aab973b45 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -27,6 +27,8 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>
@@ -186,6 +188,11 @@ struct imxdma_engine {
enum imx_dma_type devtype;
};
+struct imxdma_filter_data {
+ struct imxdma_engine *imxdma;
+ int request;
+};
+
static struct platform_device_id imx_dma_devtype[] = {
{
.name = "imx1-dma",
@@ -202,6 +209,22 @@ static struct platform_device_id imx_dma_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
+static const struct of_device_id imx_dma_of_dev_id[] = {
+ {
+ .compatible = "fsl,imx1-dma",
+ .data = &imx_dma_devtype[IMX1_DMA],
+ }, {
+ .compatible = "fsl,imx21-dma",
+ .data = &imx_dma_devtype[IMX21_DMA],
+ }, {
+ .compatible = "fsl,imx27-dma",
+ .data = &imx_dma_devtype[IMX27_DMA],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
+
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
return imxdma->devtype == IMX1_DMA;
@@ -996,17 +1019,55 @@ static void imxdma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&imxdma->lock, flags);
}
+static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct imxdma_filter_data *fdata = param;
+ struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);
+
+ if (chan->device->dev != fdata->imxdma->dev)
+ return false;
+
+ imxdma_chan->dma_request = fdata->request;
+ chan->private = NULL;
+
+ return true;
+}
+
+static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct imxdma_engine *imxdma = ofdma->of_dma_data;
+ struct imxdma_filter_data fdata = {
+ .imxdma = imxdma,
+ };
+
+ if (count != 1)
+ return NULL;
+
+ fdata.request = dma_spec->args[0];
+
+ return dma_request_channel(imxdma->dma_device.cap_mask,
+ imxdma_filter_fn, &fdata);
+}
+
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
struct resource *res;
+ const struct of_device_id *of_id;
int ret, i;
int irq, irq_err;
+ of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+
imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
if (!imxdma)
return -ENOMEM;
+ imxdma->dev = &pdev->dev;
imxdma->devtype = pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1111,7 +1172,6 @@ static int __init imxdma_probe(struct platform_device *pdev)
&imxdma->dma_device.channels);
}
- imxdma->dev = &pdev->dev;
imxdma->dma_device.dev = &pdev->dev;
imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
@@ -1136,8 +1196,19 @@ static int __init imxdma_probe(struct platform_device *pdev)
goto err;
}
+ if (pdev->dev.of_node) {
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ imxdma_xlate, imxdma);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register of_dma_controller\n");
+ goto err_of_dma_controller;
+ }
+ }
+
return 0;
+err_of_dma_controller:
+ dma_async_device_unregister(&imxdma->dma_device);
err:
clk_disable_unprepare(imxdma->dma_ipg);
clk_disable_unprepare(imxdma->dma_ahb);
@@ -1150,6 +1221,9 @@ static int imxdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&imxdma->dma_device);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
clk_disable_unprepare(imxdma->dma_ipg);
clk_disable_unprepare(imxdma->dma_ahb);
@@ -1159,6 +1233,7 @@ static int imxdma_remove(struct platform_device *pdev)
static struct platform_driver imxdma_driver = {
.driver = {
.name = "imx-dma",
+ .of_match_table = imx_dma_of_dev_id,
},
.id_table = imx_dma_devtype,
.remove = imxdma_remove,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 092867bf795c..1e44b8cf95da 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -36,6 +36,7 @@
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
@@ -1296,6 +1297,35 @@ err_dma_alloc:
return ret;
}
+static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+ struct imx_dma_data *data = fn_param;
+
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+
+ chan->private = data;
+
+ return true;
+}
+
+static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct sdma_engine *sdma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = sdma->dma_device.cap_mask;
+ struct imx_dma_data data;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ data.dma_request = dma_spec->args[0];
+ data.peripheral_type = dma_spec->args[1];
+ data.priority = dma_spec->args[2];
+
+ return dma_request_channel(mask, sdma_filter_fn, &data);
+}
+
static int __init sdma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -1443,10 +1473,20 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_init;
}
+ if (np) {
+ ret = of_dma_controller_register(np, sdma_xlate, sdma);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register controller\n");
+ goto err_register;
+ }
+ }
+
dev_info(sdma->dev, "initialized\n");
return 0;
+err_register:
+ dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
err_alloc:
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 43d5a6c33297..9b9366537d73 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -154,6 +154,10 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
{
writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
tdmac->reg_base + TDCR);
+
+ /* disable irq */
+ writel(0, tdmac->reg_base + TDIMR);
+
tdmac->status = DMA_SUCCESS;
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b48a79c28845..719593002ab7 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -693,7 +693,7 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
return true;
}
-struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
+static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 7aa0864cd487..75334bdd2c56 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -35,8 +35,7 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
struct of_dma *ofdma;
list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
- if ((ofdma->of_node == dma_spec->np) &&
- (ofdma->of_dma_nbcells == dma_spec->args_count))
+ if (ofdma->of_node == dma_spec->np)
return ofdma;
pr_debug("%s: can't find DMA controller %s\n", __func__,
@@ -64,8 +63,6 @@ int of_dma_controller_register(struct device_node *np,
void *data)
{
struct of_dma *ofdma;
- int nbcells;
- const __be32 *prop;
if (!np || !of_dma_xlate) {
pr_err("%s: not enough information provided\n", __func__);
@@ -76,19 +73,7 @@ int of_dma_controller_register(struct device_node *np,
if (!ofdma)
return -ENOMEM;
- prop = of_get_property(np, "#dma-cells", NULL);
- if (prop)
- nbcells = be32_to_cpup(prop);
-
- if (!prop || !nbcells) {
- pr_err("%s: #dma-cells property is missing or invalid\n",
- __func__);
- kfree(ofdma);
- return -EINVAL;
- }
-
ofdma->of_node = np;
- ofdma->of_dma_nbcells = nbcells;
ofdma->of_dma_xlate = of_dma_xlate;
ofdma->of_dma_data = data;
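
With the #dma-cells check gone from the core, validating the argument count becomes
the responsibility of each provider's xlate callback, which is exactly what the
xlate functions in this series do. A minimal skeleton, purely illustrative:

static struct dma_chan *example_of_xlate(struct of_phandle_args *dma_spec,
                                         struct of_dma *ofdma)
{
        /* The core no longer cross-checks #dma-cells, so do it here. */
        if (dma_spec->args_count != 1)
                return NULL;

        /* ... map dma_spec->args[0] to a channel and return it ... */
        return NULL;
}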
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 7ec82f0667eb..593827b3fdd4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -157,7 +157,6 @@ enum pl330_reqtype {
#define PERIPH_REV_R0P0 0
#define PERIPH_REV_R1P0 1
#define PERIPH_REV_R1P1 2
-#define PCELL_ID 0xff0
#define CR0_PERIPH_REQ_SET (1 << 0)
#define CR0_BOOT_EN_SET (1 << 1)
@@ -193,8 +192,6 @@ enum pl330_reqtype {
#define INTEG_CFG 0x0
#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
-#define PCELL_ID_VAL 0xb105f00d
-
#define PL330_STATE_STOPPED (1 << 0)
#define PL330_STATE_EXECUTING (1 << 1)
#define PL330_STATE_WFE (1 << 2)
@@ -292,7 +289,6 @@ static unsigned cmd_line;
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
u32 periph_id;
- u32 pcell_id;
#define DMAC_MODE_NS (1 << 0)
unsigned int mode;
unsigned int data_bus_width:10; /* In number of bits */
@@ -505,7 +501,7 @@ struct pl330_dmac {
/* Maximum possible events/irqs */
int events[32];
/* BUS address of MicroCode buffer */
- u32 mcode_bus;
+ dma_addr_t mcode_bus;
/* CPU address of MicroCode buffer */
void *mcode_cpu;
/* List of all Channel threads */
@@ -650,19 +646,6 @@ static inline bool _manager_ns(struct pl330_thread *thrd)
return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
-static inline u32 get_id(struct pl330_info *pi, u32 off)
-{
- void __iomem *regs = pi->base;
- u32 id = 0;
-
- id |= (readb(regs + off + 0x0) << 0);
- id |= (readb(regs + off + 0x4) << 8);
- id |= (readb(regs + off + 0x8) << 16);
- id |= (readb(regs + off + 0xc) << 24);
-
- return id;
-}
-
static inline u32 get_revision(u32 periph_id)
{
return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
@@ -1986,9 +1969,6 @@ static void read_dmac_config(struct pl330_info *pi)
pi->pcfg.num_events = val;
pi->pcfg.irq_ns = readl(regs + CR3);
-
- pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
- pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
static inline void _reset_thread(struct pl330_thread *thrd)
@@ -2098,10 +2078,8 @@ static int pl330_add(struct pl330_info *pi)
regs = pi->base;
/* Check if we can handle this DMAC */
- if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
- || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
- dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
- get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
+ if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
+ dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
return -EINVAL;
}
@@ -2916,6 +2894,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
return ret;
+ pi->pcfg.periph_id = adev->periphid;
ret = pl330_add(pi);
if (ret)
goto probe_err1;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 1e220f8dfd8c..370ff8265630 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4434,7 +4434,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
adev->dev = &ofdev->dev;
adev->common.dev = &ofdev->dev;
INIT_LIST_HEAD(&adev->common.channels);
- dev_set_drvdata(&ofdev->dev, adev);
+ platform_set_drvdata(ofdev, adev);
/* create a channel */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
@@ -4547,14 +4547,13 @@ out:
*/
static int ppc440spe_adma_remove(struct platform_device *ofdev)
{
- struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
+ struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct dma_chan *chan, *_chan;
struct ppc_dma_chan_ref *ref, *_ref;
struct ppc440spe_adma_chan *ppc440spe_chan;
- dev_set_drvdata(&ofdev->dev, NULL);
if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
ppc440spe_adma_devices[adev->id] = -1;
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c07ca4612e46..c962138dde96 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,3 @@
-obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 4acb85a10250..28ca36121631 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -175,7 +175,18 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
- int ret;
+ int ret, match;
+
+ if (schan->dev->of_node) {
+ match = schan->hw_req;
+ ret = ops->set_slave(schan, match, true);
+ if (ret < 0)
+ return ret;
+
+ slave_id = schan->slave_id;
+ } else {
+ match = slave_id;
+ }
if (slave_id < 0 || slave_id >= slave_num)
return -EINVAL;
@@ -183,7 +194,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (test_and_set_bit(slave_id, shdma_slave_used))
return -EBUSY;
- ret = ops->set_slave(schan, slave_id, false);
+ ret = ops->set_slave(schan, match, false);
if (ret < 0) {
clear_bit(slave_id, shdma_slave_used);
return ret;
@@ -206,23 +217,26 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
* services would have to provide their own filters, which first would check
* the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
* this, and only then, in case of a match, call this common filter.
+ * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
+ * In that case the MID-RID value is used for slave channel filtering and is
+ * passed to this function in the "arg" parameter.
*/
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
- int slave_id = (int)arg;
+ int match = (int)arg;
int ret;
- if (slave_id < 0)
+ if (match < 0)
/* No slave requested - arbitrary channel */
return true;
- if (slave_id >= slave_num)
+ if (!schan->dev->of_node && match >= slave_num)
return false;
- ret = ops->set_slave(schan, slave_id, true);
+ ret = ops->set_slave(schan, match, true);
if (ret < 0)
return false;
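
The filter argument is therefore overloaded: legacy board code keeps passing a
platform slave ID, while the DT path (shdma_of_xlate() below) passes a MID/RID
value and relies on the of_node check to skip the slave_num range test. A hedged
sketch of the legacy, non-DT call site this preserves; where the slave_id comes
from is illustrative:

static struct dma_chan *example_request_shdma_chan(int slave_id)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* slave_id typically comes from the client's platform data */
        return dma_request_channel(mask, shdma_chan_filter,
                                   (void *)(unsigned long)slave_id);
}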
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
new file mode 100644
index 000000000000..11bcb05cd79c
--- /dev/null
+++ b/drivers/dma/sh/shdma-of.c
@@ -0,0 +1,82 @@
+/*
+ * SHDMA Device Tree glue
+ *
+ * Copyright (C) 2013 Renesas Electronics Inc.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/shdma-base.h>
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+
+static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ u32 id = dma_spec->args[0];
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ dma_cap_zero(mask);
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+ if (chan)
+ to_shdma_chan(chan)->hw_req = id;
+
+ return chan;
+}
+
+static int shdma_of_probe(struct platform_device *pdev)
+{
+ const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+ int ret;
+
+ if (!lookup)
+ return -EINVAL;
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ shdma_of_xlate, pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev);
+ if (ret < 0)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return ret;
+}
+
+static const struct of_device_id shdma_of_match[] = {
+ { .compatible = "renesas,shdma-mux", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, shdma_of_match);
+
+static struct platform_driver shdma_of = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "shdma-of",
+ .of_match_table = shdma_of_match,
+ },
+ .probe = shdma_of_probe,
+};
+
+module_platform_driver(shdma_of);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SH-DMA driver DT glue");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b70709b030d8..b67f45f5c271 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -301,20 +301,32 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
}
}
+/*
+ * Find a slave channel configuration from the controller list by either a slave
+ * ID in the non-DT case, or by a MID/RID value in the DT case
+ */
static const struct sh_dmae_slave_config *dmae_find_slave(
- struct sh_dmae_chan *sh_chan, int slave_id)
+ struct sh_dmae_chan *sh_chan, int match)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_slave_config *cfg;
int i;
- if (slave_id >= SH_DMA_SLAVE_NUMBER)
- return NULL;
+ if (!sh_chan->shdma_chan.dev->of_node) {
+ if (match >= SH_DMA_SLAVE_NUMBER)
+ return NULL;
- for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
- if (cfg->slave_id == slave_id)
- return cfg;
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == match)
+ return cfg;
+ } else {
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->mid_rid == match) {
+ sh_chan->shdma_chan.slave_id = cfg->slave_id;
+ return cfg;
+ }
+ }
return NULL;
}
@@ -729,7 +741,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
goto eshdma;
/* platform data */
- shdev->pdata = pdev->dev.platform_data;
+ shdev->pdata = pdata;
if (pdata->chcr_offset)
shdev->chcr_offset = pdata->chcr_offset;
@@ -920,11 +932,18 @@ static int sh_dmae_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id sh_dmae_of_match[] = {
+ { .compatible = "renesas,shdma", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
static struct platform_driver sh_dmae_driver = {
.driver = {
.owner = THIS_MODULE,
.pm = &sh_dmae_pm,
.name = SH_DMAE_DRV_NAME,
+ .of_match_table = sh_dmae_of_match,
},
.remove = sh_dmae_remove,
.shutdown = sh_dmae_shutdown,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 1765a0a2736d..716b23e4f327 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -466,12 +466,29 @@ static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
unsigned long flags;
enum dma_status ret;
+ struct sirfsoc_dma_desc *sdesc;
+ int cid = schan->chan.chan_id;
+ unsigned long dma_pos;
+ unsigned long dma_request_bytes;
+ unsigned long residue;
spin_lock_irqsave(&schan->lock, flags);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
+ (sdesc->width * SIRFSOC_DMA_WORD_LEN);
+
ret = dma_cookie_status(chan, cookie, txstate);
+ dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
+ << 2;
+ residue = dma_request_bytes - (dma_pos - sdesc->addr);
+ dma_set_residue(txstate, residue);
+
spin_unlock_irqrestore(&schan->lock, flags);
return ret;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 33f59ecd256e..f137914d7b16 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1191,6 +1191,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
INIT_LIST_HEAD(&tdc->cb_desc);
tdc->config_init = false;
+ tdc->isr_handler = NULL;
spin_unlock_irqrestore(&tdc->lock, flags);
while (!list_empty(&dma_desc_list)) {
@@ -1334,7 +1335,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"request_irq failed with err %d channel %d\n",
- i, ret);
+ ret, i);
goto err_irq;
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 26107ba6edb3..0ef43c136aa7 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -811,8 +811,6 @@ static int td_remove(struct platform_device *pdev)
kfree(td);
release_mem_region(iomem->start, resource_size(iomem));
- platform_set_drvdata(pdev, NULL);
-
dev_dbg(&pdev->dev, "Removed...\n");
return 0;
}