author    Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 00:45:17 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 00:45:17 +0200
commit    6039b80eb50a893476fea7d56e86ed2d19290054 (patch)
tree      9cb535ca7604f4e1859dfac221ada671368b149b /drivers/dma
parent    Merge tag 'hwlock-v4.8' of git://github.com/andersson/remoteproc (diff)
parent    Merge branch 'topic/dmaengine_cleanups' into for-linus (diff)
Merge tag 'dmaengine-4.8-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "This time we have bit of largish changes: two new drivers, bunch of
  updates and cleanups to existing set. Nothing super exciting though.

  New drivers:
   - Xilinx zynqmp dma engine driver
   - Marvell xor2 driver

  Updates:
   - dmatest sg support
   - updates and enhancements to Xilinx drivers, adding of cyclic mode
   - clock handling fixes across drivers
   - removal of OOM messages on kzalloc across subsystem
   - interleaved transfers support in omap driver
   - runtime pm support in qcom bam dma
   - tasklet kill freeup across drivers
   - irq cleanup on remove across drivers"

* tag 'dmaengine-4.8-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (94 commits)
  dmaengine: k3dma: add missing clk_disable_unprepare() on error in k3_dma_probe()
  dmaengine: zynqmp_dma: add missing MODULE_LICENSE
  dmaengine: qcom_hidma: use for_each_matching_node() macro
  dmaengine: zynqmp_dma: Fix static checker warning
  dmaengine: omap-dma: Support for interleaved transfer
  dmaengine: ioat: statify symbol
  dmaengine: pxa_dma: implement device_synchronize
  dmaengine: imx-sdma: remove assignment never used
  dmaengine: imx-sdma: remove dummy assignment
  dmaengine: cppi: remove unused and bogus check
  dmaengine: qcom_hidma_lli: kill the tasklets upon exit
  dmaengine: pxa_dma: remove owner assignment
  dmaengine: fsl_raid: remove owner assignment
  dmaengine: coh901318: remove owner assignment
  dmaengine: qcom_hidma: kill the tasklets upon exit
  dmaengine: txx9dmac: explicitly freeup irq
  dmaengine: sirf-dma: kill the tasklets upon exit
  dmaengine: s3c24xx: kill the tasklets upon exit
  dmaengine: s3c24xx: explicitly freeup irq
  dmaengine: pl330: explicitly freeup irq
  ...
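
Several commits in this series ("kill the tasklets upon exit", "explicitly freeup irq") converge on the same remove-path ordering: free the IRQ so no new interrupts can fire, kill the channel tasklet, then unregister the device. A minimal sketch of that pattern, with hypothetical foo_* names not taken from any driver in this pull:

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct foo_dma_dev {
	struct dma_device slave;
	struct tasklet_struct task;
	unsigned int irq;	/* saved at probe time, as these patches now do */
};

static int foo_dma_remove(struct platform_device *op)
{
	struct foo_dma_dev *d = platform_get_drvdata(op);

	/*
	 * Free the IRQ first: a devm-managed IRQ would only be released
	 * after remove() returns, so the handler could still schedule
	 * the tasklet we are about to kill.
	 */
	devm_free_irq(&op->dev, d->irq, d);

	/* Wait for any scheduled or running tasklet to finish. */
	tasklet_kill(&d->task);

	dma_async_device_unregister(&d->slave);
	return 0;
}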
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                         |   32
-rw-r--r--  drivers/dma/Makefile                        |    1
-rw-r--r--  drivers/dma/amba-pl08x.c                    |   10
-rw-r--r--  drivers/dma/at_xdmac.c                      |    8
-rw-r--r--  drivers/dma/bcm2835-dma.c                   |    7
-rw-r--r--  drivers/dma/bestcomm/bestcomm.c             |    2
-rw-r--r--  drivers/dma/coh901318.c                     |   32
-rw-r--r--  drivers/dma/cppi41.c                        |    3
-rw-r--r--  drivers/dma/dma-axi-dmac.c                  |    8
-rw-r--r--  drivers/dma/dma-jz4740.c                    |   14
-rw-r--r--  drivers/dma/dmatest.c                       |   43
-rw-r--r--  drivers/dma/edma.c                          |   52
-rw-r--r--  drivers/dma/fsl-edma.c                      |   49
-rw-r--r--  drivers/dma/fsl_raid.c                      |    9
-rw-r--r--  drivers/dma/fsldma.c                        |    2
-rw-r--r--  drivers/dma/imx-dma.c                       |   31
-rw-r--r--  drivers/dma/imx-sdma.c                      |   32
-rw-r--r--  drivers/dma/ioat/init.c                     |    2
-rw-r--r--  drivers/dma/k3dma.c                         |   19
-rw-r--r--  drivers/dma/mmp_pdma.c                      |   19
-rw-r--r--  drivers/dma/mmp_tdma.c                      |    9
-rw-r--r--  drivers/dma/moxart-dma.c                    |    8
-rw-r--r--  drivers/dma/mpc512x_dma.c                   |    1
-rw-r--r--  drivers/dma/mv_xor.c                        |    2
-rw-r--r--  drivers/dma/mv_xor_v2.c                     |  878
-rw-r--r--  drivers/dma/nbpfaxi.c                       |   18
-rw-r--r--  drivers/dma/omap-dma.c                      |  100
-rw-r--r--  drivers/dma/pl330.c                         |   11
-rw-r--r--  drivers/dma/ppc4xx/adma.c                   |    2
-rw-r--r--  drivers/dma/pxa_dma.c                       |   16
-rw-r--r--  drivers/dma/qcom/bam_dma.c                  |  109
-rw-r--r--  drivers/dma/qcom/hidma.c                    |    1
-rw-r--r--  drivers/dma/qcom/hidma_ll.c                 |    1
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c               |    7
-rw-r--r--  drivers/dma/s3c24xx-dma.c                   |   29
-rw-r--r--  drivers/dma/sh/rcar-dmac.c                  |   41
-rw-r--r--  drivers/dma/sh/shdmac.c                     |    9
-rw-r--r--  drivers/dma/sh/sudmac.c                     |    9
-rw-r--r--  drivers/dma/sirf-dma.c                      |   12
-rw-r--r--  drivers/dma/ste_dma40.c                     |    6
-rw-r--r--  drivers/dma/ste_dma40_ll.c                  |    2
-rw-r--r--  drivers/dma/sun6i-dma.c                     |    2
-rw-r--r--  drivers/dma/tegra20-apb-dma.c               |   49
-rw-r--r--  drivers/dma/ti-dma-crossbar.c               |    2
-rw-r--r--  drivers/dma/timb_dma.c                      |    8
-rw-r--r--  drivers/dma/txx9dmac.c                      |    9
-rw-r--r--  drivers/dma/xilinx/Makefile                 |    3
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c (renamed from drivers/dma/xilinx/xilinx_vdma.c) | 489
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c             | 1151
49 files changed, 3108 insertions(+), 251 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8c98779a12b1..739f797b40d9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -339,6 +339,20 @@ config MV_XOR
---help---
Enable support for the Marvell XOR engine.
+config MV_XOR_V2
+ bool "Marvell XOR engine version 2 support "
+ depends on ARM64
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select GENERIC_MSI_IRQ_DOMAIN
+ ---help---
+ Enable support for the Marvell version 2 XOR engine.
+
+ This engine provides acceleration for copy, XOR and RAID6
+ operations, and is available on Marvell Armada 7K and 8K
+ platforms.
+
config MXS_DMA
bool "MXS DMA support"
depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
@@ -519,19 +533,31 @@ config XGENE_DMA
help
Enable support for the APM X-Gene SoC DMA engine.
-config XILINX_VDMA
- tristate "Xilinx AXI VDMA Engine"
+config XILINX_DMA
+ tristate "Xilinx AXI DMAS Engine"
depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
select DMA_ENGINE
help
Enable support for Xilinx AXI VDMA Soft IP.
- This engine provides high-bandwidth direct memory access
+ AXI VDMA engine provides high-bandwidth direct memory access
between memory and AXI4-Stream video type target
peripherals including peripherals which support AXI4-
Stream Video Protocol. It has two stream interfaces/
channels, Memory Mapped to Stream (MM2S) and Stream to
Memory Mapped (S2MM) for the data transfers.
+ AXI CDMA engine provides high-bandwidth direct memory access
+ between a memory-mapped source address and a memory-mapped
+ destination address.
+ AXI DMA engine provides high-bandwidth one dimensional direct
+ memory access between memory and AXI4-Stream target peripherals.
+
+config XILINX_ZYNQMP_DMA
+ tristate "Xilinx ZynqMP DMA Engine"
+ depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx ZynqMP DMA controller.
config ZX_DMA
tristate "ZTE ZX296702 DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 614f28b0b739..e4dc9cac7ee8 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 81db1c4811ce..939a7c31f760 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1443,8 +1443,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
if (!dsg) {
pl08x_free_txd(pl08x, txd);
- dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
- __func__);
return NULL;
}
list_add_tail(&dsg->node, &txd->dsg_list);
@@ -1901,11 +1899,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
*/
for (i = 0; i < channels; i++) {
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan) {
- dev_err(&pl08x->adev->dev,
- "%s no memory for channel\n", __func__);
+ if (!chan)
return -ENOMEM;
- }
chan->host = pl08x;
chan->state = PL08X_CHAN_IDLE;
@@ -2360,9 +2355,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL);
if (!pl08x->phy_chans) {
- dev_err(&adev->dev, "%s failed to allocate "
- "physical channel holders\n",
- __func__);
ret = -ENOMEM;
goto out_no_phychans;
}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 75bd6621dc5d..e434ffe7bc5c 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -456,7 +456,7 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
return desc;
}
-void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
memset(&desc->lld, 0, sizeof(desc->lld));
INIT_LIST_HEAD(&desc->descs_list);
@@ -1195,14 +1195,14 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
desc->lld.mbr_cfg = chan_cc;
dev_dbg(chan2dev(chan),
- "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
- __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
+ "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+ __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
desc->lld.mbr_cfg);
return desc;
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6149b27c33ad..e18dc596cf24 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
unsigned int sg_len)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- size_t max_len = bcm2835_dma_max_frame_length(c);
- unsigned int i, len;
+ size_t len, max_len;
+ unsigned int i;
dma_addr_t addr;
struct scatterlist *sgent;
+ max_len = bcm2835_dma_max_frame_length(c);
for_each_sg(sgl, sgent, sg_len, i) {
for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
len > 0;
@@ -613,7 +614,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&c->vc.lock, flags);
}
-struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 180fedb418cc..7ce843723003 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -397,8 +397,6 @@ static int mpc52xx_bcom_probe(struct platform_device *op)
/* Get a clean struct */
bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
if (!bcom_eng) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Can't allocate state structure\n");
rv = -ENOMEM;
goto error_sramclean;
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index c340ca9bd2b5..e4acd63e42aa 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -266,7 +266,7 @@ static int dma_memcpy_channels[] = {
COH901318_CX_CTRL_DDMA_LEGACY | \
COH901318_CX_CTRL_PRDD_SOURCE)
-const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
{
.number = U300_DMA_MSL_TX_0,
.name = "MSL TX 0",
@@ -1280,6 +1280,7 @@ struct coh901318_desc {
struct coh901318_base {
struct device *dev;
void __iomem *virtbase;
+ unsigned int irq;
struct coh901318_pool pool;
struct powersave pm;
struct dma_device dma_slave;
@@ -1364,7 +1365,6 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
}
static const struct file_operations coh901318_debugfs_status_operations = {
- .owner = THIS_MODULE,
.open = simple_open,
.read = coh901318_debugfs_read,
.llseek = default_llseek,
@@ -2422,7 +2422,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2680,6 +2680,8 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (err)
return err;
+ base->irq = irq;
+
err = coh901318_pool_create(&base->pool, &pdev->dev,
sizeof(struct coh901318_lli),
32);
@@ -2755,11 +2757,31 @@ static int __init coh901318_probe(struct platform_device *pdev)
coh901318_pool_destroy(&base->pool);
return err;
}
+static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans)
+{
+ int chans_i;
+ int i = 0;
+ struct coh901318_chan *cohc;
+
+ for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
+ for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
+ cohc = &base->chans[i];
+
+ tasklet_kill(&cohc->tasklet);
+ }
+ }
+
+}
static int coh901318_remove(struct platform_device *pdev)
{
struct coh901318_base *base = platform_get_drvdata(pdev);
+ devm_free_irq(&pdev->dev, base->irq, base);
+
+ coh901318_base_remove(base, dma_slave_channels);
+ coh901318_base_remove(base, dma_memcpy_channels);
+
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&base->dma_memcpy);
dma_async_device_unregister(&base->dma_slave);
@@ -2780,13 +2802,13 @@ static struct platform_driver coh901318_driver = {
},
};
-int __init coh901318_init(void)
+static int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
subsys_initcall(coh901318_init);
-void __exit coh901318_exit(void)
+static void __exit coh901318_exit(void)
{
platform_driver_unregister(&coh901318_driver);
}
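
The coh901318_tx_status() hunk above also short-circuits when the caller passes no dma_tx_state, skipping a residue computation nobody will read. The generic shape of that pattern, as a sketch (foo_get_bytes_left() stands in for the driver's real residue helper; dma_cookie_status() and dma_set_residue() come from the drivers/dma private header):

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* drivers/dma private helpers */

static u32 foo_get_bytes_left(struct dma_chan *chan)
{
	return 0;	/* stand-in for the driver's residue computation */
}

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	/* nothing left to report, or nowhere to report it */
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	dma_set_residue(txstate, foo_get_bytes_left(chan));
	return ret;
}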
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index ceedafbd23e0..4b2317426c8e 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -497,16 +497,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
struct cppi41_desc *d;
struct scatterlist *sg;
unsigned int i;
- unsigned int num;
- num = 0;
d = c->desc;
for_each_sg(sgl, sg, sg_len, i) {
u32 addr;
u32 len;
/* We need to use more than one desc once musb supports sg */
- BUG_ON(num > 0);
addr = lower_32_bits(sg_dma_address(sg));
len = sg_dma_len(sg);
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index c3468094393e..7f0b9aa15867 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -270,6 +270,9 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
unsigned int pending;
pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
+ if (!pending)
+ return IRQ_NONE;
+
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);
spin_lock(&dmac->chan.vchan.lock);
@@ -579,7 +582,9 @@ static int axi_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
dmac->irq = platform_get_irq(pdev, 0);
- if (dmac->irq <= 0)
+ if (dmac->irq < 0)
+ return dmac->irq;
+ if (dmac->irq == 0)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -683,6 +688,7 @@ static const struct of_device_id axi_dmac_of_match_table[] = {
{ .compatible = "adi,axi-dmac-1.00.a" },
{ },
};
+MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);
static struct platform_driver axi_dmac_driver = {
.driver = {
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 7638b24ce8d0..9689b36c005a 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -573,12 +573,26 @@ err_unregister:
return ret;
}
+static void jz4740_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct jz4740_dmaengine_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+
+
static int jz4740_dma_remove(struct platform_device *pdev)
{
struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
free_irq(irq, dmadev);
+
+ jz4740_cleanup_vchan(&dmadev->ddev);
dma_async_device_unregister(&dmadev->ddev);
clk_disable_unprepare(dmadev->clk);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b8576fd6bd0e..1245db5438e1 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -51,6 +51,16 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
+static unsigned int sg_buffers = 1;
+module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sg_buffers,
+ "Number of scatter gather buffers (default: 1)");
+
+static unsigned int dmatest = 1;
+module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dmatest,
+ "dmatest 0-memcpy 1-slave_sg (default: 1)");
+
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
@@ -431,6 +441,8 @@ static int dmatest_func(void *data)
dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
+ else if (thread->type == DMA_SG)
+ src_cnt = dst_cnt = sg_buffers;
else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -485,6 +497,8 @@ static int dmatest_func(void *data)
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
u8 align = 0;
+ struct scatterlist tx_sg[src_cnt];
+ struct scatterlist rx_sg[src_cnt];
total_tests++;
@@ -577,10 +591,22 @@ static int dmatest_func(void *data)
um->bidi_cnt++;
}
+ sg_init_table(tx_sg, src_cnt);
+ sg_init_table(rx_sg, src_cnt);
+ for (i = 0; i < src_cnt; i++) {
+ sg_dma_address(&rx_sg[i]) = srcs[i];
+ sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
+ sg_dma_len(&tx_sg[i]) = len;
+ sg_dma_len(&rx_sg[i]) = len;
+ }
+
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst_off,
srcs[0], len, flags);
+ else if (thread->type == DMA_SG)
+ tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
+ rx_sg, src_cnt, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dsts[0] + dst_off,
@@ -748,6 +774,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
if (type == DMA_MEMCPY)
op = "copy";
+ else if (type == DMA_SG)
+ op = "sg";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
@@ -802,9 +830,19 @@ static int dmatest_add_channel(struct dmatest_info *info,
INIT_LIST_HEAD(&dtc->threads);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
- thread_count += cnt > 0 ? cnt : 0;
+ if (dmatest == 0) {
+ cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
}
+
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+ if (dmatest == 1) {
+ cnt = dmatest_add_threads(info, dtc, DMA_SG);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ }
+
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(info, dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
@@ -877,6 +915,7 @@ static void run_threaded_test(struct dmatest_info *info)
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_XOR);
+ request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
}
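
The dmatest changes above add a DMA_SG mode selected via the new dmatest= and sg_buffers= module parameters. For a channel to be exercised in this mode, its driver must advertise the capability and supply the sg-to-sg prep hook of the 4.8-era dmaengine API; a sketch of the registration side (foo_* names are illustrative):

#include <linux/dmaengine.h>

/* the driver's scatter-gather copy prep; body omitted in this sketch */
static struct dma_async_tx_descriptor *
foo_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags);

static int foo_register(struct dma_device *dev)
{
	dma_cap_set(DMA_SG, dev->cap_mask);
	dev->device_prep_dma_sg = foo_prep_dma_sg;
	return dma_async_device_register(dev);
}

dmatest is then loaded with, for example, "modprobe dmatest dmatest=1 sg_buffers=4", and builds the tx/rx scatterlists with sg_dma_address()/sg_dma_len() exactly as in the hunk above.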
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 8181ed131386..3d277fa76c1a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -239,6 +239,9 @@ struct edma_cc {
bool chmap_exist;
enum dma_event_q default_queue;
+ unsigned int ccint;
+ unsigned int ccerrint;
+
/*
* The slot_inuse bit for each PaRAM slot is clear unless the slot is
* in use by Linux or if it is allocated to be used by DSP.
@@ -1069,10 +1072,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
GFP_ATOMIC);
- if (!edesc) {
- dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+ if (!edesc)
return NULL;
- }
edesc->pset_nr = sg_len;
edesc->residue = 0;
@@ -1114,14 +1115,17 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc->absync = ret;
edesc->residue += sg_dma_len(sg);
- /* If this is the last in a current SG set of transactions,
- enable interrupts so that next set is processed */
- if (!((i+1) % MAX_NR_SG))
- edesc->pset[i].param.opt |= TCINTEN;
-
- /* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
+ /* Enable completion interrupt */
edesc->pset[i].param.opt |= TCINTEN;
+ else if (!((i+1) % MAX_NR_SG))
+ /*
+ * Enable early completion interrupt for the
+ * intermediate set. In this case the driver will be
+ * notified when the paRAM set is submitted to TC. This
+ * will allow more time to set up the next set of slots.
+ */
+ edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
}
edesc->residue_stat = edesc->residue;
@@ -1173,10 +1177,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
- if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ if (!edesc)
return NULL;
- }
edesc->pset_nr = nslots;
edesc->residue = edesc->residue_stat = len;
@@ -1298,10 +1300,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
- if (!edesc) {
- dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+ if (!edesc)
return NULL;
- }
edesc->cyclic = 1;
edesc->pset_nr = nslots;
@@ -2207,10 +2207,8 @@ static int edma_probe(struct platform_device *pdev)
return ret;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
- if (!ecc) {
- dev_err(dev, "Can't allocate controller\n");
+ if (!ecc)
return -ENOMEM;
- }
ecc->dev = dev;
ecc->id = pdev->id;
@@ -2288,6 +2286,7 @@ static int edma_probe(struct platform_device *pdev)
dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
return ret;
}
+ ecc->ccint = irq;
}
irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
@@ -2303,6 +2302,7 @@ static int edma_probe(struct platform_device *pdev)
dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
return ret;
}
+ ecc->ccerrint = irq;
}
ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
@@ -2393,11 +2393,27 @@ err_reg1:
return ret;
}
+static void edma_cleanupp_vchan(struct dma_device *dmadev)
+{
+ struct edma_chan *echan, *_echan;
+
+ list_for_each_entry_safe(echan, _echan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&echan->vchan.chan.device_node);
+ tasklet_kill(&echan->vchan.task);
+ }
+}
+
static int edma_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct edma_cc *ecc = dev_get_drvdata(dev);
+ devm_free_irq(dev, ecc->ccint, ecc);
+ devm_free_irq(dev, ecc->ccerrint, ecc);
+
+ edma_cleanupp_vchan(&ecc->dma_slave);
+
if (dev->of_node)
of_dma_controller_free(dev->of_node);
dma_async_device_unregister(&ecc->dma_slave);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index be2e62b87948..6775f2c74e25 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -852,6 +852,25 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static void fsl_edma_irq_exit(
+ struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ if (fsl_edma->txirq == fsl_edma->errirq) {
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ } else {
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
+ }
+}
+
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+{
+ int i;
+
+ for (i = 0; i < DMAMUX_NR; i++)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+}
+
static int fsl_edma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -897,6 +916,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
ret = clk_prepare_enable(fsl_edma->muxclk[i]);
if (ret) {
+ /* disable only clks which were enabled on error */
+ for (; i >= 0; i--)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+
dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
return ret;
}
@@ -951,14 +974,18 @@ static int fsl_edma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&fsl_edma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+ dev_err(&pdev->dev,
+ "Can't register Freescale eDMA engine. (%d)\n", ret);
+ fsl_disable_clocks(fsl_edma);
return ret;
}
ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
if (ret) {
- dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+ dev_err(&pdev->dev,
+ "Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
+ fsl_disable_clocks(fsl_edma);
return ret;
}
@@ -968,17 +995,27 @@ static int fsl_edma_probe(struct platform_device *pdev)
return 0;
}
+static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct fsl_edma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+
static int fsl_edma_remove(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
- int i;
+ fsl_edma_irq_exit(pdev, fsl_edma);
+ fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
-
- for (i = 0; i < DMAMUX_NR; i++)
- clk_disable_unprepare(fsl_edma->muxclk[i]);
+ fsl_disable_clocks(fsl_edma);
return 0;
}
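
The fsl-edma hunks fix two clock-handling leaks: a failed clk_prepare_enable() now rolls back the clocks already enabled, and late probe failures go through fsl_disable_clocks(). The rollback idiom, reduced to its core (nr and clks[] are illustrative):

#include <linux/clk.h>

static int foo_enable_clocks(struct clk **clks, int nr)
{
	int i, ret;

	for (i = 0; i < nr; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret) {
			/* unwind only the clocks that were enabled */
			while (--i >= 0)
				clk_disable_unprepare(clks[i]);
			return ret;
		}
	}
	return 0;
}

(Note that the in-tree hunk starts its unwind loop at the failing index itself; the sketch starts one below it, which is the balanced form.)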
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 4d9470f16552..aad167eaaee8 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -337,7 +337,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
re_chan = container_of(chan, struct fsl_re_chan, chan);
if (len > FSL_RE_MAX_DATA_LEN) {
- dev_err(re_chan->dev, "genq tx length %lu, max length %d\n",
+ dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
len, FSL_RE_MAX_DATA_LEN);
return NULL;
}
@@ -424,7 +424,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
re_chan = container_of(chan, struct fsl_re_chan, chan);
if (len > FSL_RE_MAX_DATA_LEN) {
- dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n",
+ dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
len, FSL_RE_MAX_DATA_LEN);
return NULL;
}
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
re_chan = container_of(chan, struct fsl_re_chan, chan);
if (len > FSL_RE_MAX_DATA_LEN) {
- dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n",
+ dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
len, FSL_RE_MAX_DATA_LEN);
return NULL;
}
@@ -856,6 +856,8 @@ static int fsl_re_probe(struct platform_device *ofdev)
static void fsl_re_remove_chan(struct fsl_re_chan *chan)
{
+ tasklet_kill(&chan->irqtask);
+
dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
chan->inb_phys_addr);
@@ -890,7 +892,6 @@ static struct of_device_id fsl_re_ids[] = {
static struct platform_driver fsl_re_driver = {
.driver = {
.name = "fsl-raideng",
- .owner = THIS_MODULE,
.of_match_table = fsl_re_ids,
},
.probe = fsl_re_probe,
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index a8828ed639b3..911b7177eb50 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1234,7 +1234,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
/* alloc channel */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan) {
- dev_err(fdev->dev, "no free memory for DMA channels!\n");
err = -ENOMEM;
goto out_return;
}
@@ -1340,7 +1339,6 @@ static int fsldma_of_probe(struct platform_device *op)
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev) {
- dev_err(&op->dev, "No enough memory for 'priv'\n");
err = -ENOMEM;
goto out_return;
}
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 48d85f8b95fe..a960608c0a4d 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -167,6 +167,7 @@ struct imxdma_channel {
u32 ccr_to_device;
bool enabled_2d;
int slot_2d;
+ unsigned int irq;
};
enum imx_dma_type {
@@ -186,6 +187,9 @@ struct imxdma_engine {
struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
struct imxdma_channel channel[IMX_DMA_CHANNELS];
enum imx_dma_type devtype;
+ unsigned int irq;
+ unsigned int irq_err;
+
};
struct imxdma_filter_data {
@@ -1048,7 +1052,7 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
}
static int __init imxdma_probe(struct platform_device *pdev)
- {
+{
struct imxdma_engine *imxdma;
struct resource *res;
const struct of_device_id *of_id;
@@ -1100,6 +1104,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
goto disable_dma_ahb_clk;
}
+ imxdma->irq = irq;
irq_err = platform_get_irq(pdev, 1);
if (irq_err < 0) {
@@ -1113,6 +1118,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
goto disable_dma_ahb_clk;
}
+ imxdma->irq_err = irq_err;
}
/* enable DMA module */
@@ -1150,6 +1156,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
irq + i, i);
goto disable_dma_ahb_clk;
}
+
+ imxdmac->irq = irq + i;
init_timer(&imxdmac->watchdog);
imxdmac->watchdog.function = &imxdma_watchdog;
imxdmac->watchdog.data = (unsigned long)imxdmac;
@@ -1217,10 +1225,31 @@ disable_dma_ipg_clk:
return ret;
}
+static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
+{
+ int i;
+
+ if (is_imx1_dma(imxdma)) {
+ disable_irq(imxdma->irq);
+ disable_irq(imxdma->irq_err);
+ }
+
+ for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+ if (!is_imx1_dma(imxdma))
+ disable_irq(imxdmac->irq);
+
+ tasklet_kill(&imxdmac->dma_tasklet);
+ }
+}
+
static int imxdma_remove(struct platform_device *pdev)
{
struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
+ imxdma_free_irq(pdev, imxdma);
+
dma_async_device_unregister(&imxdma->dma_device);
if (pdev->dev.of_node)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 0f6fd42f55ca..03ec76fc22ff 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
@@ -385,6 +386,7 @@ struct sdma_engine {
const struct sdma_driver_data *drvdata;
u32 spba_start_addr;
u32 spba_end_addr;
+ unsigned int irq;
};
static struct sdma_driver_data sdma_imx31 = {
@@ -571,28 +573,20 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
static int sdma_run_channel0(struct sdma_engine *sdma)
{
int ret;
- unsigned long timeout = 500;
+ u32 reg;
sdma_enable_channel(sdma, 0);
- while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
- if (timeout-- <= 0)
- break;
- udelay(1);
- }
-
- if (ret) {
- /* Clear the interrupt status */
- writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
- } else {
+ ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
+ reg, !(reg & 1), 1, 500);
+ if (ret)
dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
- }
/* Set bits of CONFIG register with dynamic context switching */
if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
- return ret ? 0 : -ETIMEDOUT;
+ return ret;
}
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
@@ -727,9 +721,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
unsigned long stat;
stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
- /* not interested in channel 0 interrupts */
- stat &= ~1;
writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+ /* channel 0 is special and not handled here, see run_channel0() */
+ stat &= ~1;
while (stat) {
int channel = fls(stat) - 1;
@@ -758,7 +752,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
* These are needed once we start to support transfers between
* two peripherals or memory-to-memory transfers
*/
- int per_2_per = 0, emi_2_emi = 0;
+ int per_2_per = 0;
sdmac->pc_from_device = 0;
sdmac->pc_to_device = 0;
@@ -766,7 +760,6 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
switch (peripheral_type) {
case IMX_DMATYPE_MEMORY:
- emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
break;
case IMX_DMATYPE_DSP:
emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -999,8 +992,6 @@ static int sdma_config_channel(struct dma_chan *chan)
} else
__set_bit(sdmac->event_id0, sdmac->event_mask);
- /* Watermark Level */
- sdmac->watermark_level |= sdmac->watermark_level;
/* Address */
sdmac->shp_addr = sdmac->per_address;
sdmac->per_addr = sdmac->per_address2;
@@ -1715,6 +1706,8 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
return ret;
+ sdma->irq = irq;
+
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
if (!sdma->script_addrs)
return -ENOMEM;
@@ -1840,6 +1833,7 @@ static int sdma_remove(struct platform_device *pdev)
struct sdma_engine *sdma = platform_get_drvdata(pdev);
int i;
+ devm_free_irq(&pdev->dev, sdma->irq, sdma);
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
/* Kill the tasklet */
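
The sdma_run_channel0() hunk swaps an open-coded udelay() loop for readl_relaxed_poll_timeout_atomic() from the newly included <linux/iopoll.h>. The helper re-reads a register until a condition holds or a timeout expires, returning 0 on success or -ETIMEDOUT; a sketch of its shape (register offset and names are illustrative):

#include <linux/io.h>
#include <linux/iopoll.h>

#define FOO_STATSTOP	0x08	/* hypothetical status register offset */

static int foo_wait_ch0_idle(void __iomem *regs)
{
	u32 reg;

	/* poll every 1us, give up after 500us - same bounds as the hunk */
	return readl_relaxed_poll_timeout_atomic(regs + FOO_STATSTOP,
						 reg, !(reg & 1), 1, 500);
}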
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index d406056e8892..7145f7716a92 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1212,7 +1212,7 @@ static void ioat_shutdown(struct pci_dev *pdev)
ioat_disable_interrupts(ioat_dma);
}
-void ioat_resume(struct ioatdma_device *ioat_dma)
+static void ioat_resume(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
u32 chanerr;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 1ba2fd73852d..39de8980128c 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -102,6 +102,7 @@ struct k3_dma_dev {
struct clk *clk;
u32 dma_channels;
u32 dma_requests;
+ unsigned int irq;
};
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
@@ -425,10 +426,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
- if (!ds) {
- dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ if (!ds)
return NULL;
- }
+
ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
ds->size = len;
ds->desc_num = num;
@@ -481,10 +481,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
}
ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
- if (!ds) {
- dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ if (!ds)
return NULL;
- }
+
ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
ds->desc_num = num;
num = 0;
@@ -705,6 +704,8 @@ static int k3_dma_probe(struct platform_device *op)
if (ret)
return ret;
+ d->irq = irq;
+
/* init phy channel */
d->phy = devm_kzalloc(&op->dev,
d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
@@ -759,7 +760,7 @@ static int k3_dma_probe(struct platform_device *op)
ret = dma_async_device_register(&d->slave);
if (ret)
- return ret;
+ goto dma_async_register_fail;
ret = of_dma_controller_register((&op->dev)->of_node,
k3_of_dma_simple_xlate, d);
@@ -776,6 +777,8 @@ static int k3_dma_probe(struct platform_device *op)
of_dma_register_fail:
dma_async_device_unregister(&d->slave);
+dma_async_register_fail:
+ clk_disable_unprepare(d->clk);
return ret;
}
@@ -787,6 +790,8 @@ static int k3_dma_remove(struct platform_device *op)
dma_async_device_unregister(&d->slave);
of_dma_controller_free((&op->dev)->of_node);
+ devm_free_irq(&op->dev, d->irq, d);
+
list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
list_del(&c->vc.chan.device_node);
tasklet_kill(&c->vc.task);
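
The k3dma hunks add the dma_async_register_fail label so that a registration failure also releases the clock probe enabled earlier. The canonical goto-unwind shape, sketched with generic dmaengine calls (of_dma_xlate_by_chan_id is just a convenient in-tree xlate for the example):

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/of_dma.h>

static int foo_register(struct device_node *np, struct dma_device *slave,
			struct clk *clk)
{
	int ret;

	ret = dma_async_device_register(slave);
	if (ret)
		goto err_clk;	/* before the fix, the clock leaked here */

	ret = of_dma_controller_register(np, of_dma_xlate_by_chan_id, slave);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	dma_async_device_unregister(slave);
err_clk:
	clk_disable_unprepare(clk);
	return ret;
}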
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 56f1fd68b620..f4b25fb0d040 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -931,6 +931,25 @@ static void dma_do_tasklet(unsigned long data)
static int mmp_pdma_remove(struct platform_device *op)
{
struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+ struct mmp_pdma_phy *phy;
+ int i, irq = 0, irq_num = 0;
+
+
+ for (i = 0; i < pdev->dma_channels; i++) {
+ if (platform_get_irq(op, i) > 0)
+ irq_num++;
+ }
+
+ if (irq_num != pdev->dma_channels) {
+ irq = platform_get_irq(op, 0);
+ devm_free_irq(&op->dev, irq, pdev);
+ } else {
+ for (i = 0; i < pdev->dma_channels; i++) {
+ phy = &pdev->phy[i];
+ irq = platform_get_irq(op, i);
+ devm_free_irq(&op->dev, irq, phy);
+ }
+ }
dma_async_device_unregister(&pdev->device);
return 0;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3df0422607d5..b3441f57a364 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -404,7 +404,7 @@ static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
return;
}
-struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
+static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
struct gen_pool *gpool;
int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
@@ -551,10 +551,9 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
/* alloc channel */
tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
- if (!tdmac) {
- dev_err(tdev->dev, "no free memory for DMA channels!\n");
+ if (!tdmac)
return -ENOMEM;
- }
+
if (irq)
tdmac->irq = irq;
tdmac->dev = tdev->dev;
@@ -593,7 +592,7 @@ static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
return true;
}
-struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct mmp_tdma_device *tdev = ofdma->of_dma_data;
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 631c4435e075..a6e642792e5a 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -148,6 +148,7 @@ struct moxart_chan {
struct moxart_dmadev {
struct dma_device dma_slave;
struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ unsigned int irq;
};
struct moxart_filter_data {
@@ -574,10 +575,8 @@ static int moxart_probe(struct platform_device *pdev)
struct moxart_dmadev *mdc;
mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
- if (!mdc) {
- dev_err(dev, "can't allocate DMA container\n");
+ if (!mdc)
return -ENOMEM;
- }
irq = irq_of_parse_and_map(node, 0);
if (irq == NO_IRQ) {
@@ -617,6 +616,7 @@ static int moxart_probe(struct platform_device *pdev)
dev_err(dev, "devm_request_irq failed\n");
return ret;
}
+ mdc->irq = irq;
ret = dma_async_device_register(&mdc->dma_slave);
if (ret) {
@@ -640,6 +640,8 @@ static int moxart_remove(struct platform_device *pdev)
{
struct moxart_dmadev *m = platform_get_drvdata(pdev);
+ devm_free_irq(&pdev->dev, m->irq, m);
+
dma_async_device_unregister(&m->dma_slave);
if (pdev->dev.of_node)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index ccadafa51d5e..fa86592c7ae1 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1110,6 +1110,7 @@ static int mpc_dma_remove(struct platform_device *op)
}
free_irq(mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
+ tasklet_kill(&mdma->tasklet);
return 0;
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d0446a75990a..f4c9f98ec35e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1057,7 +1057,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
- err_free_dma:
+err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
return ERR_PTR(ret);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
new file mode 100644
index 000000000000..a28a01fcba67
--- /dev/null
+++ b/drivers/dma/mv_xor_v2.c
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) 2015-2016 Marvell International Ltd.
+
+ * This program is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+
+/* DMA Engine Registers */
+#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000
+#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004
+#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008
+#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C
+#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF
+#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0
+#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF
+#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16
+#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010
+#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F
+#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202
+#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C
+#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
+#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
+#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
+#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
+#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
+ /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
+#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C
+#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF
+#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16
+#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050
+#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054
+#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100
+#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1
+#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7
+#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800
+#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804
+#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
+
+/* XOR Global registers */
+#define MV_XOR_V2_GLOB_BW_CTRL 0x4
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8
+#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12
+#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4
+#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16
+#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4
+#define MV_XOR_V2_GLOB_PAUSE 0x014
+#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8
+#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200
+#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204
+#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220
+#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224
+
+#define MV_XOR_V2_MIN_DESC_SIZE 32
+#define MV_XOR_V2_EXT_DESC_SIZE 128
+
+#define MV_XOR_V2_DESC_RESERVED_SIZE 12
+#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12
+
+#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8
+
+/*
+ * Descriptors queue size. With 32 bytes descriptors, up to 2^14
+ * descriptors are allowed, with 128 bytes descriptors, up to 2^12
+ * descriptors are allowed. This driver uses 128 bytes descriptors,
+ * but experimentation has shown that a set of 1024 descriptors is
+ * sufficient to reach a good level of performance.
+ */
+#define MV_XOR_V2_DESC_NUM 1024
+
+/**
+ * struct mv_xor_v2_descriptor - DMA HW descriptor
+ * @desc_id: used by S/W and is not affected by H/W.
+ * @flags: error and status flags
+ * @crc32_result: CRC32 calculation result
+ * @desc_ctrl: operation mode and control flags
+ * @buff_size: amount of bytes to be processed
+ * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
+ * AW-Attributes
+ * @data_buff_addr: Source (and might be RAID6 destination)
+ * addresses of data buffers in RAID5 and RAID6
+ * @reserved: reserved
+ */
+struct mv_xor_v2_descriptor {
+ u16 desc_id;
+ u16 flags;
+ u32 crc32_result;
+ u32 desc_ctrl;
+
+ /* Definitions for desc_ctrl */
+#define DESC_NUM_ACTIVE_D_BUF_SHIFT 22
+#define DESC_OP_MODE_SHIFT 28
+#define DESC_OP_MODE_NOP 0 /* Idle operation */
+#define DESC_OP_MODE_MEMCPY 1 /* Pure-DMA operation */
+#define DESC_OP_MODE_MEMSET 2 /* Mem-Fill operation */
+#define DESC_OP_MODE_MEMINIT 3 /* Mem-Init operation */
+#define DESC_OP_MODE_MEM_COMPARE 4 /* Mem-Compare operation */
+#define DESC_OP_MODE_CRC32 5 /* CRC32 calculation */
+#define DESC_OP_MODE_XOR 6 /* RAID5 (XOR) operation */
+#define DESC_OP_MODE_RAID6 7 /* RAID6 P&Q-generation */
+#define DESC_OP_MODE_RAID6_REC 8 /* RAID6 Recovery */
+#define DESC_Q_BUFFER_ENABLE BIT(16)
+#define DESC_P_BUFFER_ENABLE BIT(17)
+#define DESC_IOD BIT(27)
+
+ u32 buff_size;
+ u32 fill_pattern_src_addr[4];
+ u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
+ u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
+};
+
+/**
+ * struct mv_xor_v2_device - implements a xor device
+ * @lock: lock for the engine
+ * @dma_base: memory mapped DMA register base
+ * @glob_base: memory mapped global register base
+ * @clk: reference to the engine's clock
+ * @irq_tasklet: tasklet that handles completed descriptors outside interrupt context
+ * @free_sw_desc: linked list of free SW descriptors
+ * @dmadev: dma device
+ * @dmachan: dma channel
+ * @hw_desq: HW descriptors queue
+ * @hw_desq_virt: virtual address of DESCQ
+ * @sw_desq: SW descriptors queue
+ * @desc_size: HW descriptor size
+ * @npendings: number of pending descriptors (for which tx_submit has
+ * been called, but not yet issue_pending)
+ */
+struct mv_xor_v2_device {
+ spinlock_t lock;
+ void __iomem *dma_base;
+ void __iomem *glob_base;
+ struct clk *clk;
+ struct tasklet_struct irq_tasklet;
+ struct list_head free_sw_desc;
+ struct dma_device dmadev;
+ struct dma_chan dmachan;
+ dma_addr_t hw_desq;
+ struct mv_xor_v2_descriptor *hw_desq_virt;
+ struct mv_xor_v2_sw_desc *sw_desq;
+ int desc_size;
+ unsigned int npendings;
+};
+
+/**
+ * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
+ * @idx: descriptor index
+ * @async_tx: support for the async_tx api
+ * @hw_desc: associated HW descriptor
+ * @free_list: node of the free SW descriptors list
+ */
+struct mv_xor_v2_sw_desc {
+ int idx;
+ struct dma_async_tx_descriptor async_tx;
+ struct mv_xor_v2_descriptor hw_desc;
+ struct list_head free_list;
+};
+
+/*
+ * Fill the data buffers to a HW descriptor
+ */
+static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
+ struct mv_xor_v2_descriptor *desc,
+ dma_addr_t src, int index)
+{
+ int arr_index = ((index >> 1) * 3);
+
+ /*
+ * Fill the buffer's addresses to the descriptor.
+ *
+ * The format of the buffers address for 2 sequential buffers
+ * X and X + 1:
+ *
+ * First word: Buffer-DX-Address-Low[31:0]
+ * Second word: Buffer-DX+1-Address-Low[31:0]
+ * Third word: DX+1-Buffer-Address-High[47:32] [31:16]
+ * DX-Buffer-Address-High[47:32] [15:0]
+ */
+ if ((index & 0x1) == 0) {
+ desc->data_buff_addr[arr_index] = lower_32_bits(src);
+
+ desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
+ desc->data_buff_addr[arr_index + 2] |=
+ upper_32_bits(src) & 0xFFFF;
+ } else {
+ desc->data_buff_addr[arr_index + 1] =
+ lower_32_bits(src);
+
+ desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
+ desc->data_buff_addr[arr_index + 2] |=
+ (upper_32_bits(src) & 0xFFFF) << 16;
+ }
+}
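
To make the three-word layout concrete, here is a worked example (not from the source) with two 48-bit buffers sharing one triple: D0 = 0x01_2345_6789 at index 0 and D1 = 0xAB_0000_1000 at index 1:

	data_buff_addr[arr_index]     = 0x23456789	/* D0 low 32 bits */
	data_buff_addr[arr_index + 1] = 0x00001000	/* D1 low 32 bits */
	data_buff_addr[arr_index + 2] = 0x00AB0001	/* D1 high 16 in [31:16], D0 high 16 in [15:0] */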
+
+/*
+ * Return the next available index in the DESQ.
+ */
+static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
+{
+ /* read the index for the next available descriptor in the DESQ */
+ u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
+
+ return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
+ & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
+}
+
+/*
+ * notify the engine of new descriptors, and update the available index.
+ */
+static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
+ int num_of_desc)
+{
+ /* write the number of new descriptors in the DESQ. */
+ writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
+}
+
+/*
+ * free HW descriptors
+ */
+static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
+ int num_of_desc)
+{
+ /* write the number of new descriptors in the DESQ. */
+ writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
+}
+
+/*
+ * Set descriptor size
+ * Return the HW descriptor size in bytes
+ */
+static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
+{
+ writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);
+
+ return MV_XOR_V2_EXT_DESC_SIZE;
+}
+
+/*
+ * Set the IMSG threshold
+ */
+static inline
+void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+{
+ u32 reg;
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+
+ reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+ reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+}
+
+static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+{
+ struct mv_xor_v2_device *xor_dev = data;
+ unsigned int ndescs;
+ u32 reg;
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+ ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+ MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+
+ /* No descriptors to process */
+ if (!ndescs)
+ return IRQ_NONE;
+
+ /*
+ * Update IMSG threshold, to disable new IMSG interrupts until
+ * end of the tasklet
+ */
+ mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
+
+ /* schedule a tasklet to handle descriptors callbacks */
+ tasklet_schedule(&xor_dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * submit a descriptor to the DMA engine
+ */
+static dma_cookie_t
+mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ int desq_ptr;
+ void *dest_hw_desc;
+ dma_cookie_t cookie;
+ struct mv_xor_v2_sw_desc *sw_desc =
+ container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
+ struct mv_xor_v2_device *xor_dev =
+ container_of(tx->chan, struct mv_xor_v2_device, dmachan);
+
+ dev_dbg(xor_dev->dmadev.dev,
+ "%s sw_desc %p: async_tx %p\n",
+ __func__, sw_desc, &sw_desc->async_tx);
+
+ /* assign cookie */
+ spin_lock_bh(&xor_dev->lock);
+ cookie = dma_cookie_assign(tx);
+
+ /* get the next available slot in the DESQ */
+ desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
+
+ /* copy the HW descriptor from the SW descriptor to the DESQ */
+ dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+
+ memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
+
+ xor_dev->npendings++;
+
+ spin_unlock_bh(&xor_dev->lock);
+
+ return cookie;
+}
+
+/*
+ * Prepare a SW descriptor
+ */
+static struct mv_xor_v2_sw_desc *
+mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+
+ /* Lock the channel */
+ spin_lock_bh(&xor_dev->lock);
+
+ if (list_empty(&xor_dev->free_sw_desc)) {
+ spin_unlock_bh(&xor_dev->lock);
+ /* schedule tasklet to free some descriptors */
+ tasklet_schedule(&xor_dev->irq_tasklet);
+ return NULL;
+ }
+
+ /* get a free SW descriptor from the SW DESQ */
+ sw_desc = list_first_entry(&xor_dev->free_sw_desc,
+ struct mv_xor_v2_sw_desc, free_list);
+ list_del(&sw_desc->free_list);
+
+ /* Release the channel */
+ spin_unlock_bh(&xor_dev->lock);
+
+ /* set the async tx descriptor */
+ dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
+ sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+ async_tx_ack(&sw_desc->async_tx);
+
+ return sw_desc;
+}
+
+/*
+ * Prepare a HW descriptor for a memcpy operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct mv_xor_v2_descriptor *hw_descriptor;
+ struct mv_xor_v2_device *xor_dev;
+
+ xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);
+
+ dev_dbg(xor_dev->dmadev.dev,
+ "%s len: %zu src %pad dest %pad flags: %ld\n",
+ __func__, len, &src, &dest, flags);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+
+ sw_desc->async_tx.flags = flags;
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
+
+ /* save the SW descriptor ID to restore when operation is done */
+ hw_descriptor->desc_id = sw_desc->idx;
+
+ /* Set the MEMCPY control word */
+ hw_descriptor->desc_ctrl =
+ DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ hw_descriptor->desc_ctrl |= DESC_IOD;
+
+ /* Set source address */
+ hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
+ hw_descriptor->fill_pattern_src_addr[1] =
+ upper_32_bits(src) & 0xFFFF;
+
+ /* Set Destination address */
+ hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+ hw_descriptor->fill_pattern_src_addr[3] =
+ upper_32_bits(dest) & 0xFFFF;
+
+ /* Set buffers size */
+ hw_descriptor->buff_size = len;
+
+ /* return the async tx descriptor */
+ return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for a XOR operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct mv_xor_v2_descriptor *hw_descriptor;
+ struct mv_xor_v2_device *xor_dev =
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+ int i;
+
+ if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
+ return NULL;
+
+ dev_dbg(xor_dev->dmadev.dev,
+ "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+
+ sw_desc->async_tx.flags = flags;
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
+
+ /* save the SW descriptor ID to restore when operation is done */
+ hw_descriptor->desc_id = sw_desc->idx;
+
+ /* Set the XOR control word */
+ hw_descriptor->desc_ctrl =
+ DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
+ hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ hw_descriptor->desc_ctrl |= DESC_IOD;
+
+ /* Set the data buffers */
+ for (i = 0; i < src_cnt; i++)
+ mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);
+
+ hw_descriptor->desc_ctrl |=
+ src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;
+
+ /* Set Destination address */
+ hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+ hw_descriptor->fill_pattern_src_addr[3] =
+ upper_32_bits(dest) & 0xFFFF;
+
+ /* Set buffers size */
+ hw_descriptor->buff_size = len;
+
+ /* return the async tx descriptor */
+ return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for interrupt operation.
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct mv_xor_v2_descriptor *hw_descriptor;
+ struct mv_xor_v2_device *xor_dev =
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+
+ sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
+
+ /* save the SW descriptor ID to restore when operation is done */
+ hw_descriptor->desc_id = sw_desc->idx;
+
+ /* Set the INTERRUPT control word */
+ hw_descriptor->desc_ctrl =
+ DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
+ hw_descriptor->desc_ctrl |= DESC_IOD;
+
+ /* return the async tx descriptor */
+ return &sw_desc->async_tx;
+}
+
+/*
+ * push pending transactions to hardware
+ */
+static void mv_xor_v2_issue_pending(struct dma_chan *chan)
+{
+ struct mv_xor_v2_device *xor_dev =
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+
+ spin_lock_bh(&xor_dev->lock);
+
+ /*
+ * update the engine with the number of descriptors to
+ * process
+ */
+ mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
+ xor_dev->npendings = 0;
+
+ /* Activate the channel */
+ writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+ spin_unlock_bh(&xor_dev->lock);
+}
+
+static inline
+int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
+ int *pending_ptr)
+{
+ u32 reg;
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+ /* get the next pending descriptor index */
+ *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
+ MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);
+
+ /* get the number of descriptors pending handling */
+ return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+ MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+}
+
+/*
+ * handle the descriptors after HW process
+ */
+static void mv_xor_v2_tasklet(unsigned long data)
+{
+ struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+ int pending_ptr, num_of_pending, i;
+ struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
+ struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
+
+ dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
+
+ /* get the pending descriptors parameters */
+ num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
+
+ /* next HW descriptor */
+ next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
+
+ /* loop over free descriptors */
+ for (i = 0; i < num_of_pending; i++) {
+
+ if (pending_ptr > MV_XOR_V2_DESC_NUM)
+ pending_ptr = 0;
+
+ if (next_pending_sw_desc != NULL)
+ next_pending_hw_desc++;
+
+ /* get the SW descriptor related to the HW descriptor */
+ next_pending_sw_desc =
+ &xor_dev->sw_desq[next_pending_hw_desc->desc_id];
+
+ /* call the callback */
+ if (next_pending_sw_desc->async_tx.cookie > 0) {
+ /*
+ * update the channel's completed cookie - no
+ * lock is required; the IMSG threshold provides
+ * the locking
+ */
+ dma_cookie_complete(&next_pending_sw_desc->async_tx);
+
+ if (next_pending_sw_desc->async_tx.callback)
+ next_pending_sw_desc->async_tx.callback(
+ next_pending_sw_desc->async_tx.callback_param);
+
+ dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
+ }
+
+ dma_run_dependencies(&next_pending_sw_desc->async_tx);
+
+ /* Lock the channel */
+ spin_lock_bh(&xor_dev->lock);
+
+ /* add the SW descriptor to the free descriptors list */
+ list_add(&next_pending_sw_desc->free_list,
+ &xor_dev->free_sw_desc);
+
+ /* Release the channel */
+ spin_unlock_bh(&xor_dev->lock);
+
+ /* increment the next descriptor */
+ pending_ptr++;
+ }
+
+ if (num_of_pending != 0) {
+		/* free the descriptors */
+ mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
+ }
+
+ /* Update IMSG threshold, to enable new IMSG interrupts */
+ mv_xor_v2_set_imsg_thrd(xor_dev, 0);
+}
+
+/*
+ * Set DMA Interrupt-message (IMSG) parameters
+ */
+static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);
+
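+	/*
+	 * Program the MSI doorbell address and payload into the IMSG
+	 * registers, so the XOR engine itself generates the MSI write
+	 * on descriptor completion.
+	 */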
+ writel(msg->address_lo,
+ xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
+ writel(msg->address_hi & 0xFFFF,
+ xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
+ writel(msg->data,
+ xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
+}
+
+static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
+{
+ u32 reg;
+
+ /* write the DESQ size to the DMA engine */
+ writel(MV_XOR_V2_DESC_NUM,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
+
+	/* write the DESQ address to the DMA engine */
+ writel(xor_dev->hw_desq & 0xFFFFFFFF,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
+ writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
+
+ /* enable the DMA engine */
+ writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+ /*
+ * This is a temporary solution, until we activate the
+ * SMMU. Set the attributes for reading & writing data buffers
+ * & descriptors to:
+ *
+ * - OuterShareable - Snoops will be performed on CPU caches
+ * - Enable cacheable - Bufferable, Modifiable, Other Allocate
+ * and Allocate
+ */
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+ reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+ reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+ MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+ reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+ reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+ MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+
+ /* BW CTRL - set values to optimize the XOR performance:
+ *
+ * - Set WrBurstLen & RdBurstLen - the unit will issue
+ * maximum of 256B write/read transactions.
+ * - Limit the number of outstanding write & read data
+ * (OBB/IBB) requests to the maximal value.
+ */
+ reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
+ (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
+ (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
+ (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
+ writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);
+
+ /* Disable the AXI timer feature */
+ reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+ reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
+ writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+
+ return 0;
+}
+
+static int mv_xor_v2_probe(struct platform_device *pdev)
+{
+ struct mv_xor_v2_device *xor_dev;
+ struct resource *res;
+ int i, ret = 0;
+ struct dma_device *dma_dev;
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct msi_desc *msi_desc;
+
+ BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
+ MV_XOR_V2_EXT_DESC_SIZE);
+
+ xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
+ if (!xor_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xor_dev->dma_base))
+ return PTR_ERR(xor_dev->dma_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xor_dev->glob_base))
+ return PTR_ERR(xor_dev->glob_base);
+
+ platform_set_drvdata(pdev, xor_dev);
+
+ xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(xor_dev->clk)) {
+ ret = clk_prepare_enable(xor_dev->clk);
+ if (ret)
+ return ret;
+ }
+
+ ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
+ mv_xor_v2_set_msi_msg);
+ if (ret)
+ goto disable_clk;
+
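+	/* a single MSI vector was allocated above, fetch its descriptor */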
+ msi_desc = first_msi_entry(&pdev->dev);
+	if (!msi_desc) {
+		ret = -ENODEV;
+		goto free_msi_irqs;
+	}
+
+ ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+ mv_xor_v2_interrupt_handler, 0,
+ dev_name(&pdev->dev), xor_dev);
+ if (ret)
+ goto free_msi_irqs;
+
+ tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
+ (unsigned long) xor_dev);
+
+ xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
+
+ dma_cookie_init(&xor_dev->dmachan);
+
+ /*
+ * allocate coherent memory for hardware descriptors
+ * note: writecombine gives slightly better performance, but
+ * requires that we explicitly flush the writes
+ */
+ xor_dev->hw_desq_virt =
+ dma_alloc_coherent(&pdev->dev,
+ xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+ &xor_dev->hw_desq, GFP_KERNEL);
+ if (!xor_dev->hw_desq_virt) {
+ ret = -ENOMEM;
+ goto free_msi_irqs;
+ }
+
+ /* alloc memory for the SW descriptors */
+ xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
+ MV_XOR_V2_DESC_NUM, GFP_KERNEL);
+ if (!xor_dev->sw_desq) {
+ ret = -ENOMEM;
+ goto free_hw_desq;
+ }
+
+ spin_lock_init(&xor_dev->lock);
+
+ /* init the free SW descriptors list */
+ INIT_LIST_HEAD(&xor_dev->free_sw_desc);
+
+ /* add all SW descriptors to the free list */
+ for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
+ xor_dev->sw_desq[i].idx = i;
+ list_add(&xor_dev->sw_desq[i].free_list,
+ &xor_dev->free_sw_desc);
+ }
+
+ dma_dev = &xor_dev->dmadev;
+
+ /* set DMA capabilities */
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ /* init dma link list */
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* set base routines */
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
+ dma_dev->dev = &pdev->dev;
+
+ dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
+ dma_dev->max_xor = 8;
+ dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;
+
+ xor_dev->dmachan.device = dma_dev;
+
+ list_add_tail(&xor_dev->dmachan.device_node,
+ &dma_dev->channels);
+
+ mv_xor_v2_descq_init(xor_dev);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto free_hw_desq;
+
+ dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");
+
+ return 0;
+
+free_hw_desq:
+ dma_free_coherent(&pdev->dev,
+ xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+ xor_dev->hw_desq_virt, xor_dev->hw_desq);
+free_msi_irqs:
+ platform_msi_domain_free_irqs(&pdev->dev);
+disable_clk:
+ if (!IS_ERR(xor_dev->clk))
+ clk_disable_unprepare(xor_dev->clk);
+ return ret;
+}
+
+static int mv_xor_v2_remove(struct platform_device *pdev)
+{
+ struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&xor_dev->dmadev);
+
+ dma_free_coherent(&pdev->dev,
+ xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+ xor_dev->hw_desq_virt, xor_dev->hw_desq);
+
+ platform_msi_domain_free_irqs(&pdev->dev);
+
+ clk_disable_unprepare(xor_dev->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mv_xor_v2_dt_ids[] = {
+ { .compatible = "marvell,xor-v2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_v2_driver = {
+ .probe = mv_xor_v2_probe,
+ .remove = mv_xor_v2_remove,
+ .driver = {
+ .name = "mv_xor_v2",
+ .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
+ },
+};
+
+module_platform_driver(mv_xor_v2_driver);
+
+MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 2b5a198ac77e..08c45c185549 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -227,6 +227,7 @@ struct nbpf_device {
void __iomem *base;
struct clk *clk;
const struct nbpf_config *config;
+ unsigned int eirq;
struct nbpf_channel chan[];
};
@@ -1300,10 +1301,9 @@ static int nbpf_probe(struct platform_device *pdev)
nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
sizeof(nbpf->chan[0]), GFP_KERNEL);
- if (!nbpf) {
- dev_err(dev, "Memory allocation failed\n");
+ if (!nbpf)
return -ENOMEM;
- }
+
dma_dev = &nbpf->dma_dev;
dma_dev->dev = dev;
@@ -1376,6 +1376,7 @@ static int nbpf_probe(struct platform_device *pdev)
IRQF_SHARED, "dma error", nbpf);
if (ret < 0)
return ret;
+ nbpf->eirq = eirq;
INIT_LIST_HEAD(&dma_dev->channels);
@@ -1447,6 +1448,17 @@ e_clk_off:
static int nbpf_remove(struct platform_device *pdev)
{
struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+ int i;
+
+ devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
+
+ for (i = 0; i < nbpf->config->num_channels; i++) {
+ struct nbpf_channel *chan = nbpf->chan + i;
+
+ devm_free_irq(&pdev->dev, chan->irq, chan);
+
+ tasklet_kill(&chan->tasklet);
+ }
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&nbpf->dma_dev);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1e984e18c126..d99ca2b511c4 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -59,6 +59,8 @@ struct omap_sg {
dma_addr_t addr;
uint32_t en; /* number of elements (24-bit) */
uint32_t fn; /* number of frames (16-bit) */
+ int32_t fi; /* for double indexing */
+ int16_t ei; /* for double indexing */
};
struct omap_desc {
@@ -66,7 +68,8 @@ struct omap_desc {
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
- int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
+ int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */
+ int16_t ei; /* for double indexing */
uint8_t es; /* CSDP_DATA_TYPE_xxx */
uint32_t ccr; /* CCR value */
uint16_t clnk_ctrl; /* CLNK_CTRL value */
@@ -379,8 +382,8 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
}
omap_dma_chan_write(c, cxsa, sg->addr);
- omap_dma_chan_write(c, cxei, 0);
- omap_dma_chan_write(c, cxfi, 0);
+ omap_dma_chan_write(c, cxei, sg->ei);
+ omap_dma_chan_write(c, cxfi, sg->fi);
omap_dma_chan_write(c, CEN, sg->en);
omap_dma_chan_write(c, CFN, sg->fn);
@@ -425,7 +428,7 @@ static void omap_dma_start_desc(struct omap_chan *c)
}
omap_dma_chan_write(c, cxsa, d->dev_addr);
- omap_dma_chan_write(c, cxei, 0);
+ omap_dma_chan_write(c, cxei, d->ei);
omap_dma_chan_write(c, cxfi, d->fi);
omap_dma_chan_write(c, CSDP, d->csdp);
omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
@@ -971,6 +974,89 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_desc *d;
+ struct omap_sg *sg;
+ uint8_t data_type;
+ size_t src_icg, dst_icg;
+
+ /* Slave mode is not supported */
+ if (is_slave_direction(xt->dir))
+ return NULL;
+
+ if (xt->frame_size != 1 || xt->numf == 0)
+ return NULL;
+
+ d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
+ if (data_type > CSDP_DATA_TYPE_32)
+ data_type = CSDP_DATA_TYPE_32;
+
+ sg = &d->sg[0];
+ d->dir = DMA_MEM_TO_MEM;
+ d->dev_addr = xt->src_start;
+ d->es = data_type;
+ sg->en = xt->sgl[0].size / BIT(data_type);
+ sg->fn = xt->numf;
+ sg->addr = xt->dst_start;
+ d->sglen = 1;
+ d->ccr = c->ccr;
+
+ src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+ dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
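+	/*
+	 * A non-zero inter-chunk gap is implemented with the double
+	 * indexing address mode: the element index walks the chunk
+	 * linearly while the frame index jumps over the gap.
+	 */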
+ if (src_icg) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+ d->ei = 1;
+ d->fi = src_icg;
+ } else if (xt->src_inc) {
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ d->fi = 0;
+ } else {
+ dev_err(chan->device->dev,
+ "%s: SRC constant addressing is not supported\n",
+ __func__);
+ kfree(d);
+ return NULL;
+ }
+
+ if (dst_icg) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+ sg->ei = 1;
+ sg->fi = dst_icg;
+ } else if (xt->dst_inc) {
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ sg->fi = 0;
+ } else {
+ dev_err(chan->device->dev,
+ "%s: DST constant addressing is not supported\n",
+ __func__);
+ kfree(d);
+ return NULL;
+ }
+
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+
+ d->csdp = data_type;
+
+ if (dma_omap1()) {
+ d->cicr |= CICR_TOUT_IE;
+ d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
+ } else {
+ d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
+ d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+ d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1116,6 +1202,7 @@ static int omap_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
od->ddev.device_tx_status = omap_dma_tx_status;
@@ -1123,6 +1210,7 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
+ od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
od->ddev.device_config = omap_dma_slave_config;
od->ddev.device_pause = omap_dma_pause;
od->ddev.device_resume = omap_dma_resume;
@@ -1204,10 +1292,14 @@ static int omap_dma_probe(struct platform_device *pdev)
static int omap_dma_remove(struct platform_device *pdev)
{
struct omap_dmadev *od = platform_get_drvdata(pdev);
+ int irq;
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
+ irq = platform_get_irq(pdev, 1);
+ devm_free_irq(&pdev->dev, irq, od);
+
dma_async_device_unregister(&od->ddev);
if (!od->legacy) {
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 372b4359da97..4fc3ffbd5ca0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2828,10 +2828,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
/* Allocate a new DMAC and its Channels */
pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
- if (!pl330) {
- dev_err(&adev->dev, "unable to allocate mem\n");
+ if (!pl330)
return -ENOMEM;
- }
pd = &pl330->ddma;
pd->dev = &adev->dev;
@@ -2890,7 +2888,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
if (!pl330->peripherals) {
ret = -ENOMEM;
- dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
goto probe_err2;
}
@@ -3005,12 +3002,18 @@ static int pl330_remove(struct amba_device *adev)
{
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
+ int i, irq;
pm_runtime_get_noresume(pl330->ddma.dev);
if (adev->dev.of_node)
of_dma_controller_free(adev->dev.of_node);
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = adev->irq[i];
+ devm_free_irq(&adev->dev, irq, pl330);
+ }
+
dma_async_device_unregister(&pl330->ddma);
/* Idle the DMAC */
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 9217f893b0d1..da3688b94bdc 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4084,7 +4084,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
/* create a device */
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev) {
- dev_err(&ofdev->dev, "failed to allocate device\n");
initcode = PPC_ADMA_INIT_ALLOC;
ret = -ENOMEM;
goto err_adev_alloc;
@@ -4145,7 +4144,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
/* create a channel */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan) {
- dev_err(&ofdev->dev, "can't allocate channel structure\n");
initcode = PPC_ADMA_INIT_CHANNEL;
ret = -ENOMEM;
goto err_chan_alloc;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index e756a30ccba2..dc7850a422b8 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -21,6 +21,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>
#include "dmaengine.h"
@@ -118,6 +119,8 @@ struct pxad_chan {
struct pxad_phy *phy;
struct dma_pool *desc_pool; /* Descriptors pool */
dma_cookie_t bus_error;
+
+ wait_queue_head_t wq_state;
};
struct pxad_device {
@@ -318,7 +321,6 @@ static int dbg_open_##name(struct inode *inode, struct file *file) \
return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
- .owner = THIS_MODULE, \
.open = dbg_open_##name, \
.llseek = seq_lseek, \
.read = seq_read, \
@@ -572,6 +574,7 @@ static void pxad_launch_chan(struct pxad_chan *chan,
*/
phy_writel(chan->phy, desc->first, DDADR);
phy_enable(chan->phy, chan->misaligned);
+ wake_up(&chan->wq_state);
}
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
@@ -717,6 +720,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
}
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
+ wake_up(&chan->wq_state);
return IRQ_HANDLED;
}
@@ -1268,6 +1272,14 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan,
return ret;
}
+static void pxad_synchronize(struct dma_chan *dchan)
+{
+ struct pxad_chan *chan = to_pxad_chan(dchan);
+
+ wait_event(chan->wq_state, !is_chan_running(chan));
+ vchan_synchronize(&chan->vc);
+}
+
static void pxad_free_channels(struct dma_device *dmadev)
{
struct pxad_chan *c, *cn;
@@ -1372,6 +1384,7 @@ static int pxad_init_dmadev(struct platform_device *op,
pdev->slave.device_tx_status = pxad_tx_status;
pdev->slave.device_issue_pending = pxad_issue_pending;
pdev->slave.device_config = pxad_config;
+ pdev->slave.device_synchronize = pxad_synchronize;
pdev->slave.device_terminate_all = pxad_terminate_all;
if (op->dev.coherent_dma_mask)
@@ -1389,6 +1402,7 @@ static int pxad_init_dmadev(struct platform_device *op,
return -ENOMEM;
c->vc.desc_free = pxad_free_desc;
vchan_init(&c->vc, &pdev->slave);
+ init_waitqueue_head(&c->wq_state);
}
return dma_async_device_register(&pdev->slave);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 969b48176745..03c4eb3fd314 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -48,6 +48,7 @@
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
#include "../dmaengine.h"
#include "../virt-dma.h"
@@ -58,6 +59,8 @@ struct bam_desc_hw {
__le16 flags;
};
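+/* runtime PM autosuspend delay, in milliseconds */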
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
@@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan)
struct bam_device *bdev = bchan->bdev;
u32 val;
unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return;
vchan_free_chan_resources(to_virt_chan(chan));
if (bchan->curr_txd) {
dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
- return;
+ goto err;
}
spin_lock_irqsave(&bchan->vc.lock, flags);
@@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan)
/* disable irq */
writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
}
/**
@@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan)
struct bam_chan *bchan = to_bam_chan(chan);
struct bam_device *bdev = bchan->bdev;
unsigned long flag;
+ int ret;
+
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&bchan->vc.lock, flag);
writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 1;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
return 0;
}
@@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan)
struct bam_chan *bchan = to_bam_chan(chan);
struct bam_device *bdev = bchan->bdev;
unsigned long flag;
+ int ret;
+
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&bchan->vc.lock, flag);
writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 0;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
return 0;
}
@@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
{
struct bam_device *bdev = data;
u32 clr_mask = 0, srcs = 0;
+ int ret;
srcs |= process_channel_irqs(bdev);
@@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
if (srcs & P_IRQ)
tasklet_schedule(&bdev->task);
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+		return IRQ_NONE;
+
if (srcs & BAM_IRQ) {
clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
@@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
}
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+
return IRQ_HANDLED;
}
@@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan)
struct bam_desc_hw *desc;
struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
sizeof(struct bam_desc_hw));
+ int ret;
lockdep_assert_held(&bchan->vc.lock);
@@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan)
async_desc = container_of(vd, struct bam_async_desc, vd);
bchan->curr_txd = async_desc;
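+	/*
+	 * pm_runtime_irq_safe() is set in probe, so this get does not
+	 * sleep and is safe under the vc.lock held by our caller.
+	 */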
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return;
+
/* on first use, initialize the channel hardware */
if (!bchan->initialized)
bam_chan_init_hw(bchan, async_desc->dir);
@@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan)
wmb();
writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
}
/**
@@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data)
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
+
}
/**
@@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_dma;
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
return 0;
err_unregister_dma:
@@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev)
struct bam_device *bdev = platform_get_drvdata(pdev);
u32 i;
+ pm_runtime_force_suspend(&pdev->dev);
+
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&bdev->common);
@@ -1260,11 +1312,66 @@ static int bam_dma_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+
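+	/*
+	 * Only gate the clock here; it stays prepared so the irq-safe
+	 * runtime PM callbacks never sleep. The prepare/unprepare halves
+	 * are done in the system sleep handlers below.
+	 */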
+ clk_disable(bdev->bamclk);
+
+ return 0;
+}
+
+static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(bdev->bamclk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused bam_dma_suspend(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ clk_unprepare(bdev->bamclk);
+
+ return 0;
+}
+
+static int __maybe_unused bam_dma_resume(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare(bdev->bamclk);
+ if (ret)
+ return ret;
+
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bam_dma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+ SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+ NULL)
+};
+
static struct platform_driver bam_dma_driver = {
.probe = bam_dma_probe,
.remove = bam_dma_remove,
.driver = {
.name = "bam-dma-engine",
+ .pm = &bam_dma_pm_ops,
.of_match_table = bam_of_match,
},
};
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 41b5c6dee713..b2374cd91e45 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_async_device_unregister(&dmadev->ddev);
devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ tasklet_kill(&dmadev->task);
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
hidma_free(dmadev);
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index f3929001539b..ad20dfb64c71 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -831,6 +831,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
tasklet_kill(&lldev->task);
+ tasklet_kill(&lldev->rst_task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
lldev->pending_tre_count = 0;
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index c0e365321310..82f36e466083 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -371,8 +371,8 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
pdevinfo.size_data = 0;
pdevinfo.dma_mask = DMA_BIT_MASK(64);
new_pdev = platform_device_register_full(&pdevinfo);
- if (!new_pdev) {
- ret = -ENODEV;
+ if (IS_ERR(new_pdev)) {
+ ret = PTR_ERR(new_pdev);
goto out;
}
of_dma_configure(&new_pdev->dev, child);
@@ -392,8 +392,7 @@ static int __init hidma_mgmt_init(void)
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct device_node *child;
- for (child = of_find_matching_node(NULL, hidma_mgmt_match); child;
- child = of_find_matching_node(child, hidma_mgmt_match)) {
+ for_each_matching_node(child, hidma_mgmt_match) {
/* device tree based firmware here */
hidma_mgmt_of_populate_channels(child);
of_node_put(child);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 17ccdfd28f37..ce67075589f5 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -768,16 +768,12 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&s3cchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE) {
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
- }
/*
* There's no point calculating the residue if there's
* no txstate to store the value.
*/
- if (!txstate) {
+ if (ret == DMA_COMPLETE || !txstate) {
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
return ret;
}
@@ -1105,11 +1101,8 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
*/
for (i = 0; i < channels; i++) {
chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
- if (!chan) {
- dev_err(dmadev->dev,
- "%s no memory for channel\n", __func__);
+ if (!chan)
return -ENOMEM;
- }
chan->id = i;
chan->host = s3cdma;
@@ -1143,8 +1136,10 @@ static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
struct s3c24xx_dma_chan *next;
list_for_each_entry_safe(chan,
- next, &dmadev->channels, vc.chan.device_node)
+ next, &dmadev->channels, vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
+ tasklet_kill(&chan->vc.task);
+ }
}
/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
@@ -1366,6 +1361,18 @@ err_memcpy:
return ret;
}
+static void s3c24xx_dma_free_irq(struct platform_device *pdev,
+ struct s3c24xx_dma_engine *s3cdma)
+{
+ int i;
+
+ for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+
+ devm_free_irq(&pdev->dev, phy->irq, phy);
+ }
+}
+
static int s3c24xx_dma_remove(struct platform_device *pdev)
{
const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
@@ -1376,6 +1383,8 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&s3cdma->slave);
dma_async_device_unregister(&s3cdma->memcpy);
+ s3c24xx_dma_free_irq(pdev, s3cdma);
+
s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index dfb17926297b..0dd953884d1d 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -311,7 +311,7 @@ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
- return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
+ return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
@@ -510,7 +510,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
spin_lock_irqsave(&chan->lock, flags);
list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
- list_add_tail(&desc->node, &chan->desc.free);
+ list_add(&desc->node, &chan->desc.free);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -990,6 +990,8 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
list_splice_init(&rchan->desc.done, &list);
list_splice_init(&rchan->desc.wait, &list);
+ rchan->desc.running = NULL;
+
list_for_each_entry(desc, &list, node)
rcar_dmac_realloc_hwdesc(rchan, desc, 0);
@@ -1143,6 +1145,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
struct rcar_dmac_desc *desc = chan->desc.running;
struct rcar_dmac_xfer_chunk *running = NULL;
struct rcar_dmac_xfer_chunk *chunk;
+ enum dma_status status;
unsigned int residue = 0;
unsigned int dptr = 0;
@@ -1150,12 +1153,38 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
return 0;
/*
+ * If the cookie corresponds to a descriptor that has been completed
+ * there is no residue. The same check has already been performed by the
+ * caller but without holding the channel lock, so the descriptor could
+ * now be complete.
+ */
+ status = dma_cookie_status(&chan->chan, cookie, NULL);
+ if (status == DMA_COMPLETE)
+ return 0;
+
+ /*
* If the cookie doesn't correspond to the currently running transfer
* then the descriptor hasn't been processed yet, and the residue is
* equal to the full descriptor size.
*/
- if (cookie != desc->async_tx.cookie)
- return desc->size;
+ if (cookie != desc->async_tx.cookie) {
+ list_for_each_entry(desc, &chan->desc.pending, node) {
+ if (cookie == desc->async_tx.cookie)
+ return desc->size;
+ }
+ list_for_each_entry(desc, &chan->desc.active, node) {
+ if (cookie == desc->async_tx.cookie)
+ return desc->size;
+ }
+
+ /*
+ * No descriptor found for the cookie, there's thus no residue.
+ * This shouldn't happen if the calling driver passes a correct
+ * cookie value.
+ */
+ WARN(1, "No descriptor for cookie!");
+ return 0;
+ }
/*
* In descriptor mode the descriptor running pointer is not maintained
@@ -1202,6 +1231,10 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
residue = rcar_dmac_chan_get_residue(rchan, cookie);
spin_unlock_irqrestore(&rchan->lock, flags);
+ /* if there's no residue, the cookie is complete */
+ if (!residue)
+ return DMA_COMPLETE;
+
dma_set_residue(txstate, residue);
return status;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 80d86402490e..c94ffab0d25c 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -532,11 +532,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
GFP_KERNEL);
- if (!sh_chan) {
- dev_err(sdev->dma_dev.dev,
- "No free memory for allocating dma channels!\n");
+ if (!sh_chan)
return -ENOMEM;
- }
schan = &sh_chan->shdma_chan;
schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
@@ -732,10 +729,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
GFP_KERNEL);
- if (!shdev) {
- dev_err(&pdev->dev, "Not enough memory\n");
+ if (!shdev)
return -ENOMEM;
- }
dma_dev = &shdev->shdma_dev.dma_dev;
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 6da2eaa6c294..69b9564dc9d9 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -245,11 +245,8 @@ static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
int err;
sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
- if (!sc) {
- dev_err(sdev->dma_dev.dev,
- "No free memory for allocating dma channels!\n");
+ if (!sc)
return -ENOMEM;
- }
schan = &sc->shdma_chan;
schan->max_xfer_len = 64 * 1024 * 1024 - 1;
@@ -349,10 +346,8 @@ static int sudmac_probe(struct platform_device *pdev)
err = -ENOMEM;
su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
GFP_KERNEL);
- if (!su_dev) {
- dev_err(&pdev->dev, "Not enough memory\n");
+ if (!su_dev)
return err;
- }
dma_dev = &su_dev->shdma_dev.dma_dev;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index e48350e65089..d8bc3f2a71db 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -854,10 +854,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
int ret, i;
sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
- if (!sdma) {
- dev_err(dev, "Memory exhausted!\n");
+ if (!sdma)
return -ENOMEM;
- }
+
data = (struct sirfsoc_dmadata *)
(of_match_device(op->dev.driver->of_match_table,
&op->dev)->data);
@@ -981,6 +980,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
of_dma_controller_free(op->dev.of_node);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
+ tasklet_kill(&sdma->tasklet);
irq_dispose_mapping(sdma->irq);
pm_runtime_disable(&op->dev);
if (!pm_runtime_status_suspended(&op->dev))
@@ -1126,17 +1126,17 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};
-struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
.exec = sirfsoc_dma_execute_hw_a6,
.type = SIRFSOC_DMA_VER_A6,
};
-struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
.exec = sirfsoc_dma_execute_hw_a7v1,
.type = SIRFSOC_DMA_VER_A7V1,
};
-struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
.exec = sirfsoc_dma_execute_hw_a7v2,
.type = SIRFSOC_DMA_VER_A7V2,
};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6fb8307468ab..8b18e44a02d5 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2588,7 +2588,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
}
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_COMPLETE)
+ if (ret != DMA_COMPLETE && txstate)
dma_set_residue(txstate, stedma40_residue(chan));
if (d40_is_paused(d40c))
@@ -3237,10 +3237,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
(num_phy_chans + num_log_chans + num_memcpy_chans) *
sizeof(struct d40_chan), GFP_KERNEL);
- if (base == NULL) {
- d40_err(&pdev->dev, "Out of memory\n");
+ if (base == NULL)
goto failure;
- }
base->rev = rev;
base->clk = clk;
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 27b818dee7c7..13b42dd9900c 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -10,7 +10,7 @@
#include "ste_dma40_ll.h"
-u8 d40_width_to_bits(enum dma_slave_buswidth width)
+static u8 d40_width_to_bits(enum dma_slave_buswidth width)
{
if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
return STEDMA40_ESIZE_8_BIT;
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 5065ca43face..3835fcde3545 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -865,7 +865,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, state);
- if (ret == DMA_COMPLETE)
+ if (ret == DMA_COMPLETE || !state)
return ret;
spin_lock_irqsave(&vchan->vc.lock, flags);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 01e316f73559..6ab9eb98588a 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -300,10 +300,8 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
/* Allocate DMA desc */
dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
- if (!dma_desc) {
- dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
+ if (!dma_desc)
return NULL;
- }
dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
dma_desc->txd.tx_submit = tegra_dma_tx_submit;
@@ -340,8 +338,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
spin_unlock_irqrestore(&tdc->lock, flags);
sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);
- if (!sg_req)
- dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
+
return sg_req;
}
@@ -484,7 +481,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
* load new configuration.
*/
tegra_dma_pause(tdc, false);
- status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
/*
* If interrupt is pending then do nothing as the ISR will handle
@@ -822,13 +819,8 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
/* Check on wait_ack desc status */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
if (dma_desc->txd.cookie == cookie) {
- residual = dma_desc->bytes_requested -
- (dma_desc->bytes_transferred %
- dma_desc->bytes_requested);
- dma_set_residue(txstate, residual);
ret = dma_desc->dma_status;
- spin_unlock_irqrestore(&tdc->lock, flags);
- return ret;
+ goto found;
}
}
@@ -836,17 +828,22 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
dma_desc = sg_req->dma_desc;
if (dma_desc->txd.cookie == cookie) {
- residual = dma_desc->bytes_requested -
- (dma_desc->bytes_transferred %
- dma_desc->bytes_requested);
- dma_set_residue(txstate, residual);
ret = dma_desc->dma_status;
- spin_unlock_irqrestore(&tdc->lock, flags);
- return ret;
+ goto found;
}
}
- dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie);
+ dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
+ dma_desc = NULL;
+
+found:
+ if (dma_desc && txstate) {
+ residual = dma_desc->bytes_requested -
+ (dma_desc->bytes_transferred %
+ dma_desc->bytes_requested);
+ dma_set_residue(txstate, residual);
+ }
+
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
@@ -905,7 +902,6 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
enum dma_slave_buswidth *slave_bw)
{
-
switch (direction) {
case DMA_MEM_TO_DEV:
*apb_addr = tdc->dma_sconfig.dst_addr;
@@ -948,8 +944,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc;
- unsigned int i;
- struct scatterlist *sg;
+ unsigned int i;
+ struct scatterlist *sg;
unsigned long csr, ahb_seq, apb_ptr, apb_seq;
struct list_head req_list;
struct tegra_dma_sg_req *sg_req = NULL;
@@ -1062,7 +1058,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc = NULL;
- struct tegra_dma_sg_req *sg_req = NULL;
+ struct tegra_dma_sg_req *sg_req = NULL;
unsigned long csr, ahb_seq, apb_ptr, apb_seq;
int len;
size_t remain_len;
@@ -1204,7 +1200,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma *tdma = tdc->tdma;
-
struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req;
struct list_head dma_desc_list;
@@ -1305,7 +1300,7 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
static int tegra_dma_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct resource *res;
struct tegra_dma *tdma;
int ret;
int i;
@@ -1319,10 +1314,8 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
- if (!tdma) {
- dev_err(&pdev->dev, "Error: memory allocation failed\n");
+ if (!tdma)
return -ENOMEM;
- }
tdma->dev = &pdev->dev;
tdma->chip_data = cdata;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index e107779b1a2e..5ae294b256a7 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -452,7 +452,7 @@ static struct platform_driver ti_dma_xbar_driver = {
.probe = ti_dma_xbar_probe,
};
-int omap_dmaxbar_init(void)
+static int omap_dmaxbar_init(void)
{
return platform_driver_register(&ti_dma_xbar_driver);
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 559cd4073698..e82745aa42a8 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -337,18 +337,14 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
int err;
td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
- if (!td_desc) {
- dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ if (!td_desc)
goto out;
- }
td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
- if (!td_desc->desc_list) {
- dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ if (!td_desc->desc_list)
goto err;
- }
dma_async_tx_descriptor_init(&td_desc->txd, chan);
td_desc->txd.tx_submit = td_tx_submit;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 8849318b32b7..7632290e7c14 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1165,9 +1165,12 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
{
struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
+
dma_async_device_unregister(&dc->dma);
- if (dc->irq >= 0)
+ if (dc->irq >= 0) {
+ devm_free_irq(&pdev->dev, dc->irq, dc);
tasklet_kill(&dc->tasklet);
+ }
dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
return 0;
}
@@ -1228,8 +1231,10 @@ static int txx9dmac_remove(struct platform_device *pdev)
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
txx9dmac_off(ddev);
- if (ddev->irq >= 0)
+ if (ddev->irq >= 0) {
+ devm_free_irq(&pdev->dev, ddev->irq, ddev);
tasklet_kill(&ddev->tasklet);
+ }
return 0;
}
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 3c4e9f2fea28..9e91f8f5b087 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1 +1,2 @@
-obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o
+obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
+obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_dma.c
index df9118540b91..4e223d094433 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -45,6 +45,7 @@
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "../dmaengine.h"
@@ -113,7 +114,7 @@
#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
/* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -157,12 +158,25 @@
/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
+#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
#define XILINX_DMA_NUM_APP_WORDS 5
+/* Multi-Channel DMA Descriptor offsets */
+#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + ((x) - 1) * 0x20)
+#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + ((x) - 1) * 0x20)
+
+/* Multi-Channel DMA Masks/Shifts */
+#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
+#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
+#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
+#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
+#define XILINX_DMA_BD_STRIDE_SHIFT 0
+#define XILINX_DMA_BD_VSIZE_SHIFT 19
+
/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20
@@ -194,22 +208,22 @@ struct xilinx_vdma_desc_hw {
/**
* struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
* @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
- * @pad2: Reserved @0x0C
- * @pad3: Reserved @0x10
- * @pad4: Reserved @0x14
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @pad1: Reserved @0x10
+ * @pad2: Reserved @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
*/
struct xilinx_axidma_desc_hw {
u32 next_desc;
- u32 pad1;
+ u32 next_desc_msb;
u32 buf_addr;
- u32 pad2;
- u32 pad3;
- u32 pad4;
+ u32 buf_addr_msb;
+ u32 mcdma_control;
+ u32 vsize_stride;
u32 control;
u32 status;
u32 app[XILINX_DMA_NUM_APP_WORDS];
@@ -218,21 +232,21 @@ struct xilinx_axidma_desc_hw {
/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
- * @pad1: Reserved @0x04
+ * @next_descmsb: Next Descriptor Pointer MSB @0x04
* @src_addr: Source address @0x08
- * @pad2: Reserved @0x0C
+ * @src_addrmsb: Source address MSB @0x0C
* @dest_addr: Destination address @0x10
- * @pad3: Reserved @0x14
+ * @dest_addrmsb: Destination address MSB @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
*/
struct xilinx_cdma_desc_hw {
u32 next_desc;
- u32 pad1;
+ u32 next_desc_msb;
u32 src_addr;
- u32 pad2;
+ u32 src_addr_msb;
u32 dest_addr;
- u32 pad3;
+ u32 dest_addr_msb;
u32 control;
u32 status;
} __aligned(64);
@@ -278,11 +292,13 @@ struct xilinx_cdma_tx_segment {
* @async_tx: Async transaction descriptor
* @segments: TX segments list
* @node: Node in the channel descriptors list
+ * @cyclic: Check for cyclic transfers.
*/
struct xilinx_dma_tx_descriptor {
struct dma_async_tx_descriptor async_tx;
struct list_head segments;
struct list_head node;
+ bool cyclic;
};
/**
@@ -302,6 +318,7 @@ struct xilinx_dma_tx_descriptor {
* @direction: Transfer direction
* @num_frms: Number of frames
* @has_sg: Support scatter transfers
+ * @cyclic: Check for cyclic transfers.
* @genlock: Support genlock mode
* @err: Channel has errors
* @tasklet: Cleanup work after irq
@@ -312,6 +329,7 @@ struct xilinx_dma_tx_descriptor {
* @desc_submitcount: Descriptor h/w submitted count
* @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
* @start_transfer: Differentiate b/w DMA IP's transfer
*/
struct xilinx_dma_chan {
@@ -330,6 +348,7 @@ struct xilinx_dma_chan {
enum dma_transfer_direction direction;
int num_frms;
bool has_sg;
+ bool cyclic;
bool genlock;
bool err;
struct tasklet_struct tasklet;
@@ -340,7 +359,9 @@ struct xilinx_dma_chan {
u32 desc_submitcount;
u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ struct xilinx_axidma_tx_segment *cyclic_seg_v;
void (*start_transfer)(struct xilinx_dma_chan *chan);
+ u16 tdest;
};
struct xilinx_dma_config {
@@ -357,6 +378,7 @@ struct xilinx_dma_config {
* @common: DMA device structure
* @chan: Driver specific DMA channel
* @has_sg: Specifies whether Scatter-Gather is present or not
+ * @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
* @pdev: Platform device structure pointer
@@ -366,6 +388,8 @@ struct xilinx_dma_config {
* @txs_clk: DMA mm2s stream clock
* @rx_clk: DMA s2mm clock
* @rxs_clk: DMA s2mm stream clock
+ * @nr_channels: Number of channels DMA device supports
+ * @chan_id: DMA channel identifier
*/
struct xilinx_dma_device {
void __iomem *regs;
@@ -373,6 +397,7 @@ struct xilinx_dma_device {
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
bool has_sg;
+ bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -382,6 +407,8 @@ struct xilinx_dma_device {
struct clk *txs_clk;
struct clk *rx_clk;
struct clk *rxs_clk;
+ u32 nr_channels;
+ u32 chan_id;
};
/* Macros */
@@ -454,6 +481,34 @@ static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}
+static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
+{
+ lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
+}
+
+static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
+ dma_addr_t addr)
+{
+ if (chan->ext_addr)
+ dma_writeq(chan, reg, addr);
+ else
+ dma_ctrl_write(chan, reg, addr);
+}
+
+static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
+ struct xilinx_axidma_desc_hw *hw,
+ dma_addr_t buf_addr, size_t sg_used,
+ size_t period_len)
+{
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
+ hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
+ period_len);
+ } else {
+ hw->buf_addr = buf_addr + sg_used + period_len;
+ }
+}
+
/* -----------------------------------------------------------------------------
* Descriptors and segments alloc and free
*/
@@ -491,11 +546,10 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
struct xilinx_cdma_tx_segment *segment;
dma_addr_t phys;
- segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
if (!segment)
return NULL;
- memset(segment, 0, sizeof(*segment));
segment->phys = phys;
return segment;
@@ -513,11 +567,10 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
struct xilinx_axidma_tx_segment *segment;
dma_addr_t phys;
- segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
if (!segment)
return NULL;
- memset(segment, 0, sizeof(*segment));
segment->phys = phys;
return segment;
@@ -660,13 +713,37 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
dev_dbg(chan->dev, "Free all channel resources.\n");
xilinx_dma_free_descriptors(chan);
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
xilinx_dma_free_tx_segment(chan, chan->seg_v);
+ }
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
}
/**
+ * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ * @flags: flags for spin lock
+ */
+static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc,
+ unsigned long *flags)
+{
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
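+	/*
+	 * Drop the channel lock around the client callback so that it
+	 * may safely queue further transactions.
+	 */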
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, *flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, *flags);
+ }
+}
+
+/**
* xilinx_dma_chan_desc_cleanup - Clean channel descriptors
* @chan: Driver specific DMA channel
*/
@@ -681,6 +758,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
dma_async_tx_callback callback;
void *callback_param;
+ if (desc->cyclic) {
+ xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
+ break;
+ }
+
/* Remove from the list of running transactions */
list_del(&desc->node);
@@ -757,7 +839,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
return -ENOMEM;
}
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/*
* For AXI DMA case after submitting a pending_list, keep
* an extra segment allocated so that the "next descriptor"
@@ -768,6 +850,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
*/
chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
+ /*
+ * For cyclic DMA mode we need to program the tail Descriptor
+ * register with a value which is not a part of the BD chain
+ * so allocating a desc segment during channel allocation for
+ * programming tail descriptor.
+ */
+ chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+ }
+
dma_cookie_init(dchan);
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
@@ -1065,12 +1156,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
}
if (chan->has_sg) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
+ xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
/* Update tail ptr register which will start the transfer */
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
} else {
/* In simple mode */
struct xilinx_cdma_tx_segment *segment;
@@ -1082,8 +1173,8 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
hw = &segment->hw;
- dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
- dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+ xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
+ xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -1124,18 +1215,20 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_axidma_tx_segment, node);
- old_head = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- new_head = chan->seg_v;
- /* Copy Buffer Descriptor fields. */
- new_head->hw = old_head->hw;
+ if (chan->has_sg && !chan->xdev->mcdma) {
+ old_head = list_first_entry(&head_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ new_head = chan->seg_v;
+ /* Copy Buffer Descriptor fields. */
+ new_head->hw = old_head->hw;
- /* Swap and save new reserve */
- list_replace_init(&old_head->node, &new_head->node);
- chan->seg_v = old_head;
+ /* Swap and save new reserve */
+ list_replace_init(&old_head->node, &new_head->node);
+ chan->seg_v = old_head;
- tail_segment->hw.next_desc = chan->seg_v->phys;
- head_desc->async_tx.phys = new_head->phys;
+ tail_segment->hw.next_desc = chan->seg_v->phys;
+ head_desc->async_tx.phys = new_head->phys;
+ }
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
@@ -1146,9 +1239,25 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg)
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
+ if (chan->has_sg && !chan->xdev->mcdma)
+ xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
+ if (chan->has_sg && chan->xdev->mcdma) {
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+ } else {
+ if (!chan->tdest) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+ } else {
+ dma_ctrl_write(chan,
+ XILINX_DMA_MCRX_CDESC(chan->tdest),
+ head_desc->async_tx.phys);
+ }
+ }
+ }
xilinx_dma_start(chan);
@@ -1156,9 +1265,27 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
return;
/* Start the transfer */
- if (chan->has_sg) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+ if (chan->has_sg && !chan->xdev->mcdma) {
+ if (chan->cyclic)
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ chan->cyclic_seg_v->phys);
+ else
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else if (chan->has_sg && chan->xdev->mcdma) {
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
+ } else {
+ if (!chan->tdest) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ dma_ctrl_write(chan,
+ XILINX_DMA_MCRX_TDESC(chan->tdest),
+ tail_segment->phys);
+ }
+ }
} else {
struct xilinx_axidma_tx_segment *segment;
struct xilinx_axidma_desc_hw *hw;
@@ -1168,7 +1295,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
node);
hw = &segment->hw;
- dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+ xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -1209,7 +1336,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
list_del(&desc->node);
- dma_cookie_complete(&desc->async_tx);
+ if (!desc->cyclic)
+ dma_cookie_complete(&desc->async_tx);
list_add_tail(&desc->node, &chan->done_list);
}
}
@@ -1397,6 +1525,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
unsigned long flags;
int err;
+ if (chan->cyclic) {
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return -EBUSY;
+ }
+
if (chan->err) {
/*
* If reset fails, need to hard reset the system.
@@ -1414,6 +1547,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
/* Put this transaction onto the tail of the pending queue */
append_desc_queue(chan, desc);
+ if (desc->cyclic)
+ chan->cyclic = true;
+
spin_unlock_irqrestore(&chan->lock, flags);
return cookie;
@@ -1541,6 +1677,10 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
hw->control = len;
hw->src_addr = dma_src;
hw->dest_addr = dma_dst;
+ if (chan->ext_addr) {
+ hw->src_addr_msb = upper_32_bits(dma_src);
+ hw->dest_addr_msb = upper_32_bits(dma_dst);
+ }
/* Fill the previous next descriptor with current */
prev = list_last_entry(&desc->segments,
@@ -1623,7 +1763,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
hw = &segment->hw;
/* Fill in the descriptor */
- hw->buf_addr = sg_dma_address(sg) + sg_used;
+ xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
+ sg_used, 0);
hw->control = copy;
@@ -1669,12 +1810,204 @@ error:
}
/**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+ struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+ size_t copy, sg_used;
+ unsigned int num_periods;
+ int i;
+ u32 reg;
+
+ if (!period_len)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+
+ if (!num_periods)
+ return NULL;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ chan->direction = direction;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ for (i = 0; i < num_periods; ++i) {
+ sg_used = 0;
+
+ while (sg_used < period_len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = min_t(size_t, period_len - sg_used,
+ XILINX_DMA_MAX_TRANS_LEN);
+ hw = &segment->hw;
+ xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
+ period_len * i);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head_segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head_segment->phys;
+
+ desc->cyclic = true;
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+ reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.next_desc = (u32) head_segment->phys;
+
+ /* For the last DMA_MEM_TO_DEV transfer, set EOP */
+ if (direction == DMA_MEM_TO_DEV) {
+ head_segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
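For context, a minimal consumer sketch of this new cyclic mode through the generic dmaengine API; `chan` is assumed to come from dma_request_chan(), and the buffer, period size, and callback names are hypothetical:

	struct dma_async_tx_descriptor *txd;

	/* a DMA-mapped ring of 8 periods; per the usual cyclic contract,
	 * the callback fires once per completed period */
	txd = dmaengine_prep_dma_cyclic(chan, buf, 8 * period, period,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (txd) {
		txd->callback = rx_period_done;	/* hypothetical handler */
		txd->callback_param = ctx;
		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
	}

The transfer then loops until dmaengine_terminate_all() is called, which for the cyclic case also resets the channel and clears XILINX_DMA_CR_CYCLIC_BD_EN_MASK (see xilinx_dma_terminate_all() below).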
+/**
+ * xilinx_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment;
+ struct xilinx_axidma_desc_hw *hw;
+
+ if (!is_slave_direction(xt->dir))
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ if (xt->frame_size != 1)
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ chan->direction = xt->dir;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ if (xt->dir != DMA_MEM_TO_DEV)
+ hw->buf_addr = xt->dst_start;
+ else
+ hw->buf_addr = xt->src_start;
+
+ hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
+ hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
+ XILINX_DMA_BD_VSIZE_MASK;
+ hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
+ XILINX_DMA_BD_STRIDE_MASK;
+ hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+
+ /* For the last DMA_MEM_TO_DEV transfer, set EOP */
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
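A sketch of how a client might build the interleaved template this prep op accepts; the driver takes exactly one chunk per frame (frame_size == 1), and `chan`, `src_phys`, and the sizes below are made-up values:

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;
	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = src_phys;	/* DMA address of the first frame */
	xt->numf = 8;			/* vsize: number of frames */
	xt->frame_size = 1;		/* required by the checks above */
	xt->sgl[0].size = 1024;		/* hsize: bytes per frame */
	xt->sgl[0].icg = 64;		/* stride = size + icg, as programmed above */
	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);	/* this driver consumes the template during prep */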
+/**
* xilinx_dma_terminate_all - Halt the channel and free descriptors
* @chan: Driver specific DMA Channel pointer
*/
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ u32 reg;
+
+ if (chan->cyclic)
+ xilinx_dma_chan_reset(chan);
/* Halt the DMA engine */
xilinx_dma_halt(chan);
@@ -1682,6 +2015,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
+ if (chan->cyclic) {
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+ reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+ chan->cyclic = false;
+ }
+
return 0;
}
@@ -1972,7 +2312,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
* Return: '0' on success and failure value on error
*/
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
- struct device_node *node)
+ struct device_node *node, int chan_id)
{
struct xilinx_dma_chan *chan;
bool has_dre = false;
@@ -2014,9 +2354,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
if (!has_dre)
xdev->common.copy_align = fls(width - 1);
- if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
+ if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+ of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+ of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
chan->direction = DMA_MEM_TO_DEV;
- chan->id = 0;
+ chan->id = chan_id;
+ chan->tdest = chan_id;
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2027,9 +2370,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->flush_on_fsync = true;
}
} else if (of_device_is_compatible(node,
- "xlnx,axi-vdma-s2mm-channel")) {
+ "xlnx,axi-vdma-s2mm-channel") ||
+ of_device_is_compatible(node,
+ "xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
- chan->id = 1;
+ chan->id = chan_id;
+ chan->tdest = chan_id - xdev->nr_channels;
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2084,6 +2430,32 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
}
/**
+ * xilinx_dma_child_probe - Per child node probe
+ * It gets the number of dma-channels per child node from
+ * the device tree and initializes all the channels.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: 0 always.
+ */
+static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+ struct device_node *node)
+{
+ int ret, i, nr_channels = 1;
+
+ ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+ if ((ret < 0) && xdev->mcdma)
+ dev_warn(xdev->dev, "missing dma-channels property\n");
+
+ for (i = 0; i < nr_channels; i++)
+ xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
+
+ xdev->nr_channels += nr_channels;
+
+ return 0;
+}
+
+/**
* of_dma_xilinx_xlate - Translation function
* @dma_spec: Pointer to DMA specifier as found in the device tree
* @ofdma: Pointer to DMA controller data
@@ -2096,7 +2468,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_dma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
+ if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
@@ -2172,6 +2544,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Retrieve the DMA engine properties from the device tree */
xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
@@ -2218,7 +2592,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_tx_status = xilinx_dma_tx_status;
xdev->common.device_issue_pending = xilinx_dma_issue_pending;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+ xdev->common.device_prep_dma_cyclic =
+ xilinx_dma_prep_dma_cyclic;
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_dma_prep_interleaved;
/* Residue calculation is supported by only AXI DMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -2234,13 +2613,13 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Initialize the channels */
for_each_child_of_node(node, child) {
- err = xilinx_dma_chan_probe(xdev, child);
+ err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
goto disable_clks;
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
- for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+ for (i = 0; i < xdev->nr_channels; i++)
if (xdev->chan[i])
xdev->chan[i]->num_frms = num_frames;
}
@@ -2263,7 +2642,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
disable_clks:
xdma_disable_allclks(xdev);
error:
- for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+ for (i = 0; i < xdev->nr_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
@@ -2285,7 +2664,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&xdev->common);
- for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+ for (i = 0; i < xdev->nr_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
new file mode 100644
index 000000000000..6d221e5c72ee
--- /dev/null
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -0,0 +1,1151 @@
+/*
+ * DMA driver for Xilinx ZynqMP DMA Engine
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "../dmaengine.h"
+
+/* Register Offsets */
+#define ZYNQMP_DMA_ISR 0x100
+#define ZYNQMP_DMA_IMR 0x104
+#define ZYNQMP_DMA_IER 0x108
+#define ZYNQMP_DMA_IDS 0x10C
+#define ZYNQMP_DMA_CTRL0 0x110
+#define ZYNQMP_DMA_CTRL1 0x114
+#define ZYNQMP_DMA_DATA_ATTR 0x120
+#define ZYNQMP_DMA_DSCR_ATTR 0x124
+#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128
+#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C
+#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130
+#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134
+#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138
+#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C
+#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140
+#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144
+#define ZYNQMP_DMA_SRC_START_LSB 0x158
+#define ZYNQMP_DMA_SRC_START_MSB 0x15C
+#define ZYNQMP_DMA_DST_START_LSB 0x160
+#define ZYNQMP_DMA_DST_START_MSB 0x164
+#define ZYNQMP_DMA_RATE_CTRL 0x18C
+#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
+#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
+#define ZYNQMP_DMA_CTRL2 0x200
+
+/* Interrupt registers bit field definitions */
+#define ZYNQMP_DMA_DONE BIT(10)
+#define ZYNQMP_DMA_AXI_WR_DATA BIT(9)
+#define ZYNQMP_DMA_AXI_RD_DATA BIT(8)
+#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7)
+#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6)
+#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5)
+#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4)
+#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3)
+#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2)
+#define ZYNQMP_DMA_INV_APB BIT(0)
+
+/* Control 0 register bit field definitions */
+#define ZYNQMP_DMA_OVR_FETCH BIT(7)
+#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6)
+#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3)
+
+/* Control 1 register bit field definitions */
+#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0)
+
+/* Data Attribute register bit field definitions */
+#define ZYNQMP_DMA_ARBURST GENMASK(27, 26)
+#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22)
+#define ZYNQMP_DMA_ARCACHE_OFST 22
+#define ZYNQMP_DMA_ARQOS GENMASK(21, 18)
+#define ZYNQMP_DMA_ARQOS_OFST 18
+#define ZYNQMP_DMA_ARLEN GENMASK(17, 14)
+#define ZYNQMP_DMA_ARLEN_OFST 14
+#define ZYNQMP_DMA_AWBURST GENMASK(13, 12)
+#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8)
+#define ZYNQMP_DMA_AWCACHE_OFST 8
+#define ZYNQMP_DMA_AWQOS GENMASK(7, 4)
+#define ZYNQMP_DMA_AWQOS_OFST 4
+#define ZYNQMP_DMA_AWLEN GENMASK(3, 0)
+#define ZYNQMP_DMA_AWLEN_OFST 0
+
+/* Descriptor Attribute register bit field definitions */
+#define ZYNQMP_DMA_AXCOHRNT BIT(8)
+#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4)
+#define ZYNQMP_DMA_AXCACHE_OFST 4
+#define ZYNQMP_DMA_AXQOS GENMASK(3, 0)
+#define ZYNQMP_DMA_AXQOS_OFST 0
+
+/* Control register 2 bit field definitions */
+#define ZYNQMP_DMA_ENABLE BIT(0)
+
+/* Buffer Descriptor definitions */
+#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10
+#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4
+#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2
+#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1
+
+/* Interrupt Mask specific definitions */
+#define ZYNQMP_DMA_INT_ERR (ZYNQMP_DMA_AXI_RD_DATA | \
+ ZYNQMP_DMA_AXI_WR_DATA | \
+ ZYNQMP_DMA_AXI_RD_DST_DSCR | \
+ ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
+ ZYNQMP_DMA_INV_APB)
+#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \
+ ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
+ ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
+#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \
+ ZYNQMP_DMA_INT_ERR | \
+ ZYNQMP_DMA_INT_OVRFL | \
+ ZYNQMP_DMA_DST_DSCR_DONE)
+
+/* Max number of descriptors per channel */
+#define ZYNQMP_DMA_NUM_DESCS 32
+
+/* Max transfer size per descriptor */
+#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+
+/* Reset values for data attributes */
+#define ZYNQMP_DMA_AXCACHE_VAL 0xF
+#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
+#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
+
+#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
+
+#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF
+
+/* Bus width in bits */
+#define ZYNQMP_DMA_BUS_WIDTH_64 64
+#define ZYNQMP_DMA_BUS_WIDTH_128 128
+
+#define ZYNQMP_DMA_DESC_SIZE(chan) ((chan)->desc_size)
+
+#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
+ common)
+#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \
+ async_tx)
+
+/**
+ * struct zynqmp_dma_desc_ll - Hw linked list descriptor
+ * @addr: Buffer address
+ * @size: Size of the buffer
+ * @ctrl: Control word
+ * @nxtdscraddr: Next descriptor base address
+ * @rsvd: Reserved field for Hw internal use.
+ */
+struct zynqmp_dma_desc_ll {
+ u64 addr;
+ u32 size;
+ u32 ctrl;
+ u64 nxtdscraddr;
+ u64 rsvd;
+} __aligned(64);
+
+/**
+ * struct zynqmp_dma_desc_sw - Per Transaction structure
+ * @src: Source address for simple mode dma
+ * @dst: Destination address for simple mode dma
+ * @len: Transfer length for simple mode dma
+ * @node: Node in the channel descriptor list
+ * @tx_list: List head for the current transfer
+ * @async_tx: Async transaction descriptor
+ * @src_v: Virtual address of the src descriptor
+ * @src_p: Physical address of the src descriptor
+ * @dst_v: Virtual address of the dst descriptor
+ * @dst_p: Physical address of the dst descriptor
+ */
+struct zynqmp_dma_desc_sw {
+ u64 src;
+ u64 dst;
+ u32 len;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+ struct zynqmp_dma_desc_ll *src_v;
+ dma_addr_t src_p;
+ struct zynqmp_dma_desc_ll *dst_v;
+ dma_addr_t dst_p;
+};
+
+/**
+ * struct zynqmp_dma_chan - Driver specific DMA channel structure
+ * @zdev: Driver specific device structure
+ * @regs: Control registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @free_list: Descriptors free
+ * @active_list: Descriptors active
+ * @sw_desc_pool: SW descriptor pool
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool_v: Virtual address of the hw descriptor pool
+ * @desc_pool_p: Physical address of the hw descriptor pool
+ * @desc_free_cnt: Descriptor available count
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @is_dmacoherent: Tells whether dma operations are coherent or not
+ * @tasklet: Cleanup work after irq
+ * @idle: Channel status
+ * @desc_size: Size of the low level descriptor
+ * @err: Channel has errors
+ * @bus_width: Bus width
+ * @src_burst_len: Source burst length
+ * @dst_burst_len: Dest burst length
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
+ */
+struct zynqmp_dma_chan {
+ struct zynqmp_dma_device *zdev;
+ void __iomem *regs;
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head free_list;
+ struct list_head active_list;
+ struct zynqmp_dma_desc_sw *sw_desc_pool;
+ struct list_head done_list;
+ struct dma_chan common;
+ void *desc_pool_v;
+ dma_addr_t desc_pool_p;
+ u32 desc_free_cnt;
+ struct device *dev;
+ int irq;
+ bool is_dmacoherent;
+ struct tasklet_struct tasklet;
+ bool idle;
+ u32 desc_size;
+ bool err;
+ u32 bus_width;
+ u32 src_burst_len;
+ u32 dst_burst_len;
+ struct clk *clk_main;
+ struct clk *clk_apb;
+};
+
+/**
+ * struct zynqmp_dma_device - DMA device structure
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ */
+struct zynqmp_dma_device {
+ struct device *dev;
+ struct dma_device common;
+ struct zynqmp_dma_chan *chan;
+};
+
+static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
+ u64 value)
+{
+ lo_hi_writeq(value, chan->regs + reg);
+}
+
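lo_hi_writeq() above issues two 32-bit writes, low word first, which lines up with the adjacent *_LSB/*_MSB register pairs defined earlier; the open-coded equivalent would be:

	writel(lower_32_bits(value), chan->regs + reg);
	writel(upper_32_bits(value), chan->regs + reg + 4);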
+/**
+ * zynqmp_dma_update_desc_to_ctrlr - Program descriptor addresses into the controller
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_sw *desc)
+{
+ dma_addr_t addr;
+
+ addr = desc->src_p;
+ zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
+ addr = desc->dst_p;
+ zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
+}
+
+/**
+ * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Hw descriptor pointer
+ */
+static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
+ void *desc)
+{
+ struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;
+
+ hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
+ hw++;
+ hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
+}
+
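The hw++ above steps from the source descriptor to its destination twin: source and destination hw descriptors are laid out back to back in the descriptor pool, the same pairing zynqmp_dma_config_sg_ll_desc() relies on below via ddesc = sdesc + 1.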
+/**
+ * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Hw descriptor pointer
+ * @src: Source buffer address
+ * @dst: Destination buffer address
+ * @len: Transfer length
+ * @prev: Previous hw descriptor pointer
+ */
+static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_ll *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t len,
+ struct zynqmp_dma_desc_ll *prev)
+{
+ struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;
+
+ sdesc->size = ddesc->size = len;
+ sdesc->addr = src;
+ ddesc->addr = dst;
+
+ sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
+ if (chan->is_dmacoherent) {
+ sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+ ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+ }
+
+ if (prev) {
+ dma_addr_t addr = chan->desc_pool_p +
+ ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
+ ddesc = prev + 1;
+ prev->nxtdscraddr = addr;
+ ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
+ }
+}
+
+/**
+ * zynqmp_dma_init - Initialize the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
+{
+ u32 val;
+
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+ val = readl(chan->regs + ZYNQMP_DMA_ISR);
+ writel(val, chan->regs + ZYNQMP_DMA_ISR);
+
+ if (chan->is_dmacoherent) {
+ val = ZYNQMP_DMA_AXCOHRNT;
+ val = (val & ~ZYNQMP_DMA_AXCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
+ writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
+ }
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ if (chan->is_dmacoherent) {
+ val = (val & ~ZYNQMP_DMA_ARCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
+ val = (val & ~ZYNQMP_DMA_AWCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
+ }
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+
+ /* Clear the interrupt accounting registers */
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+ chan->idle = true;
+}
+
+/**
+ * zynqmp_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct zynqmp_dma_chan *chan = to_chan(tx->chan);
+ struct zynqmp_dma_desc_sw *desc, *new;
+ dma_cookie_t cookie;
+
+ new = tx_to_desc(tx);
+ spin_lock_bh(&chan->lock);
+ cookie = dma_cookie_assign(tx);
+
+ if (!list_empty(&chan->pending_list)) {
+ desc = list_last_entry(&chan->pending_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!list_empty(&desc->tx_list))
+ desc = list_last_entry(&desc->tx_list,
+ struct zynqmp_dma_desc_sw, node);
+ desc->src_v->nxtdscraddr = new->src_p;
+ desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+ desc->dst_v->nxtdscraddr = new->dst_p;
+ desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+ }
+
+ list_add_tail(&new->node, &chan->pending_list);
+ spin_unlock_bh(&chan->lock);
+
+ return cookie;
+}
+
+/**
+ * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
+ * @chan: ZynqMP DMA channel pointer
+ *
+ * Return: The sw descriptor
+ */
+static struct zynqmp_dma_desc_sw *
+zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ spin_lock_bh(&chan->lock);
+ desc = list_first_entry(&chan->free_list,
+ struct zynqmp_dma_desc_sw, node);
+ list_del(&desc->node);
+ spin_unlock_bh(&chan->lock);
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ /* Clear the src and dst descriptor memory */
+ memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+ memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+
+ return desc;
+}
+
+/**
+ * zynqmp_dma_free_descriptor - Return the descriptor and its children to the free list
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_sw *sdesc)
+{
+ struct zynqmp_dma_desc_sw *child, *next;
+
+ chan->desc_free_cnt++;
+ list_add_tail(&sdesc->node, &chan->free_list);
+ list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
+ chan->desc_free_cnt++;
+ list_move_tail(&child->node, &chan->free_list);
+ }
+}
+
+/**
+ * zynqmp_dma_free_desc_list - Free descriptors list
+ * @chan: ZynqMP DMA channel pointer
+ * @list: List to parse and delete the descriptor
+ */
+static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
+ struct list_head *list)
+{
+ struct zynqmp_dma_desc_sw *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node)
+ zynqmp_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * zynqmp_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ struct zynqmp_dma_desc_sw *desc;
+ int i;
+
+ chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
+ GFP_KERNEL);
+ if (!chan->sw_desc_pool)
+ return -ENOMEM;
+
+ chan->idle = true;
+ chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
+
+ INIT_LIST_HEAD(&chan->free_list);
+
+ for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+ desc = chan->sw_desc_pool + i;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
+ list_add_tail(&desc->node, &chan->free_list);
+ }
+
+ chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
+ (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ &chan->desc_pool_p, GFP_KERNEL);
+ if (!chan->desc_pool_v)
+ return -ENOMEM;
+
+ for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+ desc = chan->sw_desc_pool + i;
+ desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
+ (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
+ desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
+ desc->src_p = chan->desc_pool_p +
+ (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
+ desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
+ }
+
+ return ZYNQMP_DMA_NUM_DESCS;
+}
+
+/**
+ * zynqmp_dma_start - Start DMA channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
+{
+ writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+ chan->idle = false;
+ writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
+}
+
+/**
+ * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
+ * @chan: ZynqMP DMA channel pointer
+ * @status: Interrupt status value
+ */
+static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+{
+ u32 val;
+
+ if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+}
+
+static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+{
+ u32 val;
+
+ val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+ val |= ZYNQMP_DMA_POINT_TYPE_SG;
+ writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ val = (val & ~ZYNQMP_DMA_ARLEN) |
+ (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+ val = (val & ~ZYNQMP_DMA_AWLEN) |
+ (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+}
+
+/**
+ * zynqmp_dma_device_config - Zynqmp dma device configuration
+ * @dchan: DMA channel
+ * @config: DMA device config
+ */
+static int zynqmp_dma_device_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ chan->src_burst_len = config->src_maxburst;
+ chan->dst_burst_len = config->dst_maxburst;
+
+ return 0;
+}
+
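These burst lengths end up in the ARLEN/AWLEN fields programmed by zynqmp_dma_config(); a consumer sets them through the standard slave config (a sketch, values hypothetical and limited by the 4-bit AXI length fields):

	struct dma_slave_config cfg = {
		.src_maxburst = 8,
		.dst_maxburst = 8,
	};

	dmaengine_slave_config(chan, &cfg);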
+/**
+ * zynqmp_dma_start_transfer - Initiate the new transfer
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ if (!chan->idle)
+ return;
+
+ zynqmp_dma_config(chan);
+
+ desc = list_first_entry_or_null(&chan->pending_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!desc)
+ return;
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ zynqmp_dma_update_desc_to_ctrlr(chan, desc);
+ zynqmp_dma_start(chan);
+}
+
+/**
+ * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
+ * @chan: ZynqMP DMA channel
+ */
+static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc, *next;
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock(&chan->lock);
+ callback(callback_param);
+ spin_lock(&chan->lock);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ zynqmp_dma_free_descriptor(chan, desc);
+ }
+}
+
+/**
+ * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ desc = list_first_entry_or_null(&chan->active_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!desc)
+ return;
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ zynqmp_dma_start_transfer(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_free_descriptors - Free channel descriptors
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
+{
+ zynqmp_dma_free_desc_list(chan, &chan->active_list);
+ zynqmp_dma_free_desc_list(chan, &chan->pending_list);
+ zynqmp_dma_free_desc_list(chan, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ zynqmp_dma_free_descriptors(chan);
+ spin_unlock_bh(&chan->lock);
+ dma_free_coherent(chan->dev,
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
+ chan->desc_pool_v, chan->desc_pool_p);
+ kfree(chan->sw_desc_pool);
+}
+
+/**
+ * zynqmp_dma_reset - Reset the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
+{
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+
+ zynqmp_dma_complete_descriptor(chan);
+ zynqmp_dma_chan_desc_cleanup(chan);
+ zynqmp_dma_free_descriptors(chan);
+ zynqmp_dma_init(chan);
+}
+
+/**
+ * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the ZynqMP DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
+{
+ struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+ u32 isr, imr, status;
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = readl(chan->regs + ZYNQMP_DMA_ISR);
+ imr = readl(chan->regs + ZYNQMP_DMA_IMR);
+ status = isr & ~imr;
+
+ writel(isr, chan->regs + ZYNQMP_DMA_ISR);
+ if (status & ZYNQMP_DMA_INT_DONE) {
+ tasklet_schedule(&chan->tasklet);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status & ZYNQMP_DMA_DONE)
+ chan->idle = true;
+
+ if (status & ZYNQMP_DMA_INT_ERR) {
+ chan->err = true;
+ tasklet_schedule(&chan->tasklet);
+ dev_err(chan->dev, "Channel %p has errors\n", chan);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status & ZYNQMP_DMA_INT_OVRFL) {
+ zynqmp_dma_handle_ovfl_int(chan, status);
+ dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the ZynqMP DMA channel structure
+ */
+static void zynqmp_dma_do_tasklet(unsigned long data)
+{
+ struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+ u32 count;
+
+ spin_lock(&chan->lock);
+
+ if (chan->err) {
+ zynqmp_dma_reset(chan);
+ chan->err = false;
+ goto unlock;
+ }
+
+ count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+ while (count) {
+ zynqmp_dma_complete_descriptor(chan);
+ zynqmp_dma_chan_desc_cleanup(chan);
+ count--;
+ }
+
+ if (chan->idle)
+ zynqmp_dma_start_transfer(chan);
+
+unlock:
+ spin_unlock(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
+ * @dchan: DMA channel pointer
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+ zynqmp_dma_free_descriptors(chan);
+ spin_unlock_bh(&chan->lock);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: Destination buffer address
+ * @dma_src: Source buffer address
+ * @len: Transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
+ struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, ulong flags)
+{
+ struct zynqmp_dma_chan *chan;
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ void *desc = NULL, *prev = NULL;
+ size_t copy;
+ u32 desc_cnt;
+
+ chan = to_chan(dchan);
+
+ if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
+ return NULL;
+
+ desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&chan->lock);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_bh(&chan->lock);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+ spin_unlock_bh(&chan->lock);
+
+ do {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+
+ copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
+ dma_dst, copy, prev);
+ prev = desc;
+ len -= copy;
+ dma_src += copy;
+ dma_dst += copy;
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ async_tx_ack(&first->async_tx);
+ first->async_tx.flags = flags;
+ return &first->async_tx;
+}
+
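A minimal memcpy client sketch against this channel; `dev`, the buffers, and `len` are hypothetical, and len must stay within the 1 GiB ZYNQMP_DMA_MAX_TRANS_LEN checked above:

	dma_addr_t src, dst;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
	dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);
	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (txd) {
		cookie = dmaengine_submit(txd);
		dma_async_issue_pending(chan);
		dma_sync_wait(chan, cookie);	/* or use a completion callback */
	}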
+/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a memory-to-memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ void *desc = NULL, *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+
+ for_each_sg(src_sg, sg, src_sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+ ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&chan->lock);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_bh(&chan->lock);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+ spin_unlock_bh(&chan->lock);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ if (len == 0)
+ goto fetch;
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
+ len, prev);
+ prev = desc;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ first->async_tx.flags = flags;
+ return &first->async_tx;
+}
+
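The sg op is reached via its inline dmaengine wrapper; both scatterlists must already be mapped with dma_map_sg() (a sketch, names hypothetical):

	txd = dmaengine_prep_dma_sg(chan, dst_sgl, dst_nents,
				    src_sgl, src_nents, DMA_CTRL_ACK);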
+/**
+ * zynqmp_dma_chan_remove - Channel remove function
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
+{
+ if (!chan)
+ return;
+
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ tasklet_kill(&chan->tasklet);
+ list_del(&chan->common.device_node);
+ clk_disable_unprepare(chan->clk_apb);
+ clk_disable_unprepare(chan->clk_main);
+}
+
+/**
+ * zynqmp_dma_chan_probe - Per Channel Probing
+ * @zdev: Driver specific device structure
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+ struct platform_device *pdev)
+{
+ struct zynqmp_dma_chan *chan;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+ int err;
+
+ chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = zdev->dev;
+ chan->zdev = zdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chan->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chan->regs))
+ return PTR_ERR(chan->regs);
+
+ chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+ chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+ chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+ err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+ if (err < 0) {
+ dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
+ return err;
+ }
+
+ if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
+ chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
+ dev_err(zdev->dev, "invalid bus-width value");
+ return -EINVAL;
+ }
+
+ chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
+ zdev->chan = chan;
+ tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->free_list);
+
+ dma_cookie_init(&chan->common);
+ chan->common.device = &zdev->common;
+ list_add_tail(&chan->common.device_node, &zdev->common.channels);
+
+ zynqmp_dma_init(chan);
+ chan->irq = platform_get_irq(pdev, 0);
+ if (chan->irq < 0)
+ return -ENXIO;
+ err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
+ "zynqmp-dma", chan);
+ if (err)
+ return err;
+ chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+ if (IS_ERR(chan->clk_main)) {
+ dev_err(&pdev->dev, "main clock not found.\n");
+ return PTR_ERR(chan->clk_main);
+ }
+
+ chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+ if (IS_ERR(chan->clk_apb)) {
+ dev_err(&pdev->dev, "apb clock not found.\n");
+ return PTR_ERR(chan->clk_apb);
+ }
+
+ err = clk_prepare_enable(chan->clk_main);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable main clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(chan->clk_apb);
+ if (err) {
+ clk_disable_unprepare(chan->clk_main);
+ dev_err(&pdev->dev, "Unable to enable apb clock.\n");
+ return err;
+ }
+
+ chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
+ chan->idle = true;
+ return 0;
+}
+
+/**
+ * of_zynqmp_dma_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct zynqmp_dma_device *zdev = ofdma->of_dma_data;
+
+ return dma_get_slave_channel(&zdev->chan->common);
+}
+
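Since this controller exposes a single channel, the translation ignores the specifier; a consumer still goes through the usual DT lookup (a sketch; the request name "dma0" is hypothetical):

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "dma0");
	if (IS_ERR(chan))
		return PTR_ERR(chan);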
+/**
+ * zynqmp_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dma_device *zdev;
+ struct dma_device *p;
+ int ret;
+
+ zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return -ENOMEM;
+
+ zdev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&zdev->common.channels);
+
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_cap_set(DMA_SG, zdev->common.cap_mask);
+ dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
+
+ p = &zdev->common;
+ p->device_prep_dma_sg = zynqmp_dma_prep_sg;
+ p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
+ p->device_terminate_all = zynqmp_dma_device_terminate_all;
+ p->device_issue_pending = zynqmp_dma_issue_pending;
+ p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
+ p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
+ p->device_tx_status = dma_cookie_status;
+ p->device_config = zynqmp_dma_device_config;
+ p->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, zdev);
+
+ ret = zynqmp_dma_chan_probe(zdev, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Probing channel failed\n");
+ goto free_chan_resources;
+ }
+
+ p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
+ p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
+
+ ret = dma_async_device_register(&zdev->common);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register the DMA device\n");
+ goto free_chan_resources;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_zynqmp_dma_xlate, zdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ dma_async_device_unregister(&zdev->common);
+ goto free_chan_resources;
+ }
+
+ dev_info(&pdev->dev, "ZynqMP DMA driver probed successfully\n");
+
+ return 0;
+
+free_chan_resources:
+ zynqmp_dma_chan_remove(zdev->chan);
+ return ret;
+}
+
+/**
+ * zynqmp_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&zdev->common);
+
+ zynqmp_dma_chan_remove(zdev->chan);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_dma_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);
+
+static struct platform_driver zynqmp_dma_driver = {
+ .driver = {
+ .name = "xilinx-zynqmp-dma",
+ .of_match_table = zynqmp_dma_of_match,
+ },
+ .probe = zynqmp_dma_probe,
+ .remove = zynqmp_dma_remove,
+};
+
+module_platform_driver(zynqmp_dma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");