Diffstat (limited to 'drivers/dma')
32 files changed, 2022 insertions, 356 deletions
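Among other changes, the diff below adds dma_get_any_slave_channel() and a dma_request_slave_channel_reason() variant that returns an error pointer instead of NULL, so callers can tell why no slave channel was obtained; the existing dma_request_slave_channel() becomes a thin wrapper that still returns NULL on failure. As a rough sketch (not part of the patch; the function name, the device pointer and the "rx" channel name are hypothetical), a client driver could use the new variant like this:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical client: request the "rx" slave channel, propagating errors. */
static int example_request_rx_chan(struct device *dev, struct dma_chan **chan)
{
	struct dma_chan *c;

	c = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(c))
		return PTR_ERR(c);	/* e.g. -ENODEV if neither DT nor ACPI describes it */

	*chan = c;
	return 0;
}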
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 446687cc2334..605b016bcea4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -62,6 +62,7 @@ config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86 select DMA_ENGINE + select DMA_ENGINE_RAID select DCA help Enable support for the Intel(R) I/OAT DMA engine present @@ -112,6 +113,7 @@ config MV_XOR bool "Marvell XOR engine support" depends on PLAT_ORION select DMA_ENGINE + select DMA_ENGINE_RAID select ASYNC_TX_ENABLE_CHANNEL_SWITCH ---help--- Enable support for the Marvell XOR engine. @@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA tristate "AMCC PPC440SPe ADMA support" depends on 440SPe || 440SP select DMA_ENGINE + select DMA_ENGINE_RAID select ARCH_HAS_ASYNC_TX_FIND_CHANNEL select ASYNC_TX_ENABLE_CHANNEL_SWITCH help @@ -289,9 +292,11 @@ config MMP_TDMA bool "MMP Two-Channel DMA support" depends on ARCH_MMP select DMA_ENGINE + select MMP_SRAM help Support the MMP Two-Channel DMA engine. This engine used for MMP Audio DMA and pxa910 SQU. + It needs sram driver under mach-mmp. Say Y here if you enabled MMP ADMA, otherwise say N. @@ -301,6 +306,12 @@ config DMA_OMAP select DMA_ENGINE select DMA_VIRTUAL_CHANNELS +config DMA_BCM2835 + tristate "BCM2835 DMA engine support" + depends on (ARCH_BCM2835 || MACH_BCM2708) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + config TI_CPPI41 tristate "AM33xx CPPI41 DMA support" depends on ARCH_OMAP @@ -331,6 +342,15 @@ config K3_DMA Support the DMA engine for Hisilicon K3 platform devices. +config MOXART_DMA + tristate "MOXART DMA support" + depends on ARCH_MOXART + select DMA_ENGINE + select DMA_OF + select DMA_VIRTUAL_CHANNELS + help + Enable support for the MOXA ART SoC DMA controller. + config DMA_ENGINE bool @@ -352,6 +372,7 @@ config NET_DMA bool "Network: TCP receive copy offload" depends on DMA_ENGINE && NET default (INTEL_IOATDMA || FSL_DMA) + depends on BROKEN help This enables the use of DMA engines in the network stack to offload receive copy-to-user operations, freeing CPU cycles. @@ -377,4 +398,7 @@ config DMATEST Simple DMA test client. Say N unless you're debugging a DMA Device driver. +config DMA_ENGINE_RAID + bool + endif diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 0ce2da97e429..a029d0f4a1be 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -38,7 +38,9 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o obj-$(CONFIG_TI_CPPI41) += cppi41.o obj-$(CONFIG_K3_DMA) += k3dma.o +obj-$(CONFIG_MOXART_DMA) += moxart-dma.o diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index e69b03c0fa50..1e506afa33f5 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -30,11 +30,12 @@ static DEFINE_MUTEX(acpi_dma_lock); * @adev: ACPI device to match with * @adma: struct acpi_dma of the given DMA controller * - * Returns 1 on success, 0 when no information is available, or appropriate - * errno value on error. - * * In order to match a device from DSDT table to the corresponding CSRT device * we use MMIO address and IRQ. + * + * Return: + * 1 on success, 0 when no information is available, or appropriate errno value + * on error. 
*/ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, struct acpi_device *adev, struct acpi_dma *adma) @@ -101,7 +102,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, * * We are using this table to get the request line range of the specific DMA * controller to be used later. - * */ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) { @@ -141,10 +141,11 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) * @data pointer to controller specific data to be used by * translation function * - * Returns 0 on success or appropriate errno value on error. - * * Allocated memory should be freed with appropriate acpi_dma_controller_free() * call. + * + * Return: + * 0 on success or appropriate errno value on error. */ int acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) @@ -188,6 +189,9 @@ EXPORT_SYMBOL_GPL(acpi_dma_controller_register); * @dev: struct device of DMA controller * * Memory allocated by acpi_dma_controller_register() is freed here. + * + * Return: + * 0 on success or appropriate errno value on error. */ int acpi_dma_controller_free(struct device *dev) { @@ -225,6 +229,9 @@ static void devm_acpi_dma_release(struct device *dev, void *res) * Managed acpi_dma_controller_register(). DMA controller registered by this * function are automatically freed on driver detach. See * acpi_dma_controller_register() for more information. + * + * Return: + * 0 on success or appropriate errno value on error. */ int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) @@ -267,8 +274,6 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); * @adma: struct acpi_dma of DMA controller * @dma_spec: dma specifier to update * - * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. - * * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource * Descriptor": * DMA Request Line bits is a platform-relative number uniquely @@ -276,6 +281,9 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); * mapping is done in a controller-specific OS driver. * That's why we can safely adjust slave_id when the appropriate controller is * found. + * + * Return: + * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise. */ static int acpi_dma_update_dma_spec(struct acpi_dma *adma, struct acpi_dma_spec *dma_spec) @@ -334,7 +342,8 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) * @dev: struct device to get DMA request from * @index: index of FixedDMA descriptor for @dev * - * Returns pointer to appropriate dma channel on success or NULL on error. + * Return: + * Pointer to appropriate dma channel on success or NULL on error. */ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, size_t index) @@ -403,7 +412,8 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); * translate the names "tx" and "rx" here based on the most common case where * the first FixedDMA descriptor is TX and second is RX. * - * Returns pointer to appropriate dma channel on success or NULL on error. + * Return: + * Pointer to appropriate dma channel on success or NULL on error. */ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, const char *name) @@ -427,8 +437,10 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); * @adma: pointer to ACPI DMA controller data * * A simple translation function for ACPI based devices. 
Passes &struct - * dma_spec to the DMA controller driver provided filter function. Returns - * pointer to the channel if found or %NULL otherwise. + * dma_spec to the DMA controller driver provided filter function. + * + * Return: + * Pointer to the channel if found or %NULL otherwise. */ struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, struct acpi_dma *adma) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index ec4ee5c1fe9d..8114731a1c62 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -83,6 +83,7 @@ #include <linux/dmaengine.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -1771,6 +1772,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) return false; } +EXPORT_SYMBOL_GPL(pl08x_filter_id); /* * Just check that the device is there and active @@ -2167,7 +2169,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) /* Register slave channels */ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, pl08x->pd->num_slave_channels, true); - if (ret <= 0) { + if (ret < 0) { dev_warn(&pl08x->adev->dev, "%s failed to enumerate slave channels - %d\n", __func__, ret); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index f31d647acdfa..2787aba60c6b 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } -static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} #if defined(VERBOSE_DEBUG) static void vdbg_dump_regs(struct at_dma_chan *atchan) diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c new file mode 100644 index 000000000000..a03602164e3e --- /dev/null +++ b/drivers/dma/bcm2835-dma.c @@ -0,0 +1,707 @@ +/* + * BCM2835 DMA engine support + * + * This driver only supports cyclic DMA transfers + * as needed for the I2S module. + * + * Author: Florian Meier <florian.meier@koalo.de> + * Copyright 2013 + * + * Based on + * OMAP DMAengine support by Russell King + * + * BCM2708 DMA Driver + * Copyright (C) 2010 Broadcom + * + * Raspberry Pi PCM I2S ALSA Driver + * Copyright (c) by Phil Poole 2013 + * + * MARVELL MMP Peripheral DMA Driver + * Copyright 2012 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/of.h> +#include <linux/of_dma.h> + +#include "virt-dma.h" + +struct bcm2835_dmadev { + struct dma_device ddev; + spinlock_t lock; + void __iomem *base; + struct device_dma_parameters dma_parms; +}; + +struct bcm2835_dma_cb { + uint32_t info; + uint32_t src; + uint32_t dst; + uint32_t length; + uint32_t stride; + uint32_t next; + uint32_t pad[2]; +}; + +struct bcm2835_chan { + struct virt_dma_chan vc; + struct list_head node; + + struct dma_slave_config cfg; + bool cyclic; + unsigned int dreq; + + int ch; + struct bcm2835_desc *desc; + + void __iomem *chan_base; + int irq_number; +}; + +struct bcm2835_desc { + struct virt_dma_desc vd; + enum dma_transfer_direction dir; + + unsigned int control_block_size; + struct bcm2835_dma_cb *control_block_base; + dma_addr_t control_block_base_phys; + + unsigned int frames; + size_t size; +}; + +#define BCM2835_DMA_CS 0x00 +#define BCM2835_DMA_ADDR 0x04 +#define BCM2835_DMA_SOURCE_AD 0x0c +#define BCM2835_DMA_DEST_AD 0x10 +#define BCM2835_DMA_NEXTCB 0x1C + +/* DMA CS Control and Status bits */ +#define BCM2835_DMA_ACTIVE BIT(0) +#define BCM2835_DMA_INT BIT(2) +#define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */ +#define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */ +#define BCM2835_DMA_ERR BIT(8) +#define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */ +#define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */ + +#define BCM2835_DMA_INT_EN BIT(0) +#define BCM2835_DMA_D_INC BIT(4) +#define BCM2835_DMA_D_DREQ BIT(6) +#define BCM2835_DMA_S_INC BIT(8) +#define BCM2835_DMA_S_DREQ BIT(10) + +#define BCM2835_DMA_PER_MAP(x) ((x) << 16) + +#define BCM2835_DMA_DATA_TYPE_S8 1 +#define BCM2835_DMA_DATA_TYPE_S16 2 +#define BCM2835_DMA_DATA_TYPE_S32 4 +#define BCM2835_DMA_DATA_TYPE_S128 16 + +#define BCM2835_DMA_BULK_MASK BIT(0) +#define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3)) + +/* Valid only for channels 0 - 14, 15 has its own base address */ +#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ +#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) + +static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) +{ + return container_of(d, struct bcm2835_dmadev, ddev); +} + +static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct bcm2835_chan, vc.chan); +} + +static inline struct bcm2835_desc *to_bcm2835_dma_desc( + struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct bcm2835_desc, vd.tx); +} + +static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) +{ + struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); + dma_free_coherent(desc->vd.tx.chan->device->dev, + desc->control_block_size, + desc->control_block_base, + desc->control_block_base_phys); + kfree(desc); +} + +static int bcm2835_dma_abort(void __iomem *chan_base) +{ + unsigned long cs; + long int timeout = 10000; + + cs = readl(chan_base + BCM2835_DMA_CS); + if (!(cs & BCM2835_DMA_ACTIVE)) + return 0; + + /* Write 0 to the active bit - Pause the DMA */ + writel(0, chan_base + BCM2835_DMA_CS); + + /* Wait for any current AXI transfer to complete */ + while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { + 
cpu_relax(); + cs = readl(chan_base + BCM2835_DMA_CS); + } + + /* We'll un-pause when we set of our next DMA */ + if (!timeout) + return -ETIMEDOUT; + + if (!(cs & BCM2835_DMA_ACTIVE)) + return 0; + + /* Terminate the control block chain */ + writel(0, chan_base + BCM2835_DMA_NEXTCB); + + /* Abort the whole DMA */ + writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, + chan_base + BCM2835_DMA_CS); + + return 0; +} + +static void bcm2835_dma_start_desc(struct bcm2835_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct bcm2835_desc *d; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_bcm2835_dma_desc(&vd->tx); + + writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); +} + +static irqreturn_t bcm2835_dma_callback(int irq, void *data) +{ + struct bcm2835_chan *c = data; + struct bcm2835_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Acknowledge interrupt */ + writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); + + d = c->desc; + + if (d) { + /* TODO Only works for cyclic DMA */ + vchan_cyclic_callback(&d->vd); + } + + /* Keep the DMA engine running */ + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); + + spin_unlock_irqrestore(&c->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + + dev_dbg(c->vc.chan.device->dev, + "Allocating DMA channel %d\n", c->ch); + + return request_irq(c->irq_number, + bcm2835_dma_callback, 0, "DMA IRQ", c); +} + +static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + + vchan_free_chan_resources(&c->vc); + free_irq(c->irq_number, c); + + dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); +} + +static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d) +{ + return d->size; +} + +static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr) +{ + unsigned int i; + size_t size; + + for (size = i = 0; i < d->frames; i++) { + struct bcm2835_dma_cb *control_block = + &d->control_block_base[i]; + size_t this_size = control_block->length; + dma_addr_t dma; + + if (d->dir == DMA_DEV_TO_MEM) + dma = control_block->dst; + else + dma = control_block->src; + + if (size) + size += this_size; + else if (addr >= dma && addr < dma + this_size) + size += dma + this_size - addr; + } + + return size; +} + +static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = + bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct bcm2835_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD); + else if (d->dir == DMA_DEV_TO_MEM) + pos = readl(c->chan_base + BCM2835_DMA_DEST_AD); + else + pos = 0; + + txstate->residue = bcm2835_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + + spin_unlock_irqrestore(&c->vc.lock, flags); + + return 
ret; +} + +static void bcm2835_dma_issue_pending(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + unsigned long flags; + + c->cyclic = true; /* Nothing else is implemented */ + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) + bcm2835_dma_start_desc(c); + + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct bcm2835_desc *d; + dma_addr_t dev_addr; + unsigned int es, sync_type; + unsigned int frame; + + /* Grab configuration */ + if (!is_slave_direction(direction)) { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (direction == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + sync_type = BCM2835_DMA_S_DREQ; + } else { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + sync_type = BCM2835_DMA_D_DREQ; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = BCM2835_DMA_DATA_TYPE_S32; + break; + default: + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d), GFP_NOWAIT); + if (!d) + return NULL; + + d->dir = direction; + d->frames = buf_len / period_len; + + /* Allocate memory for control blocks */ + d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); + d->control_block_base = dma_zalloc_coherent(chan->device->dev, + d->control_block_size, &d->control_block_base_phys, + GFP_NOWAIT); + + if (!d->control_block_base) { + kfree(d); + return NULL; + } + + /* + * Iterate over all frames, create a control block + * for each frame and link them together. + */ + for (frame = 0; frame < d->frames; frame++) { + struct bcm2835_dma_cb *control_block = + &d->control_block_base[frame]; + + /* Setup adresses */ + if (d->dir == DMA_DEV_TO_MEM) { + control_block->info = BCM2835_DMA_D_INC; + control_block->src = dev_addr; + control_block->dst = buf_addr + frame * period_len; + } else { + control_block->info = BCM2835_DMA_S_INC; + control_block->src = buf_addr + frame * period_len; + control_block->dst = dev_addr; + } + + /* Enable interrupt */ + control_block->info |= BCM2835_DMA_INT_EN; + + /* Setup synchronization */ + if (sync_type != 0) + control_block->info |= sync_type; + + /* Setup DREQ channel */ + if (c->dreq != 0) + control_block->info |= + BCM2835_DMA_PER_MAP(c->dreq); + + /* Length of a frame */ + control_block->length = period_len; + d->size += control_block->length; + + /* + * Next block is the next frame. + * This DMA engine driver currently only supports cyclic DMA. + * Therefore, wrap around at number of frames. 
+ */ + control_block->next = d->control_block_base_phys + + sizeof(struct bcm2835_dma_cb) + * ((frame + 1) % d->frames); + } + + return vchan_tx_prep(&c->vc, &d->vd, flags); +} + +static int bcm2835_dma_slave_config(struct bcm2835_chan *c, + struct dma_slave_config *cfg) +{ + if ((cfg->direction == DMA_DEV_TO_MEM && + cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || + (cfg->direction == DMA_MEM_TO_DEV && + cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || + !is_slave_direction(cfg->direction)) { + return -EINVAL; + } + + c->cfg = *cfg; + + return 0; +} + +static int bcm2835_dma_terminate_all(struct bcm2835_chan *c) +{ + struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); + unsigned long flags; + int timeout = 10000; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* + * Stop DMA activity: we assume the callback will not be called + * after bcm_dma_abort() returns (even if it does, it will see + * c->desc is NULL and exit.) + */ + if (c->desc) { + c->desc = NULL; + bcm2835_dma_abort(c->chan_base); + + /* Wait for stopping */ + while (--timeout) { + if (!(readl(c->chan_base + BCM2835_DMA_CS) & + BCM2835_DMA_ACTIVE)) + break; + + cpu_relax(); + } + + if (!timeout) + dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); + } + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + + switch (cmd) { + case DMA_SLAVE_CONFIG: + return bcm2835_dma_slave_config(c, + (struct dma_slave_config *)arg); + + case DMA_TERMINATE_ALL: + return bcm2835_dma_terminate_all(c); + + default: + return -ENXIO; + } +} + +static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) +{ + struct bcm2835_chan *c; + + c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->vc.desc_free = bcm2835_dma_desc_free; + vchan_init(&c->vc, &d->ddev); + INIT_LIST_HEAD(&c->node); + + d->ddev.chancnt++; + + c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); + c->ch = chan_id; + c->irq_number = irq; + + return 0; +} + +static void bcm2835_dma_free(struct bcm2835_dmadev *od) +{ + struct bcm2835_chan *c, *next; + + list_for_each_entry_safe(c, next, &od->ddev.channels, + vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + } +} + +static const struct of_device_id bcm2835_dma_of_match[] = { + { .compatible = "brcm,bcm2835-dma", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match); + +static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec, + struct of_dma *ofdma) +{ + struct bcm2835_dmadev *d = ofdma->of_dma_data; + struct dma_chan *chan; + + chan = dma_get_any_slave_channel(&d->ddev); + if (!chan) + return NULL; + + /* Set DREQ from param */ + to_bcm2835_dma_chan(chan)->dreq = spec->args[0]; + + return chan; +} + +static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = false; + caps->cmd_terminate = true; + + return 0; +} + +static int bcm2835_dma_probe(struct 
platform_device *pdev) +{ + struct bcm2835_dmadev *od; + struct resource *res; + void __iomem *base; + int rc; + int i; + int irq; + uint32_t chans_available; + + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + pdev->dev.dma_parms = &od->dma_parms; + dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + od->base = base; + + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; + od->ddev.device_tx_status = bcm2835_dma_tx_status; + od->ddev.device_issue_pending = bcm2835_dma_issue_pending; + od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps; + od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; + od->ddev.device_control = bcm2835_dma_control; + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + spin_lock_init(&od->lock); + + platform_set_drvdata(pdev, od); + + /* Request DMA channel mask from device tree */ + if (of_property_read_u32(pdev->dev.of_node, + "brcm,dma-channel-mask", + &chans_available)) { + dev_err(&pdev->dev, "Failed to get channel mask\n"); + rc = -EINVAL; + goto err_no_dma; + } + + /* + * Do not use the FIQ and BULK channels, + * because they are used by the GPU. + */ + chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK); + + for (i = 0; i < pdev->num_resources; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) + break; + + if (chans_available & (1 << i)) { + rc = bcm2835_dma_chan_init(od, i, irq); + if (rc) + goto err_no_dma; + } + } + + dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); + + /* Device-tree DMA controller registration */ + rc = of_dma_controller_register(pdev->dev.of_node, + bcm2835_dma_xlate, od); + if (rc) { + dev_err(&pdev->dev, "Failed to register DMA controller\n"); + goto err_no_dma; + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + dev_err(&pdev->dev, + "Failed to register slave DMA engine device: %d\n", rc); + goto err_no_dma; + } + + dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n"); + + return 0; + +err_no_dma: + bcm2835_dma_free(od); + return rc; +} + +static int bcm2835_dma_remove(struct platform_device *pdev) +{ + struct bcm2835_dmadev *od = platform_get_drvdata(pdev); + + dma_async_device_unregister(&od->ddev); + bcm2835_dma_free(od); + + return 0; +} + +static struct platform_driver bcm2835_dma_driver = { + .probe = bcm2835_dma_probe, + .remove = bcm2835_dma_remove, + .driver = { + .name = "bcm2835-dma", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(bcm2835_dma_of_match), + }, +}; + +module_platform_driver(bcm2835_dma_driver); + +MODULE_ALIAS("platform:bcm2835-dma"); +MODULE_DESCRIPTION("BCM2835 DMA engine driver"); +MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index c29dacff66fa..c18aebf7d5aa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -972,8 +972,10 @@ static int cppi41_dma_probe(struct platform_device *pdev) goto err_chans; irq = 
irq_of_parse_and_map(dev->of_node, 0); - if (!irq) + if (!irq) { + ret = -EINVAL; goto err_irq; + } cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index ea806bdc12ef..ed610b497518 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -535,11 +535,41 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) } EXPORT_SYMBOL_GPL(dma_get_slave_channel); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) +{ + dma_cap_mask_t mask; + struct dma_chan *chan; + int err; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* lock against __dma_request_channel */ + mutex_lock(&dma_list_mutex); + + chan = private_candidate(&mask, device, NULL, NULL); + if (chan) { + err = dma_chan_get(chan); + if (err) { + pr_debug("%s: failed to get %s: (%d)\n", + __func__, dma_chan_name(chan), err); + chan = NULL; + } + } + + mutex_unlock(&dma_list_mutex); + + return chan; +} +EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); + /** * __dma_request_channel - try to allocate an exclusive channel * @mask: capabilities that the channel must satisfy * @fn: optional callback to disposition available channels * @fn_param: opaque parameter to pass to dma_filter_fn + * + * Returns pointer to appropriate DMA channel on success or NULL. */ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) @@ -591,18 +621,43 @@ EXPORT_SYMBOL_GPL(__dma_request_channel); * dma_request_slave_channel - try to allocate an exclusive slave channel * @dev: pointer to client device structure * @name: slave channel name + * + * Returns pointer to appropriate DMA channel on success or an error pointer. */ -struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) +struct dma_chan *dma_request_slave_channel_reason(struct device *dev, + const char *name) { + struct dma_chan *chan; + /* If device-tree is present get slave info from here */ if (dev->of_node) return of_dma_request_slave_channel(dev->of_node, name); /* If device was enumerated by ACPI get slave info from here */ - if (ACPI_HANDLE(dev)) - return acpi_dma_request_slave_chan_by_name(dev, name); + if (ACPI_HANDLE(dev)) { + chan = acpi_dma_request_slave_chan_by_name(dev, name); + if (chan) + return chan; + } - return NULL; + return ERR_PTR(-ENODEV); +} +EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason); + +/** + * dma_request_slave_channel - try to allocate an exclusive slave channel + * @dev: pointer to client device structure + * @name: slave channel name + * + * Returns pointer to appropriate DMA channel on success or NULL. 
+ */ +struct dma_chan *dma_request_slave_channel(struct device *dev, + const char *name) +{ + struct dma_chan *ch = dma_request_slave_channel_reason(dev, name); + if (IS_ERR(ch)) + return NULL; + return ch; } EXPORT_SYMBOL_GPL(dma_request_slave_channel); @@ -912,7 +967,7 @@ struct dmaengine_unmap_pool { #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } static struct dmaengine_unmap_pool unmap_pool[] = { __UNMAP_POOL(2), - #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) + #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) __UNMAP_POOL(16), __UNMAP_POOL(128), __UNMAP_POOL(256), @@ -1054,7 +1109,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, dma_cookie_t cookie; unsigned long flags; - unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); + unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT); if (!unmap) return -ENOMEM; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 20f9a3aaf926..05b6dea770a4 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -31,7 +31,7 @@ module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO | S_IWUSR); MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); -static char test_device[20]; +static char test_device[32]; module_param_string(device, test_device, sizeof(test_device), S_IRUGO | S_IWUSR); MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); @@ -89,7 +89,7 @@ MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); struct dmatest_params { unsigned int buf_size; char channel[20]; - char device[20]; + char device[32]; unsigned int threads_per_chan; unsigned int max_channels; unsigned int iterations; @@ -539,9 +539,9 @@ static int dmatest_func(void *data) um->len = params->buf_size; for (i = 0; i < src_cnt; i++) { - unsigned long buf = (unsigned long) thread->srcs[i]; + void *buf = thread->srcs[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = buf & ~PAGE_MASK; + unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; um->addr[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_TO_DEVICE); @@ -559,9 +559,9 @@ static int dmatest_func(void *data) /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ dsts = &um->addr[src_cnt]; for (i = 0; i < dst_cnt; i++) { - unsigned long buf = (unsigned long) thread->dsts[i]; + void *buf = thread->dsts[i]; struct page *pg = virt_to_page(buf); - unsigned pg_off = buf & ~PAGE_MASK; + unsigned pg_off = (unsigned long) buf & ~PAGE_MASK; dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, DMA_BIDIRECTIONAL); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 7516be4677cf..13ac3f240e79 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -218,8 +218,10 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc, struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 ctllo; - /* Software emulation of LLP mode relies on interrupts to continue - * multi block transfer. */ + /* + * Software emulation of LLP mode relies on interrupts to continue + * multi block transfer. 
+ */ ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; channel_writel(dwc, SAR, desc->lli.sar); @@ -253,8 +255,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) &dwc->flags); if (was_soft_llp) { dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start new LLP transfer " - "inside ongoing one\n"); + "BUG: Attempted to start new LLP transfer inside ongoing one\n"); return; } @@ -420,8 +421,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) return; } - dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, - (unsigned long long)llp); + dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { /* Initial residue value */ @@ -567,9 +567,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, unlikely(status_xfer & dwc->mask)) { int i; - dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " - "interrupt, stopping DMA transfer\n", - status_xfer ? "xfer" : "error"); + dev_err(chan2dev(&dwc->chan), + "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n", + status_xfer ? "xfer" : "error"); spin_lock_irqsave(&dwc->lock, flags); @@ -711,9 +711,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, u32 ctllo; dev_vdbg(chan2dev(chan), - "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, - (unsigned long long)dest, (unsigned long long)src, - len, flags); + "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__, + &dest, &src, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); @@ -1401,9 +1400,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, /* Let's make a cyclic list */ last->lli.llp = cdesc->desc[0]->txd.phys; - dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " - "period %zu periods %d\n", (unsigned long long)buf_addr, - buf_len, period_len, periods); + dev_dbg(chan2dev(&dwc->chan), + "cyclic prepared buf %pad len %zu period %zu periods %d\n", + &buf_addr, buf_len, period_len, periods); cdesc->periods = periods; dwc->cdesc = cdesc; @@ -1603,9 +1602,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, dwc_params); - /* Decode maximum block size for given channel. The + /* + * Decode maximum block size for given channel. The * stored 4 bit value represents blocks from 0x00 for 3 - * up to 0x0a for 4095. */ + * up to 0x0a for 4095. 
+ */ dwc->block_size = (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; dwc->nollp = diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 2539ea0cbc63..cd8da451d199 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -699,8 +699,8 @@ static int edma_alloc_chan_resources(struct dma_chan *chan) echan->alloced = true; echan->slot[0] = echan->ch_num; - dev_info(dev, "allocated channel for %u:%u\n", - EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); + dev_dbg(dev, "allocated channel for %u:%u\n", + EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); return 0; @@ -736,7 +736,7 @@ static void edma_free_chan_resources(struct dma_chan *chan) echan->alloced = false; } - dev_info(dev, "freeing channel for %u\n", echan->ch_num); + dev_dbg(dev, "freeing channel for %u\n", echan->ch_num); } /* Send pending descriptor to hardware */ diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7086a16a55f2..f157c6f76b32 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan, hw->count = CPU_TO_DMA(chan, count, 32); } -static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) -{ - return DMA_TO_CPU(chan, desc->hw.count, 32); -} - static void set_desc_src(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { @@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan, hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); } -static dma_addr_t get_desc_src(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - u64 snoop_bits; - - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) - ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; - return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; -} - static void set_desc_dst(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t dst) { @@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan, hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); } -static dma_addr_t get_desc_dst(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - u64 snoop_bits; - - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) - ? 
((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; - return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; -} - static void set_desc_next(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { @@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; unsigned long flags; - dma_cookie_t cookie; + dma_cookie_t cookie = -EINVAL; spin_lock_irqsave(&chan->desc_lock, flags); @@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { struct dma_async_tx_descriptor *txd = &desc->async_tx; - struct device *dev = chan->common.device->dev; - dma_addr_t src = get_desc_src(chan, desc); - dma_addr_t dst = get_desc_dst(chan, desc); - u32 len = get_desc_cnt(chan, desc); /* Run the link descriptor callback function */ if (txd->callback) { diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 1ffc24484d23..d56e83599825 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -41,7 +41,7 @@ * channel is allowed to transfer before the DMA engine pauses * the current channel and switches to the next channel */ -#define FSL_DMA_MR_BWC 0x08000000 +#define FSL_DMA_MR_BWC 0x0A000000 /* Special MR definition for MPC8349 */ #define FSL_DMA_MR_EOTIE 0x00000080 diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c75679d42028..19041cefabb1 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -323,6 +323,7 @@ struct sdma_engine { struct clk *clk_ipg; struct clk *clk_ahb; spinlock_t channel_0_lock; + u32 script_number; struct sdma_script_start_addrs *script_addrs; const struct sdma_driver_data *drvdata; }; @@ -448,6 +449,7 @@ static const struct of_device_id sdma_dt_ids[] = { { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, }, { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, + { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sdma_dt_ids); @@ -724,6 +726,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac, per_2_emi = sdma->script_addrs->app_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_app_addr; break; + case IMX_DMATYPE_SSI_DUAL: + per_2_emi = sdma->script_addrs->ssish_2_mcu_addr; + emi_2_per = sdma->script_addrs->mcu_2_ssish_addr; + break; case IMX_DMATYPE_SSI_SP: case IMX_DMATYPE_MMC: case IMX_DMATYPE_SDHC: @@ -1238,6 +1244,7 @@ static void sdma_issue_pending(struct dma_chan *chan) } #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 static void sdma_add_scripts(struct sdma_engine *sdma, const struct sdma_script_start_addrs *addr) @@ -1246,7 +1253,11 @@ static void sdma_add_scripts(struct sdma_engine *sdma, s32 *saddr_arr = (u32 *)sdma->script_addrs; int i; - for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) + /* use the default firmware in ROM if missing external firmware */ + if (!sdma->script_number) + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; + + for (i = 0; i < sdma->script_number; i++) if (addr_arr[i] > 0) saddr_arr[i] = addr_arr[i]; } @@ -1272,6 +1283,17 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) goto err_firmware; if (header->ram_code_start + header->ram_code_size > fw->size) goto err_firmware; + switch (header->version_major) { + case 1: + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; + break; + case 2: + sdma->script_number = 
SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; + break; + default: + dev_err(sdma->dev, "unknown firmware version\n"); + goto err_firmware; + } addr = (void *)header + header->script_addrs_start; ram_code = (void *)header + header->ram_code_start; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1a49c777607c..4e3549a16132 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { chan = ioat_chan_by_index(instance, bit); - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &chan->state)) + tasklet_schedule(&chan->cleanup_task); } writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); @@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) { struct ioat_chan_common *chan = data; - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &chan->state)) + tasklet_schedule(&chan->cleanup_task); return IRQ_HANDLED; } @@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c chan->timer.function = device->timer_fn; chan->timer.data = data; tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); - tasklet_disable(&chan->cleanup_task); } /** @@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) writel(((u64) chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - tasklet_enable(&chan->cleanup_task); + set_bit(IOAT_RUN, &chan->state); ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", __func__, ioat->desccount); return ioat->desccount; } +void ioat_stop(struct ioat_chan_common *chan) +{ + struct ioatdma_device *device = chan->device; + struct pci_dev *pdev = device->pdev; + int chan_id = chan_num(chan); + struct msix_entry *msix; + + /* 1/ stop irq from firing tasklets + * 2/ stop the tasklet from re-arming irqs + */ + clear_bit(IOAT_RUN, &chan->state); + + /* flush inflight interrupts */ + switch (device->irq_mode) { + case IOAT_MSIX: + msix = &device->msix_entries[chan_id]; + synchronize_irq(msix->vector); + break; + case IOAT_MSI: + case IOAT_INTX: + synchronize_irq(pdev->irq); + break; + default: + break; + } + + /* flush inflight timers */ + del_timer_sync(&chan->timer); + + /* flush inflight tasklet runs */ + tasklet_kill(&chan->cleanup_task); + + /* final cleanup now that everything is quiesced and can't re-arm */ + device->cleanup_fn((unsigned long) &chan->common); +} + /** * ioat1_dma_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned @@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) if (ioat->desccount == 0) return; - tasklet_disable(&chan->cleanup_task); - del_timer_sync(&chan->timer); - ioat1_cleanup(ioat); + ioat_stop(chan); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. 
@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, static void ioat1_cleanup_event(unsigned long data) { struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat1_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -817,7 +855,15 @@ int ioat_dma_self_test(struct ioatdma_device *device) } dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_src)) { + dev_err(dev, "mapping src buffer failed\n"); + goto free_resources; + } dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_dest)) { + dev_err(dev, "mapping dest buffer failed\n"); + goto unmap_src; + } flags = DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); @@ -855,8 +901,9 @@ int ioat_dma_self_test(struct ioatdma_device *device) } unmap_dma: - dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); +unmap_src: + dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); free_resources: dma->device_free_chan_resources(dma_chan); out: diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 11fb877ddca9..e982f00a9843 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan, void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); void ioat_kobject_del(struct ioatdma_device *device); int ioat_dma_setup_interrupts(struct ioatdma_device *device); +void ioat_stop(struct ioat_chan_common *chan); extern const struct sysfs_ops ioat_sysfs_ops; extern struct ioat_sysfs_entry ioat_version_attr; extern struct ioat_sysfs_entry ioat_cap_attr; diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5d3affe7e976..8d1058085eeb 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) void ioat2_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat2_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) ioat->issued = 0; ioat->tail = 0; ioat->alloc_order = order; + set_bit(IOAT_RUN, &chan->state); spin_unlock_bh(&ioat->prep_lock); spin_unlock_bh(&chan->cleanup_lock); - tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); /* check that we got off the ground */ @@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); if (is_ioat_active(status) || is_ioat_idle(status)) { - set_bit(IOAT_RUN, &chan->state); return 1 << ioat->alloc_order; } else { u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); @@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c) if (!ioat->ring) return; - tasklet_disable(&chan->cleanup_task); - del_timer_sync(&chan->timer); - device->cleanup_fn((unsigned long) c); + ioat_stop(chan); device->reset_hw(chan); - clear_bit(IOAT_RUN, &chan->state); spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->prep_lock); diff --git a/drivers/dma/ioat/dma_v3.c 
b/drivers/dma/ioat/dma_v3.c index 820817e97e62..b9b38a1cf92f 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat) static void ioat3_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat3_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index e26075408e9b..a1f911aaf220 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -477,7 +477,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( dma_addr_t addr, src = 0, dst = 0; int num = sglen, i; - if (sgl == 0) + if (sgl == NULL) return NULL; for_each_sg(sgl, sg, sglen, i) { @@ -817,7 +817,7 @@ static int k3_dma_resume(struct device *dev) return 0; } -SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); +static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); static struct platform_driver k3_pdma_driver = { .driver = { diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 8869500ab92b..b439679f4126 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -5,6 +5,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ + #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> @@ -32,38 +33,37 @@ #define DTADR 0x0208 #define DCMD 0x020c -#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ -#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ -#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ -#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ -#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ -#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ -#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ -#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ - -#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ -#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ -#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ -#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ -#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ -#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ -#define DCSR_EORINTR (1 << 9) /* The end of Receive */ - -#define DRCMR(n) ((((n) < 64) ? 
0x0100 : 0x1100) + \ - (((n) & 0x3f) << 2)) -#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ -#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ +#define DCSR_RUN BIT(31) /* Run Bit (read / write) */ +#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ +#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */ +#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ +#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ +#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ +#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ +#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ + +#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */ +#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ +#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ +#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ +#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ +#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ +#define DCSR_EORINTR BIT(9) /* The end of Receive */ + +#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) +#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ +#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ -#define DDADR_STOP (1 << 0) /* Stop (read / write) */ - -#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ -#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ -#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ -#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ -#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ -#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ -#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ +#define DDADR_STOP BIT(0) /* Stop (read / write) */ + +#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ +#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */ +#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ +#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */ +#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ +#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ +#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. 
*/ #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ @@ -132,10 +132,14 @@ struct mmp_pdma_device { spinlock_t phy_lock; /* protect alloc/free phy channels */ }; -#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) -#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) -#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) -#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) +#define tx_to_mmp_pdma_desc(tx) \ + container_of(tx, struct mmp_pdma_desc_sw, async_tx) +#define to_mmp_pdma_desc(lh) \ + container_of(lh, struct mmp_pdma_desc_sw, node) +#define to_mmp_pdma_chan(dchan) \ + container_of(dchan, struct mmp_pdma_chan, chan) +#define to_mmp_pdma_dev(dmadev) \ + container_of(dmadev, struct mmp_pdma_device, device) static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) { @@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy) writel(dalgn, phy->base + DALGN); reg = (phy->idx << 2) + DCSR; - writel(readl(phy->base + reg) | DCSR_RUN, - phy->base + reg); + writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg); } static void disable_chan(struct mmp_pdma_phy *phy) { u32 reg; - if (phy) { - reg = (phy->idx << 2) + DCSR; - writel(readl(phy->base + reg) & ~DCSR_RUN, - phy->base + reg); - } + if (!phy) + return; + + reg = (phy->idx << 2) + DCSR; + writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg); } static int clear_chan_irq(struct mmp_pdma_phy *phy) @@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy) u32 dint = readl(phy->base + DINT); u32 reg = (phy->idx << 2) + DCSR; - if (dint & BIT(phy->idx)) { - /* clear irq */ - dcsr = readl(phy->base + reg); - writel(dcsr, phy->base + reg); - if ((dcsr & DCSR_BUSERR) && (phy->vchan)) - dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); - return 0; - } - return -EAGAIN; + if (!(dint & BIT(phy->idx))) + return -EAGAIN; + + /* clear irq */ + dcsr = readl(phy->base + reg); + writel(dcsr, phy->base + reg); + if ((dcsr & DCSR_BUSERR) && (phy->vchan)) + dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); + + return 0; } static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) { struct mmp_pdma_phy *phy = dev_id; - if (clear_chan_irq(phy) == 0) { - tasklet_schedule(&phy->vchan->tasklet); - return IRQ_HANDLED; - } else + if (clear_chan_irq(phy) != 0) return IRQ_NONE; + + tasklet_schedule(&phy->vchan->tasklet); + return IRQ_HANDLED; } static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) @@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) if (irq_num) return IRQ_HANDLED; - else - return IRQ_NONE; + + return IRQ_NONE; } /* lookup free phy channel as descending priority */ @@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) */ spin_lock_irqsave(&pdev->phy_lock, flags); - for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { + for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { for (i = 0; i < pdev->dma_channels; i++) { - if (prio != ((i & 0xf) >> 2)) + if (prio != (i & 0xf) >> 2) continue; phy = &pdev->phy[i]; if (!phy->vchan) { @@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) if (chan->desc_pool) return 1; - chan->desc_pool = - dma_pool_create(dev_name(&dchan->dev->device), chan->dev, - sizeof(struct mmp_pdma_desc_sw), - __alignof__(struct 
mmp_pdma_desc_sw), 0); + chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), + chan->dev, + sizeof(struct mmp_pdma_desc_sw), + __alignof__(struct mmp_pdma_desc_sw), + 0); if (!chan->desc_pool) { dev_err(chan->dev, "unable to allocate descriptor pool\n"); return -ENOMEM; } + mmp_pdma_free_phy(chan); chan->idle = true; chan->dev_addr = 0; @@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) } static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, - struct list_head *list) + struct list_head *list) { struct mmp_pdma_desc_sw *desc, *_desc; @@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) static struct dma_async_tx_descriptor * mmp_pdma_prep_memcpy(struct dma_chan *dchan, - dma_addr_t dma_dst, dma_addr_t dma_src, - size_t len, unsigned long flags) + dma_addr_t dma_dst, dma_addr_t dma_src, + size_t len, unsigned long flags) { struct mmp_pdma_chan *chan; struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; @@ -515,8 +521,8 @@ fail: static struct dma_async_tx_descriptor * mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_transfer_direction dir, - unsigned long flags, void *context) + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; @@ -591,10 +597,11 @@ fail: return NULL; } -static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( - struct dma_chan *dchan, dma_addr_t buf_addr, size_t len, - size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) +static struct dma_async_tx_descriptor * +mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, + dma_addr_t buf_addr, size_t len, size_t period_len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) { struct mmp_pdma_chan *chan; struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; @@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( goto fail; } - new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN | - (DCMD_LENGTH & period_len); + new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | + (DCMD_LENGTH & period_len)); new->desc.dsadr = dma_src; new->desc.dtadr = dma_dst; @@ -677,12 +684,11 @@ fail: } static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, - unsigned long arg) + unsigned long arg) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); struct dma_slave_config *cfg = (void *)arg; unsigned long flags; - int ret = 0; u32 maxburst = 0, addr = 0; enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; @@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, return -ENOSYS; } - return ret; + return 0; } static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, - dma_cookie_t cookie, struct dma_tx_state *txstate) + dma_cookie_t cookie, + struct dma_tx_state *txstate) { return dma_cookie_status(dchan, cookie, txstate); } @@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op) return 0; } -static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, - int idx, int irq) +static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) { struct mmp_pdma_phy *phy = &pdev->phy[idx]; struct mmp_pdma_chan *chan; int ret; - chan = devm_kzalloc(pdev->dev, - sizeof(struct mmp_pdma_chan), GFP_KERNEL); + chan = devm_kzalloc(pdev->dev, sizeof(struct 
mmp_pdma_chan), + GFP_KERNEL); if (chan == NULL) return -ENOMEM; @@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, phy->base = pdev->base; if (irq) { - ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_chan_handler, 0, "pdma", phy); + ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0, + "pdma", phy); if (ret) { dev_err(pdev->dev, "channel request irq fail!\n"); return ret; @@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, INIT_LIST_HEAD(&chan->chain_running); /* register virt channel to dma engine */ - list_add_tail(&chan->chan.device_node, - &pdev->device.channels); + list_add_tail(&chan->chan.device_node, &pdev->device.channels); return 0; } @@ -893,33 +898,15 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct mmp_pdma_device *d = ofdma->of_dma_data; - struct dma_chan *chan, *candidate; - -retry: - candidate = NULL; + struct dma_chan *chan; - /* walk the list of channels registered with the current instance and - * find one that is currently unused */ - list_for_each_entry(chan, &d->device.channels, device_node) - if (chan->client_count == 0) { - candidate = chan; - break; - } - - if (!candidate) + chan = dma_get_any_slave_channel(&d->device); + if (!chan) return NULL; - /* dma_get_slave_channel will return NULL if we lost a race between - * the lookup and the reservation */ - chan = dma_get_slave_channel(candidate); + to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; - if (chan) { - struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan); - c->drcmr = dma_spec->args[0]; - return chan; - } - - goto retry; + return chan; } static int mmp_pdma_probe(struct platform_device *op) @@ -934,6 +921,7 @@ static int mmp_pdma_probe(struct platform_device *op) pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); if (!pdev) return -ENOMEM; + pdev->dev = &op->dev; spin_lock_init(&pdev->phy_lock); @@ -945,8 +933,8 @@ static int mmp_pdma_probe(struct platform_device *op) of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); if (of_id) - of_property_read_u32(pdev->dev->of_node, - "#dma-channels", &dma_channels); + of_property_read_u32(pdev->dev->of_node, "#dma-channels", + &dma_channels); else if (pdata && pdata->dma_channels) dma_channels = pdata->dma_channels; else @@ -958,8 +946,9 @@ static int mmp_pdma_probe(struct platform_device *op) irq_num++; } - pdev->phy = devm_kzalloc(pdev->dev, - dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); + pdev->phy = devm_kcalloc(pdev->dev, + dma_channels, sizeof(struct mmp_pdma_chan), + GFP_KERNEL); if (pdev->phy == NULL) return -ENOMEM; @@ -968,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op) if (irq_num != dma_channels) { /* all chan share one irq, demux inside */ irq = platform_get_irq(op, 0); - ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_int_handler, 0, "pdma", pdev); + ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0, + "pdma", pdev); if (ret) return ret; } @@ -1045,7 +1034,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param) if (chan->device->dev->driver != &mmp_pdma_driver.driver) return false; - c->drcmr = *(unsigned int *) param; + c->drcmr = *(unsigned int *)param; return true; } @@ -1053,6 +1042,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn); module_platform_driver(mmp_pdma_driver); -MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); +MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); MODULE_AUTHOR("Marvell International Ltd."); MODULE_LICENSE("GPL v2"); diff 
--git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 3ddacc14a736..33f96aaa80c7 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -121,11 +121,13 @@ struct mmp_tdma_chan { int idx; enum mmp_tdma_type type; int irq; - unsigned long reg_base; + void __iomem *reg_base; size_t buf_len; size_t period_len; size_t pos; + + struct gen_pool *pool; }; #define TDMA_CHANNEL_NUM 2 @@ -182,7 +184,7 @@ static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) { - unsigned int tdcr; + unsigned int tdcr = 0; mmp_tdma_disable_chan(tdmac); @@ -324,7 +326,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) struct gen_pool *gpool; int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); - gpool = sram_get_gpool("asram"); + gpool = tdmac->pool; if (tdmac->desc_arr) gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, size); @@ -374,7 +376,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) struct gen_pool *gpool; int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); - gpool = sram_get_gpool("asram"); + gpool = tdmac->pool; if (!gpool) return NULL; @@ -505,7 +507,8 @@ static int mmp_tdma_remove(struct platform_device *pdev) } static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, - int idx, int irq, int type) + int idx, int irq, + int type, struct gen_pool *pool) { struct mmp_tdma_chan *tdmac; @@ -526,7 +529,8 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, tdmac->chan.device = &tdev->device; tdmac->idx = idx; tdmac->type = type; - tdmac->reg_base = (unsigned long)tdev->base + idx * 4; + tdmac->reg_base = tdev->base + idx * 4; + tdmac->pool = pool; tdmac->status = DMA_COMPLETE; tdev->tdmac[tdmac->idx] = tdmac; tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); @@ -553,6 +557,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) int i, ret; int irq = 0, irq_num = 0; int chan_num = TDMA_CHANNEL_NUM; + struct gen_pool *pool; of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); if (of_id) @@ -579,6 +584,15 @@ static int mmp_tdma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&tdev->device.channels); + if (pdev->dev.of_node) + pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0); + else + pool = sram_get_gpool("asram"); + if (!pool) { + dev_err(&pdev->dev, "asram pool not available\n"); + return -ENOMEM; + } + if (irq_num != chan_num) { irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, @@ -590,7 +604,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) /* initialize channel parameters */ for (i = 0; i < chan_num; i++) { irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); - ret = mmp_tdma_chan_init(tdev, i, irq, type); + ret = mmp_tdma_chan_init(tdev, i, irq, type, pool); if (ret) return ret; } diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c new file mode 100644 index 000000000000..3258e484e4f6 --- /dev/null +++ b/drivers/dma/moxart-dma.c @@ -0,0 +1,699 @@ +/* + * MOXA ART SoCs DMA Engine support. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_dma.h> +#include <linux/bitops.h> + +#include <asm/cacheflush.h> + +#include "dmaengine.h" +#include "virt-dma.h" + +#define APB_DMA_MAX_CHANNEL 4 + +#define REG_OFF_ADDRESS_SOURCE 0 +#define REG_OFF_ADDRESS_DEST 4 +#define REG_OFF_CYCLES 8 +#define REG_OFF_CTRL 12 +#define REG_OFF_CHAN_SIZE 16 + +#define APB_DMA_ENABLE BIT(0) +#define APB_DMA_FIN_INT_STS BIT(1) +#define APB_DMA_FIN_INT_EN BIT(2) +#define APB_DMA_BURST_MODE BIT(3) +#define APB_DMA_ERR_INT_STS BIT(4) +#define APB_DMA_ERR_INT_EN BIT(5) + +/* + * Unset: APB + * Set: AHB + */ +#define APB_DMA_SOURCE_SELECT 0x40 +#define APB_DMA_DEST_SELECT 0x80 + +#define APB_DMA_SOURCE 0x100 +#define APB_DMA_DEST 0x1000 + +#define APB_DMA_SOURCE_MASK 0x700 +#define APB_DMA_DEST_MASK 0x7000 + +/* + * 000: No increment + * 001: +1 (Burst=0), +4 (Burst=1) + * 010: +2 (Burst=0), +8 (Burst=1) + * 011: +4 (Burst=0), +16 (Burst=1) + * 101: -1 (Burst=0), -4 (Burst=1) + * 110: -2 (Burst=0), -8 (Burst=1) + * 111: -4 (Burst=0), -16 (Burst=1) + */ +#define APB_DMA_SOURCE_INC_0 0 +#define APB_DMA_SOURCE_INC_1_4 0x100 +#define APB_DMA_SOURCE_INC_2_8 0x200 +#define APB_DMA_SOURCE_INC_4_16 0x300 +#define APB_DMA_SOURCE_DEC_1_4 0x500 +#define APB_DMA_SOURCE_DEC_2_8 0x600 +#define APB_DMA_SOURCE_DEC_4_16 0x700 +#define APB_DMA_DEST_INC_0 0 +#define APB_DMA_DEST_INC_1_4 0x1000 +#define APB_DMA_DEST_INC_2_8 0x2000 +#define APB_DMA_DEST_INC_4_16 0x3000 +#define APB_DMA_DEST_DEC_1_4 0x5000 +#define APB_DMA_DEST_DEC_2_8 0x6000 +#define APB_DMA_DEST_DEC_4_16 0x7000 + +/* + * Request signal select source/destination address for DMA hardware handshake. + * + * The request line number is a property of the DMA controller itself, + * e.g. MMC must always request channels where dma_slave_config->slave_id is 5. 
+ * + * 0: No request / Grant signal + * 1-15: Request / Grant signal + */ +#define APB_DMA_SOURCE_REQ_NO 0x1000000 +#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000 +#define APB_DMA_DEST_REQ_NO 0x10000 +#define APB_DMA_DEST_REQ_NO_MASK 0xf0000 + +#define APB_DMA_DATA_WIDTH 0x100000 +#define APB_DMA_DATA_WIDTH_MASK 0x300000 +/* + * Data width of transfer: + * + * 00: Word + * 01: Half + * 10: Byte + */ +#define APB_DMA_DATA_WIDTH_4 0 +#define APB_DMA_DATA_WIDTH_2 0x100000 +#define APB_DMA_DATA_WIDTH_1 0x200000 + +#define APB_DMA_CYCLES_MASK 0x00ffffff + +#define MOXART_DMA_DATA_TYPE_S8 0x00 +#define MOXART_DMA_DATA_TYPE_S16 0x01 +#define MOXART_DMA_DATA_TYPE_S32 0x02 + +struct moxart_sg { + dma_addr_t addr; + uint32_t len; +}; + +struct moxart_desc { + enum dma_transfer_direction dma_dir; + dma_addr_t dev_addr; + unsigned int sglen; + unsigned int dma_cycles; + struct virt_dma_desc vd; + uint8_t es; + struct moxart_sg sg[0]; +}; + +struct moxart_chan { + struct virt_dma_chan vc; + + void __iomem *base; + struct moxart_desc *desc; + + struct dma_slave_config cfg; + + bool allocated; + bool error; + int ch_num; + unsigned int line_reqno; + unsigned int sgidx; +}; + +struct moxart_dmadev { + struct dma_device dma_slave; + struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL]; +}; + +struct moxart_filter_data { + struct moxart_dmadev *mdc; + struct of_phandle_args *dma_spec; +}; + +static const unsigned int es_bytes[] = { + [MOXART_DMA_DATA_TYPE_S8] = 1, + [MOXART_DMA_DATA_TYPE_S16] = 2, + [MOXART_DMA_DATA_TYPE_S32] = 4, +}; + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct moxart_chan, vc.chan); +} + +static inline struct moxart_desc *to_moxart_dma_desc( + struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct moxart_desc, vd.tx); +} + +static void moxart_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct moxart_desc, vd)); +} + +static int moxart_terminate_all(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + u32 ctrl; + + dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch); + + spin_lock_irqsave(&ch->vc.lock, flags); + + if (ch->desc) + ch->desc = NULL; + + ctrl = readl(ch->base + REG_OFF_CTRL); + ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); + writel(ctrl, ch->base + REG_OFF_CTRL); + + vchan_get_all_descriptors(&ch->vc, &head); + spin_unlock_irqrestore(&ch->vc.lock, flags); + vchan_dma_desc_free_list(&ch->vc, &head); + + return 0; +} + +static int moxart_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + u32 ctrl; + + ch->cfg = *cfg; + + ctrl = readl(ch->base + REG_OFF_CTRL); + ctrl |= APB_DMA_BURST_MODE; + ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK); + ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK); + + switch (ch->cfg.src_addr_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + ctrl |= APB_DMA_DATA_WIDTH_1; + if (ch->cfg.direction != DMA_MEM_TO_DEV) + ctrl |= APB_DMA_DEST_INC_1_4; + else + ctrl |= APB_DMA_SOURCE_INC_1_4; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + ctrl |= APB_DMA_DATA_WIDTH_2; + if (ch->cfg.direction != DMA_MEM_TO_DEV) + ctrl |= APB_DMA_DEST_INC_2_8; + else + ctrl |= APB_DMA_SOURCE_INC_2_8; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + ctrl &= ~APB_DMA_DATA_WIDTH; + if (ch->cfg.direction != 
DMA_MEM_TO_DEV) + ctrl |= APB_DMA_DEST_INC_4_16; + else + ctrl |= APB_DMA_SOURCE_INC_4_16; + break; + default: + return -EINVAL; + } + + if (ch->cfg.direction == DMA_MEM_TO_DEV) { + ctrl &= ~APB_DMA_DEST_SELECT; + ctrl |= APB_DMA_SOURCE_SELECT; + ctrl |= (ch->line_reqno << 16 & + APB_DMA_DEST_REQ_NO_MASK); + } else { + ctrl |= APB_DMA_DEST_SELECT; + ctrl &= ~APB_DMA_SOURCE_SELECT; + ctrl |= (ch->line_reqno << 24 & + APB_DMA_SOURCE_REQ_NO_MASK); + } + + writel(ctrl, ch->base + REG_OFF_CTRL); + + return 0; +} + +static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + int ret = 0; + + switch (cmd) { + case DMA_PAUSE: + case DMA_RESUME: + return -EINVAL; + case DMA_TERMINATE_ALL: + moxart_terminate_all(chan); + break; + case DMA_SLAVE_CONFIG: + ret = moxart_slave_config(chan, (struct dma_slave_config *)arg); + break; + default: + ret = -ENOSYS; + } + + return ret; +} + +static struct dma_async_tx_descriptor *moxart_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + struct moxart_desc *d; + enum dma_slave_buswidth dev_width; + dma_addr_t dev_addr; + struct scatterlist *sgent; + unsigned int es; + unsigned int i; + + if (!is_slave_direction(dir)) { + dev_err(chan2dev(chan), "%s: invalid DMA direction\n", + __func__); + return NULL; + } + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = ch->cfg.src_addr; + dev_width = ch->cfg.src_addr_width; + } else { + dev_addr = ch->cfg.dst_addr; + dev_width = ch->cfg.dst_addr_width; + } + + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = MOXART_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = MOXART_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = MOXART_DMA_DATA_TYPE_S32; + break; + default: + dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n", + __func__, dev_width); + return NULL; + } + + d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dma_dir = dir; + d->dev_addr = dev_addr; + d->es = es; + + for_each_sg(sgl, sgent, sg_len, i) { + d->sg[i].addr = sg_dma_address(sgent); + d->sg[i].len = sg_dma_len(sgent); + } + + d->sglen = sg_len; + + ch->error = 0; + + return vchan_tx_prep(&ch->vc, &d->vd, tx_flags); +} + +static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct moxart_dmadev *mdc = ofdma->of_dma_data; + struct dma_chan *chan; + struct moxart_chan *ch; + + chan = dma_get_any_slave_channel(&mdc->dma_slave); + if (!chan) + return NULL; + + ch = to_moxart_dma_chan(chan); + ch->line_reqno = dma_spec->args[0]; + + return chan; +} + +static int moxart_alloc_chan_resources(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + + dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n", + __func__, ch->ch_num); + ch->allocated = 1; + + return 0; +} + +static void moxart_free_chan_resources(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + + vchan_free_chan_resources(&ch->vc); + + dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n", + __func__, ch->ch_num); + ch->allocated = 0; +} + +static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr, + dma_addr_t dst_addr) +{ + writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE); + writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST); +} + +static void moxart_set_transfer_params(struct 
moxart_chan *ch, unsigned int len) +{ + struct moxart_desc *d = ch->desc; + unsigned int sglen_div = es_bytes[d->es]; + + d->dma_cycles = len >> sglen_div; + + /* + * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16 + * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ). + */ + writel(d->dma_cycles, ch->base + REG_OFF_CYCLES); + + dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n", + __func__, d->dma_cycles, len); +} + +static void moxart_start_dma(struct moxart_chan *ch) +{ + u32 ctrl; + + ctrl = readl(ch->base + REG_OFF_CTRL); + ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); + writel(ctrl, ch->base + REG_OFF_CTRL); +} + +static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx) +{ + struct moxart_desc *d = ch->desc; + struct moxart_sg *sg = ch->desc->sg + idx; + + if (ch->desc->dma_dir == DMA_MEM_TO_DEV) + moxart_dma_set_params(ch, sg->addr, d->dev_addr); + else if (ch->desc->dma_dir == DMA_DEV_TO_MEM) + moxart_dma_set_params(ch, d->dev_addr, sg->addr); + + moxart_set_transfer_params(ch, sg->len); + + moxart_start_dma(ch); +} + +static void moxart_dma_start_desc(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&ch->vc); + + if (!vd) { + ch->desc = NULL; + return; + } + + list_del(&vd->node); + + ch->desc = to_moxart_dma_desc(&vd->tx); + ch->sgidx = 0; + + moxart_dma_start_sg(ch, 0); +} + +static void moxart_issue_pending(struct dma_chan *chan) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&ch->vc.lock, flags); + if (vchan_issue_pending(&ch->vc) && !ch->desc) + moxart_dma_start_desc(chan); + spin_unlock_irqrestore(&ch->vc.lock, flags); +} + +static size_t moxart_dma_desc_size(struct moxart_desc *d, + unsigned int completed_sgs) +{ + unsigned int i; + size_t size; + + for (size = i = completed_sgs; i < d->sglen; i++) + size += d->sg[i].len; + + return size; +} + +static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch) +{ + size_t size; + unsigned int completed_cycles, cycles; + + size = moxart_dma_desc_size(ch->desc, ch->sgidx); + cycles = readl(ch->base + REG_OFF_CYCLES); + completed_cycles = (ch->desc->dma_cycles - cycles); + size -= completed_cycles << es_bytes[ch->desc->es]; + + dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size); + + return size; +} + +static enum dma_status moxart_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct moxart_chan *ch = to_moxart_dma_chan(chan); + struct virt_dma_desc *vd; + struct moxart_desc *d; + enum dma_status ret; + unsigned long flags; + + /* + * dma_cookie_status() assigns initial residue value. 
+ */ + ret = dma_cookie_status(chan, cookie, txstate); + + spin_lock_irqsave(&ch->vc.lock, flags); + vd = vchan_find_desc(&ch->vc, cookie); + if (vd) { + d = to_moxart_dma_desc(&vd->tx); + txstate->residue = moxart_dma_desc_size(d, 0); + } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) { + txstate->residue = moxart_dma_desc_size_in_flight(ch); + } + spin_unlock_irqrestore(&ch->vc.lock, flags); + + if (ch->error) + return DMA_ERROR; + + return ret; +} + +static void moxart_dma_init(struct dma_device *dma, struct device *dev) +{ + dma->device_prep_slave_sg = moxart_prep_slave_sg; + dma->device_alloc_chan_resources = moxart_alloc_chan_resources; + dma->device_free_chan_resources = moxart_free_chan_resources; + dma->device_issue_pending = moxart_issue_pending; + dma->device_tx_status = moxart_tx_status; + dma->device_control = moxart_control; + dma->dev = dev; + + INIT_LIST_HEAD(&dma->channels); +} + +static irqreturn_t moxart_dma_interrupt(int irq, void *devid) +{ + struct moxart_dmadev *mc = devid; + struct moxart_chan *ch = &mc->slave_chans[0]; + unsigned int i; + unsigned long flags; + u32 ctrl; + + dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__); + + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { + if (!ch->allocated) + continue; + + ctrl = readl(ch->base + REG_OFF_CTRL); + + dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n", + __func__, ch, ch->base, ctrl); + + if (ctrl & APB_DMA_FIN_INT_STS) { + ctrl &= ~APB_DMA_FIN_INT_STS; + if (ch->desc) { + spin_lock_irqsave(&ch->vc.lock, flags); + if (++ch->sgidx < ch->desc->sglen) { + moxart_dma_start_sg(ch, ch->sgidx); + } else { + vchan_cookie_complete(&ch->desc->vd); + moxart_dma_start_desc(&ch->vc.chan); + } + spin_unlock_irqrestore(&ch->vc.lock, flags); + } + } + + if (ctrl & APB_DMA_ERR_INT_STS) { + ctrl &= ~APB_DMA_ERR_INT_STS; + ch->error = 1; + } + + writel(ctrl, ch->base + REG_OFF_CTRL); + } + + return IRQ_HANDLED; +} + +static int moxart_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + static void __iomem *dma_base_addr; + int ret, i; + unsigned int irq; + struct moxart_chan *ch; + struct moxart_dmadev *mdc; + + mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL); + if (!mdc) { + dev_err(dev, "can't allocate DMA container\n"); + return -ENOMEM; + } + + irq = irq_of_parse_and_map(node, 0); + if (irq == NO_IRQ) { + dev_err(dev, "no IRQ resource\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dma_base_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(dma_base_addr)) + return PTR_ERR(dma_base_addr); + + dma_cap_zero(mdc->dma_slave.cap_mask); + dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask); + dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask); + + moxart_dma_init(&mdc->dma_slave, dev); + + ch = &mdc->slave_chans[0]; + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { + ch->ch_num = i; + ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE; + ch->allocated = 0; + + ch->vc.desc_free = moxart_dma_desc_free; + vchan_init(&ch->vc, &mdc->dma_slave); + + dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n", + __func__, i, ch->ch_num, ch->base); + } + + platform_set_drvdata(pdev, mdc); + + ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0, + "moxart-dma-engine", mdc); + if (ret) { + dev_err(dev, "devm_request_irq failed\n"); + return ret; + } + + ret = dma_async_device_register(&mdc->dma_slave); + if (ret) { + dev_err(dev, "dma_async_device_register failed\n"); + return ret; + } + + 
ret = of_dma_controller_register(node, moxart_of_xlate, mdc); + if (ret) { + dev_err(dev, "of_dma_controller_register failed\n"); + dma_async_device_unregister(&mdc->dma_slave); + return ret; + } + + dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq); + + return 0; +} + +static int moxart_remove(struct platform_device *pdev) +{ + struct moxart_dmadev *m = platform_get_drvdata(pdev); + + dma_async_device_unregister(&m->dma_slave); + + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + + return 0; +} + +static const struct of_device_id moxart_dma_match[] = { + { .compatible = "moxa,moxart-dma" }, + { } +}; + +static struct platform_driver moxart_driver = { + .probe = moxart_probe, + .remove = moxart_remove, + .driver = { + .name = "moxart-dma-engine", + .owner = THIS_MODULE, + .of_match_table = moxart_dma_match, + }, +}; + +static int moxart_init(void) +{ + return platform_driver_register(&moxart_driver); +} +subsys_initcall(moxart_init); + +static void __exit moxart_exit(void) +{ + platform_driver_unregister(&moxart_driver); +} +module_exit(moxart_exit); + +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>"); +MODULE_DESCRIPTION("MOXART DMA engine driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7807f0ef4e20..766b68ed505c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) hw_desc->desc_command = (1 << 31); } -static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_dest_addr; -} - static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { @@ -503,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) if (!mv_can_chain(grp_start)) goto submit_done; - dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", - old_chain_tail->async_tx.phys); + dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", + &old_chain_tail->async_tx.phys); /* fix up the hardware chain */ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); @@ -533,7 +527,8 @@ submit_done: /* returns the number of allocated descriptors */ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) { - char *hw_desc; + void *virt_desc; + dma_addr_t dma_desc; int idx; struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *slot = NULL; @@ -548,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) " %d descriptor slots", idx); break; } - hw_desc = (char *) mv_chan->dma_desc_pool_virt; - slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + virt_desc = mv_chan->dma_desc_pool_virt; + slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE; dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); INIT_LIST_HEAD(&slot->tx_list); - hw_desc = (char *) mv_chan->dma_desc_pool; - slot->async_tx.phys = - (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; + dma_desc = mv_chan->dma_desc_pool; + slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; slot->idx = idx++; spin_lock_bh(&mv_chan->lock); @@ -588,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, int slot_cnt; dev_dbg(mv_chan_to_devp(mv_chan), - "%s dest: %x src %x len: %u flags: %ld\n", - __func__, dest, src, len, flags); + "%s dest: %pad src %pad len: %u flags: %ld\n", + __func__, &dest, &src, 
len, flags); if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; @@ -632,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); dev_dbg(mv_chan_to_devp(mv_chan), - "%s src_cnt: %d len: dest %x %u flags: %ld\n", - __func__, src_cnt, len, dest, flags); + "%s src_cnt: %d len: %u dest %pad flags: %ld\n", + __func__, src_cnt, len, &dest, flags); spin_lock_bh(&mv_chan->lock); slot_cnt = mv_chan_xor_slot_count(len, src_cnt); @@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan) /* * Perform a transaction to verify the HW works. */ -#define MV_XOR_TEST_SIZE 2000 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) { @@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) struct dma_chan *dma_chan; dma_cookie_t cookie; struct dma_async_tx_descriptor *tx; + struct dmaengine_unmap_data *unmap; int err = 0; - src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; - dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); + dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; } /* Fill in src buffer */ - for (i = 0; i < MV_XOR_TEST_SIZE; i++) + for (i = 0; i < PAGE_SIZE; i++) ((u8 *) src)[i] = (u8)i; dma_chan = &mv_chan->dmachan; @@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) goto out; } - dest_dma = dma_map_single(dma_chan->device->dev, dest, - MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); + unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); + if (!unmap) { + err = -ENOMEM; + goto free_resources; + } + + src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, + PAGE_SIZE, DMA_TO_DEVICE); + unmap->to_cnt = 1; + unmap->addr[0] = src_dma; - src_dma = dma_map_single(dma_chan->device->dev, src, - MV_XOR_TEST_SIZE, DMA_TO_DEVICE); + dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, + PAGE_SIZE, DMA_FROM_DEVICE); + unmap->from_cnt = 1; + unmap->addr[1] = dest_dma; + + unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, - MV_XOR_TEST_SIZE, 0); + PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); async_tx_ack(tx); @@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) } dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, - MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); - if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { + PAGE_SIZE, DMA_FROM_DEVICE); + if (memcmp(src, dest, PAGE_SIZE)) { dev_err(dma_chan->device->dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; @@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) } free_resources: + dmaengine_unmap_put(unmap); mv_xor_free_chan_resources(dma_chan); out: kfree(src); @@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dest_dma; struct dma_async_tx_descriptor *tx; + struct dmaengine_unmap_data *unmap; struct dma_chan *dma_chan; dma_cookie_t cookie; u8 cmp_byte = 0; u32 cmp_word; int err = 0; + int src_count = MV_XOR_NUM_SRC_TEST; - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + for (src_idx = 0; src_idx < src_count; src_idx++) { xor_srcs[src_idx] = alloc_page(GFP_KERNEL); if (!xor_srcs[src_idx]) { while (src_idx--) @@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) } /* Fill in src buffers */ - for 
(src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { + for (src_idx = 0; src_idx < src_count; src_idx++) { u8 *ptr = page_address(xor_srcs[src_idx]); for (i = 0; i < PAGE_SIZE; i++) ptr[i] = (1 << src_idx); } - for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) + for (src_idx = 0; src_idx < src_count; src_idx++) cmp_byte ^= (u8) (1 << src_idx); cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | @@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) goto out; } + unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, + GFP_KERNEL); + if (!unmap) { + err = -ENOMEM; + goto free_resources; + } + /* test xor */ - dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, - DMA_FROM_DEVICE); + for (i = 0; i < src_count; i++) { + unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], + 0, PAGE_SIZE, DMA_TO_DEVICE); + dma_srcs[i] = unmap->addr[i]; + unmap->to_cnt++; + } - for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) - dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], - 0, PAGE_SIZE, DMA_TO_DEVICE); + unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + dest_dma = unmap->addr[src_count]; + unmap->from_cnt = 1; + unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, - MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); + src_count, PAGE_SIZE, 0); cookie = mv_xor_tx_submit(tx); mv_xor_issue_pending(dma_chan); @@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) } free_resources: + dmaengine_unmap_put(unmap); mv_xor_free_chan_resources(dma_chan); out: - src_idx = MV_XOR_NUM_SRC_TEST; + src_idx = src_count; while (src_idx--) __free_page(xor_srcs[src_idx]); __free_page(dest); @@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev) int i = 0; for_each_child_of_node(pdev->dev.of_node, np) { + struct mv_xor_chan *chan; dma_cap_mask_t cap_mask; int irq; @@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev) goto err_channel_add; } - xordev->channels[i] = - mv_xor_channel_add(xordev, pdev, i, - cap_mask, irq); - if (IS_ERR(xordev->channels[i])) { - ret = PTR_ERR(xordev->channels[i]); - xordev->channels[i] = NULL; + chan = mv_xor_channel_add(xordev, pdev, i, + cap_mask, irq); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); irq_dispose_mapping(irq); goto err_channel_add; } + xordev->channels[i] = chan; i++; } } else if (pdata && pdata->channels) { for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { struct mv_xor_channel_data *cd; + struct mv_xor_chan *chan; int irq; cd = &pdata->channels[i]; @@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev) goto err_channel_add; } - xordev->channels[i] = - mv_xor_channel_add(xordev, pdev, i, - cd->cap_mask, irq); - if (IS_ERR(xordev->channels[i])) { - ret = PTR_ERR(xordev->channels[i]); + chan = mv_xor_channel_add(xordev, pdev, i, + cd->cap_mask, irq); + if (IS_ERR(chan)) { + ret = PTR_ERR(chan); goto err_channel_add; } + + xordev->channels[i] = chan; } } diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 0b88dd3d05f4..e8fe9dc455f4 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -143,7 +143,7 @@ static int of_dma_match_channel(struct device_node *np, const char *name, * @np: device node to get DMA request from * @name: name of desired channel * - * Returns pointer to appropriate dma channel on success or NULL on error. + * Returns pointer to appropriate DMA channel on success or an error pointer. 
*/ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, const char *name) @@ -152,17 +152,18 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, struct of_dma *ofdma; struct dma_chan *chan; int count, i; + int ret_no_channel = -ENODEV; if (!np || !name) { pr_err("%s: not enough information provided\n", __func__); - return NULL; + return ERR_PTR(-ENODEV); } count = of_property_count_strings(np, "dma-names"); if (count < 0) { pr_err("%s: dma-names property of node '%s' missing or empty\n", __func__, np->full_name); - return NULL; + return ERR_PTR(-ENODEV); } for (i = 0; i < count; i++) { @@ -172,10 +173,12 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, mutex_lock(&of_dma_lock); ofdma = of_dma_find_controller(&dma_spec); - if (ofdma) + if (ofdma) { chan = ofdma->of_dma_xlate(&dma_spec, ofdma); - else + } else { + ret_no_channel = -EPROBE_DEFER; chan = NULL; + } mutex_unlock(&of_dma_lock); @@ -185,7 +188,7 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, return chan; } - return NULL; + return ERR_PTR(ret_no_channel); } /** diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 2f66cf4e54fe..362e7c49f2e1 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -190,7 +190,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan) { struct omap_chan *c = to_omap_dma_chan(chan); - dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); return omap_request_dma(c->dma_sig, "DMA engine", omap_dma_callback, c, &c->dma_ch); @@ -203,7 +203,7 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan) vchan_free_chan_resources(&c->vc); omap_free_dma(c->dma_ch); - dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); + dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); } static size_t omap_dma_sg_size(struct omap_sg *sg) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cdf0483b8f2d..73fa9b7a10ab 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -543,7 +543,9 @@ struct dma_pl330_chan { /* DMA-Engine Channel */ struct dma_chan chan; - /* List of to be xfered descriptors */ + /* List of submitted descriptors */ + struct list_head submitted_list; + /* List of issued descriptors */ struct list_head work_list; /* List of completed descriptors */ struct list_head completed_list; @@ -578,12 +580,16 @@ struct dma_pl330_dmac { /* DMA-Engine Device */ struct dma_device ddma; + /* Holds info about sg limitations */ + struct device_dma_parameters dma_parms; + /* Pool of descriptors available for the DMAC's channels */ struct list_head desc_pool; /* To protect desc_pool manipulation */ spinlock_t pool_lock; /* Peripheral channels connected to this DMAC */ + unsigned int num_peripherals; struct dma_pl330_chan *peripherals; /* keep at end */ }; @@ -606,11 +612,6 @@ struct dma_pl330_desc { struct dma_pl330_chan *pchan; }; -struct dma_pl330_filter_args { - struct dma_pl330_dmac *pdmac; - unsigned int chan_id; -}; - static inline void _callback(struct pl330_req *r, enum pl330_op_err err) { if (r && r->xfer_cb) @@ -2298,16 +2299,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err) tasklet_schedule(&pch->task); } -static bool pl330_dt_filter(struct dma_chan *chan, void *param) -{ - struct dma_pl330_filter_args *fargs = param; - - if (chan->device != &fargs->pdmac->ddma) - return false; - - return (chan->chan_id == 
fargs->chan_id); -} - bool pl330_filter(struct dma_chan *chan, void *param) { u8 *peri_id; @@ -2325,23 +2316,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, { int count = dma_spec->args_count; struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; - struct dma_pl330_filter_args fargs; - dma_cap_mask_t cap; - - if (!pdmac) - return NULL; + unsigned int chan_id; if (count != 1) return NULL; - fargs.pdmac = pdmac; - fargs.chan_id = dma_spec->args[0]; - - dma_cap_zero(cap); - dma_cap_set(DMA_SLAVE, cap); - dma_cap_set(DMA_CYCLIC, cap); + chan_id = dma_spec->args[0]; + if (chan_id >= pdmac->num_peripherals) + return NULL; - return dma_request_channel(cap, pl330_dt_filter, &fargs); + return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan); } static int pl330_alloc_chan_resources(struct dma_chan *chan) @@ -2385,6 +2369,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); /* Mark all desc done */ + list_for_each_entry(desc, &pch->submitted_list, node) { + desc->status = FREE; + dma_cookie_complete(&desc->txd); + } + list_for_each_entry(desc, &pch->work_list , node) { desc->status = FREE; dma_cookie_complete(&desc->txd); @@ -2395,6 +2384,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned dma_cookie_complete(&desc->txd); } + list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool); list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); @@ -2453,7 +2443,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, static void pl330_issue_pending(struct dma_chan *chan) { - pl330_tasklet((unsigned long) to_pchan(chan)); + struct dma_pl330_chan *pch = to_pchan(chan); + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + list_splice_tail_init(&pch->submitted_list, &pch->work_list); + spin_unlock_irqrestore(&pch->lock, flags); + + pl330_tasklet((unsigned long)pch); } /* @@ -2480,11 +2477,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) dma_cookie_assign(&desc->txd); - list_move_tail(&desc->node, &pch->work_list); + list_move_tail(&desc->node, &pch->submitted_list); } cookie = dma_cookie_assign(&last->txd); - list_add_tail(&last->node, &pch->work_list); + list_add_tail(&last->node, &pch->submitted_list); spin_unlock_irqrestore(&pch->lock, flags); return cookie; @@ -2492,12 +2489,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) static inline void _init_desc(struct dma_pl330_desc *desc) { - desc->pchan = NULL; desc->req.x = &desc->px; desc->req.token = desc; desc->rqcfg.swap = SWAP_NO; - desc->rqcfg.privileged = 0; - desc->rqcfg.insnaccess = 0; desc->rqcfg.scctl = SCCTRL0; desc->rqcfg.dcctl = DCCTRL0; desc->req.cfg = &desc->rqcfg; @@ -2517,7 +2511,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) if (!pdmac) return 0; - desc = kmalloc(count * sizeof(*desc), flg); + desc = kcalloc(count, sizeof(*desc), flg); if (!desc) return 0; @@ -2887,6 +2881,7 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan, caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = false; caps->cmd_terminate = true; + caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; return 0; } @@ -2962,6 +2957,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) else num_chan = max_t(int, pi->pcfg.num_peri, 
pi->pcfg.num_chan); + pdmac->num_peripherals = num_chan; + pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); if (!pdmac->peripherals) { ret = -ENOMEM; @@ -2976,6 +2973,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) else pch->chan.private = adev->dev.of_node; + INIT_LIST_HEAD(&pch->submitted_list); INIT_LIST_HEAD(&pch->work_list); INIT_LIST_HEAD(&pch->completed_list); spin_lock_init(&pch->lock); @@ -3023,6 +3021,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) "unable to register DMA to the generic DT DMA helpers\n"); } } + + adev->dev.dma_parms = &pdmac->dma_parms; + /* * This is the limit for transfers with a buswidth of 1, larger * buswidths will have larger limits. diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 8da48c6b2a38..ce7a8d7564ba 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc, } /** - * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation - */ -static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc, - int value, unsigned long flags) -{ - struct dma_cdb *hw_desc = desc->hw_desc; - - memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); - desc->hw_next = NULL; - desc->src_cnt = 1; - desc->dst_cnt = 1; - - if (flags & DMA_PREP_INTERRUPT) - set_bit(PPC440SPE_DESC_INT, &desc->flags); - else - clear_bit(PPC440SPE_DESC_INT, &desc->flags); - - hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value); - hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value); - hw_desc->opc = DMA_CDB_OPC_DFILL128; -} - -/** * ppc440spe_desc_set_src_addr - set source address into the descriptor */ static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, @@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( struct ppc440spe_adma_chan *chan, dma_cookie_t cookie) { - int i; - BUG_ON(desc->async_tx.cookie < 0); if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; @@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) ppc440spe_adma_prep_dma_interrupt; } pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " - "( %s%s%s%s%s%s%s)\n", + "( %s%s%s%s%s%s)\n", dev_name(adev->dev), dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? 
"pq_val " : "", @@ -4139,6 +4114,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev) regs = ioremap(res.start, resource_size(&res)); if (!regs) { dev_err(&ofdev->dev, "failed to ioremap regs!\n"); + ret = -ENOMEM; goto err_regs_alloc; } diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 6aec3ad814d3..d4d3a3109b16 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -640,6 +640,25 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) } EXPORT_SYMBOL(sirfsoc_dma_filter_id); +#define SIRFSOC_DMA_BUSWIDTHS \ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; + caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS; + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = true; + caps->cmd_terminate = true; + + return 0; +} + static int sirfsoc_dma_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; @@ -712,6 +731,7 @@ static int sirfsoc_dma_probe(struct platform_device *op) dma->device_tx_status = sirfsoc_dma_tx_status; dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; + dma->device_slave_caps = sirfsoc_dma_device_slave_caps; INIT_LIST_HEAD(&dma->channels); dma_cap_set(DMA_SLAVE, dma->cap_mask); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index b8c031b7de4e..bf18c786ed40 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1641,6 +1641,7 @@ static void dma_tasklet(unsigned long data) struct d40_chan *d40c = (struct d40_chan *) data; struct d40_desc *d40d; unsigned long flags; + bool callback_active; dma_async_tx_callback callback; void *callback_param; @@ -1668,6 +1669,7 @@ static void dma_tasklet(unsigned long data) } /* Callback to client */ + callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); callback = d40d->txd.callback; callback_param = d40d->txd.callback_param; @@ -1690,7 +1692,7 @@ static void dma_tasklet(unsigned long data) spin_unlock_irqrestore(&d40c->lock, flags); - if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) + if (callback_active && callback) callback(callback_param); return; @@ -2409,6 +2411,7 @@ static void d40_set_prio_realtime(struct d40_chan *d40c) #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) +#define D40_DT_FLAGS_HIGH_PRIO(flags) ((flags >> 4) & 0x1) static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) @@ -2446,6 +2449,9 @@ static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, cfg.use_fixed_channel = true; } + if (D40_DT_FLAGS_HIGH_PRIO(flags)) + cfg.high_priority = true; + return dma_request_channel(cap, stedma40_filter, &cfg); } diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 73654e33f13b..03ad64ecaaf0 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1,7 +1,7 @@ /* * DMA driver for Nvidia's Tegra20 APB DMA controller. * - * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,11 +29,12 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/slab.h> -#include <linux/clk/tegra.h> #include "dmaengine.h" @@ -99,6 +100,11 @@ #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) +/* Tegra148 specific registers */ +#define TEGRA_APBDMA_CHAN_WCOUNT 0x20 + +#define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24 + /* * If any burst is in flight and DMA paused then this is the time to complete * on-flight burst and update DMA status register. @@ -108,21 +114,22 @@ /* Channel base address offset from APBDMA base address */ #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 -/* DMA channel register space size */ -#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 - struct tegra_dma; /* * tegra_dma_chip_data Tegra chip specific DMA data * @nr_channels: Number of channels available in the controller. + * @channel_reg_size: Channel register size/stride. * @max_dma_count: Maximum DMA transfer count supported by DMA controller. * @support_channel_pause: Support channel wise pause of dma. + * @support_separate_wcount_reg: Support separate word count register. */ struct tegra_dma_chip_data { int nr_channels; + int channel_reg_size; int max_dma_count; bool support_channel_pause; + bool support_separate_wcount_reg; }; /* DMA channel registers */ @@ -132,6 +139,7 @@ struct tegra_dma_channel_regs { unsigned long apb_ptr; unsigned long ahb_seq; unsigned long apb_seq; + unsigned long wcount; }; /* @@ -199,6 +207,7 @@ struct tegra_dma_channel { void *callback_param; /* Channel-slave specific configuration */ + unsigned int slave_id; struct dma_slave_config dma_sconfig; struct tegra_dma_channel_regs channel_reg; }; @@ -208,6 +217,7 @@ struct tegra_dma { struct dma_device dma_dev; struct device *dev; struct clk *dma_clk; + struct reset_control *rst; spinlock_t global_lock; void __iomem *base_addr; const struct tegra_dma_chip_data *chip_data; @@ -339,6 +349,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc, } memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); + if (!tdc->slave_id) + tdc->slave_id = sconfig->slave_id; tdc->config_init = true; return 0; } @@ -421,6 +433,8 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc, tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); + if (tdc->tdma->chip_data->support_separate_wcount_reg) + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount); /* Start DMA */ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, @@ -460,6 +474,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, /* Safe to program new configuration */ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); + if (tdc->tdma->chip_data->support_separate_wcount_reg) + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, + nsg_req->ch_regs.wcount); tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); nsg_req->configured = true; @@ -713,6 +730,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) struct tegra_dma_desc *dma_desc; unsigned long flags; unsigned long status; 
+ unsigned long wcount; bool was_busy; spin_lock_irqsave(&tdc->lock, flags); @@ -733,6 +751,10 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) tdc->isr_handler(tdc, true); status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); } + if (tdc->tdma->chip_data->support_separate_wcount_reg) + wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER); + else + wcount = status; was_busy = tdc->busy; tegra_dma_stop(tdc); @@ -741,7 +763,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); sgreq->dma_desc->bytes_transferred += - get_current_xferred_count(tdc, sgreq, status); + get_current_xferred_count(tdc, sgreq, wcount); } tegra_dma_resume(tdc); @@ -903,6 +925,17 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, return -EINVAL; } +static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc, + struct tegra_dma_channel_regs *ch_regs, u32 len) +{ + u32 len_field = (len - 4) & 0xFFFC; + + if (tdc->tdma->chip_data->support_separate_wcount_reg) + ch_regs->wcount = len_field; + else + ch_regs->csr |= len_field; +} + static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, @@ -941,7 +974,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; - csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; if (flags & DMA_PREP_INTERRUPT) csr |= TEGRA_APBDMA_CSR_IE_EOC; @@ -986,7 +1019,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( sg_req->ch_regs.apb_ptr = apb_ptr; sg_req->ch_regs.ahb_ptr = mem; - sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); + sg_req->ch_regs.csr = csr; + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); sg_req->ch_regs.apb_seq = apb_seq; sg_req->ch_regs.ahb_seq = ahb_seq; sg_req->configured = false; @@ -1085,7 +1119,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( csr |= TEGRA_APBDMA_CSR_FLOW; if (flags & DMA_PREP_INTERRUPT) csr |= TEGRA_APBDMA_CSR_IE_EOC; - csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; @@ -1115,7 +1149,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); sg_req->ch_regs.apb_ptr = apb_ptr; sg_req->ch_regs.ahb_ptr = mem; - sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); + sg_req->ch_regs.csr = csr; + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); sg_req->ch_regs.apb_seq = apb_seq; sg_req->ch_regs.ahb_seq = ahb_seq; sg_req->configured = false; @@ -1205,32 +1240,69 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) kfree(sg_req); } clk_disable_unprepare(tdma->dma_clk); + + tdc->slave_id = 0; +} + +static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct tegra_dma *tdma = ofdma->of_dma_data; + struct dma_chan *chan; + struct tegra_dma_channel *tdc; + + chan = dma_get_any_slave_channel(&tdma->dma_dev); + if (!chan) + return NULL; + + tdc = to_tegra_dma_chan(chan); + tdc->slave_id = dma_spec->args[0]; + + return chan; } /* Tegra20 specific DMA controller information */ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { .nr_channels = 16, 
+ .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = false, + .support_separate_wcount_reg = false, }; /* Tegra30 specific DMA controller information */ static const struct tegra_dma_chip_data tegra30_dma_chip_data = { .nr_channels = 32, + .channel_reg_size = 0x20, .max_dma_count = 1024UL * 64, .support_channel_pause = false, + .support_separate_wcount_reg = false, }; /* Tegra114 specific DMA controller information */ static const struct tegra_dma_chip_data tegra114_dma_chip_data = { .nr_channels = 32, + .channel_reg_size = 0x20, + .max_dma_count = 1024UL * 64, + .support_channel_pause = true, + .support_separate_wcount_reg = false, +}; + +/* Tegra148 specific DMA controller information */ +static const struct tegra_dma_chip_data tegra148_dma_chip_data = { + .nr_channels = 32, + .channel_reg_size = 0x40, .max_dma_count = 1024UL * 64, .support_channel_pause = true, + .support_separate_wcount_reg = true, }; static const struct of_device_id tegra_dma_of_match[] = { { + .compatible = "nvidia,tegra148-apbdma", + .data = &tegra148_dma_chip_data, + }, { .compatible = "nvidia,tegra114-apbdma", .data = &tegra114_dma_chip_data, }, { @@ -1282,6 +1354,12 @@ static int tegra_dma_probe(struct platform_device *pdev) return PTR_ERR(tdma->dma_clk); } + tdma->rst = devm_reset_control_get(&pdev->dev, "dma"); + if (IS_ERR(tdma->rst)) { + dev_err(&pdev->dev, "Error: Missing reset\n"); + return PTR_ERR(tdma->rst); + } + spin_lock_init(&tdma->global_lock); pm_runtime_enable(&pdev->dev); @@ -1302,9 +1380,9 @@ static int tegra_dma_probe(struct platform_device *pdev) } /* Reset DMA controller */ - tegra_periph_reset_assert(tdma->dma_clk); + reset_control_assert(tdma->rst); udelay(2); - tegra_periph_reset_deassert(tdma->dma_clk); + reset_control_deassert(tdma->rst); /* Enable global DMA registers */ tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); @@ -1318,7 +1396,7 @@ static int tegra_dma_probe(struct platform_device *pdev) struct tegra_dma_channel *tdc = &tdma->channels[i]; tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + - i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; + i * cdata->channel_reg_size; res = platform_get_resource(pdev, IORESOURCE_IRQ, i); if (!res) { @@ -1376,10 +1454,20 @@ static int tegra_dma_probe(struct platform_device *pdev) goto err_irq; } + ret = of_dma_controller_register(pdev->dev.of_node, + tegra_dma_of_xlate, tdma); + if (ret < 0) { + dev_err(&pdev->dev, + "Tegra20 APB DMA OF registration failed %d\n", ret); + goto err_unregister_dma_dev; + } + dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n", cdata->nr_channels); return 0; +err_unregister_dma_dev: + dma_async_device_unregister(&tdma->dma_dev); err_irq: while (--i >= 0) { struct tegra_dma_channel *tdc = &tdma->channels[i]; diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index bae6c29f5502..17686caf64d5 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd = &desc->txd; - struct txx9dmac_slave *ds = dc->chan.private; dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", txd->cookie, desc); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 85c19d63f9fb..181b95267866 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -84,10 +84,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc) static inline void vchan_cookie_complete(struct 
virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + dma_cookie_t cookie; + cookie = vd->tx.cookie; dma_cookie_complete(&vd->tx); dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", - vd, vd->tx.cookie); + vd, cookie); list_add_tail(&vd->node, &vc->desc_completed); tasklet_schedule(&vc->task); |
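
The mmp_pdma, moxart and tegra20-apb hunks above all replace hand-rolled channel selection in their of_dma translation callbacks with dma_get_any_slave_channel(), which takes the dmaengine channel lock, picks a currently unused channel of that controller and reserves it in one step, so the retry loop the old mmp_pdma xlate needed to close the lookup/reservation race goes away. A minimal sketch of the resulting callback shape, with hypothetical foo_* names standing in for any of these drivers:

static struct dma_chan *foo_dma_xlate(struct of_phandle_args *dma_spec,
                                      struct of_dma *ofdma)
{
        struct foo_dma_dev *fdev = ofdma->of_dma_data;
        struct dma_chan *chan;

        /* Pick and reserve any free channel of this controller. */
        chan = dma_get_any_slave_channel(&fdev->ddev);
        if (!chan)
                return NULL;

        /* The first DT cell selects the request line for this client. */
        to_foo_chan(chan)->req_line = dma_spec->args[0];

        return chan;
}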
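
mmp_tdma now resolves its descriptor SRAM per device instead of through a global sram_get_gpool("asram") lookup: with a device tree it takes the pool from the "asram" phandle via of_get_named_gen_pool() and stores it in the channel, and the old call only remains as the non-DT fallback. A rough sketch of how a driver consumes such a pool (the allocation helpers shown are the generic genalloc API, not necessarily the exact calls mmp_tdma makes):

        struct gen_pool *pool;
        unsigned long vaddr;
        phys_addr_t paddr;
        size_t size = ndesc * sizeof(struct mmp_tdma_desc);

        pool = of_get_named_gen_pool(dev->of_node, "asram", 0);
        if (!pool)
                return -ENOMEM;

        vaddr = gen_pool_alloc(pool, size);             /* carve descriptors out of SRAM */
        if (!vaddr)
                return -ENOMEM;
        paddr = gen_pool_virt_to_phys(pool, vaddr);     /* this is what the hardware gets */

        /* ... point the channel's descriptor base at paddr ... */

        gen_pool_free(pool, vaddr, size);               /* on channel teardown */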
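
The control word layout of the new MOXA ART driver follows directly from the defines at the top of moxart-dma.c. As a worked example (not code taken from the driver), a memory-to-device transfer with 32-bit accesses on request line 5 would be composed roughly as:

        u32 ctrl = 0;

        ctrl |= APB_DMA_BURST_MODE;             /* 0x00000008 */
        ctrl |= APB_DMA_SOURCE_SELECT;          /* memory side on AHB: 0x00000040 */
        ctrl |= APB_DMA_SOURCE_INC_4_16;        /* +4 per access, +16 in burst: 0x00000300 */
        ctrl &= ~APB_DMA_DATA_WIDTH_MASK;       /* word (32-bit) accesses: field stays 0 */
        ctrl |= 5 << 16;                        /* request line 5 in the DEST_REQ_NO field */

        /* ctrl == 0x00050348; moxart_start_dma() later ORs in APB_DMA_ENABLE
         * plus the finish/error interrupt enables. */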
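
The cycle accounting in moxart_set_transfer_params() and moxart_dma_desc_size_in_flight() is easiest to see with numbers. For a single-segment, 32-bit-wide transfer, es_bytes[MOXART_DMA_DATA_TYPE_S32] is 4, so one programmed cycle moves 16 bytes in burst mode, matching the comment in the driver:

        len        = 64;
        dma_cycles = len >> 4;                /* 4 cycles written to REG_OFF_CYCLES */

        /* mid-transfer, the CYCLES register reads back 1 cycle still to go: */
        completed  = dma_cycles - 1;          /* 3 cycles finished */
        residue    = len - (completed << 4);  /* 64 - 48 = 16 bytes reported as residue */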
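
Several mv_xor debug prints switch from "%x" to the %pa/%pad printk specifiers. These print phys_addr_t/dma_addr_t values at the right width on both 32- and 64-bit configurations, but they take a pointer to the variable rather than the value, which is why the hunks also add the '&'. A small illustration (hypothetical helper, not from the driver):

static void foo_dbg_addrs(struct device *dev, dma_addr_t src, dma_addr_t dst)
{
        /* "%x" truncates and warns once dma_addr_t is 64 bit; pass addresses instead. */
        dev_dbg(dev, "src %pad dst %pad\n", &src, &dst);
}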
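
The mv_xor self-tests now route their DMA mappings through the dmaengine unmap-data API instead of bare dma_map_single()/dma_unmap_*() pairs: dmaengine_get_unmap_data() allocates a refcounted record sized for the number of mappings, the test fills in addr[], to_cnt/from_cnt and len, and a single dmaengine_unmap_put() at the end drops the reference and unmaps everything. A condensed sketch of the memcpy-test pattern under those assumptions (src_page/dst_page hypothetical):

        struct dmaengine_unmap_data *unmap;
        struct dma_async_tx_descriptor *tx;

        unmap = dmaengine_get_unmap_data(dma_dev->dev, 2, GFP_KERNEL);
        if (!unmap)
                return -ENOMEM;

        unmap->addr[0] = dma_map_page(dma_dev->dev, src_page, 0,
                                      PAGE_SIZE, DMA_TO_DEVICE);
        unmap->to_cnt = 1;
        unmap->addr[1] = dma_map_page(dma_dev->dev, dst_page, 0,
                                      PAGE_SIZE, DMA_FROM_DEVICE);
        unmap->from_cnt = 1;
        unmap->len = PAGE_SIZE;

        tx = dma_dev->device_prep_dma_memcpy(chan, unmap->addr[1],
                                             unmap->addr[0], PAGE_SIZE, 0);
        /* ... submit, issue pending, wait for completion, compare buffers ... */

        dmaengine_unmap_put(unmap);     /* unmaps both pages and frees the record */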
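
of_dma_request_slave_channel() stops returning plain NULL and instead returns ERR_PTR(-ENODEV) when the request cannot be satisfied and ERR_PTR(-EPROBE_DEFER) when a matching "dmas" entry exists but its controller has not registered yet. Callers that consume this function directly therefore need IS_ERR() handling along these lines (a sketch of a hypothetical caller, not a hunk from this series):

        struct dma_chan *chan;

        chan = of_dma_request_slave_channel(dev->of_node, "rx");
        if (IS_ERR(chan)) {
                if (PTR_ERR(chan) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;   /* controller shows up later, retry probe */
                chan = NULL;                    /* genuinely unavailable, fall back or bail */
        }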
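
The pl330 changes split the old work_list in two: tx_submit() now only queues descriptors on the new submitted_list, and issue_pending() splices that list onto work_list before kicking the tasklet, so nothing reaches the hardware until the client calls dma_async_issue_pending(); terminate_all is extended to drain submitted_list as well. The client-side sequence this enforces is the standard dmaengine one:

        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EINVAL;

        desc->callback = xfer_done;             /* hypothetical completion handler */
        desc->callback_param = ctx;

        cookie = dmaengine_submit(desc);        /* pl330: lands on submitted_list only */
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);          /* pl330: splice to work_list and start */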
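
In ste_dma40's completion tasklet the DMA_PREP_INTERRUPT test used to be done on d40d->txd.flags after the channel lock had been dropped, by which point the descriptor can already have been recycled; the hunk latches the decision into callback_active while the descriptor is still owned. The general shape of the safe pattern, with hypothetical helpers:

        spin_lock_irqsave(&chan->lock, flags);

        desc = pop_completed_desc(chan);                        /* hypothetical */
        callback_active = !!(desc->txd.flags & DMA_PREP_INTERRUPT);
        callback = desc->txd.callback;
        callback_param = desc->txd.callback_param;
        recycle_desc(chan, desc);                               /* desc may be reused now */

        spin_unlock_irqrestore(&chan->lock, flags);

        if (callback_active && callback)
                callback(callback_param);       /* never touch desc past the unlock */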
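
tegra20-apb-dma drops the Tegra-only tegra_periph_reset_assert()/deassert() helpers, which took the clock as a handle, and moves to the generic reset controller framework: the reset line is looked up by name from the device and pulsed before the controller registers are touched. The probe-side pattern, reduced to the essentials:

        struct reset_control *rst;

        rst = devm_reset_control_get(&pdev->dev, "dma");
        if (IS_ERR(rst)) {
                dev_err(&pdev->dev, "missing \"dma\" reset\n");
                return PTR_ERR(rst);
        }

        /* Known-clean state before enabling the global DMA registers. */
        reset_control_assert(rst);
        udelay(2);
        reset_control_deassert(rst);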
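
Tegra148 widens the per-channel register window (channel_reg_size 0x40 instead of 0x20) and moves the transfer length out of CSR into a dedicated WCOUNT register; tegra_dma_prep_wcount() keeps that difference in one place. The encoding itself is unchanged, as a quick check with a 4 KiB segment shows:

        u32 len = 4096;
        u32 len_field = (len - 4) & 0xFFFC;     /* = 4092 = 0xFFC, 4-byte granularity */

        ch_regs->csr = csr;                     /* request select, flow control, IE_EOC ... */
        if (separate_wcount_reg)                /* Tegra148 */
                ch_regs->wcount = len_field;
        else                                    /* Tegra20/30/114: packed into CSR */
                ch_regs->csr |= len_field;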
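
The virt-dma hunk fixes a misleading debug message: dma_cookie_complete() clears tx.cookie once it has copied it into the channel's completed_cookie, so the dev_vdbg() that followed it always printed [0]; saving the cookie into a local first keeps the trace useful. For reference, the core helper in drivers/dma/dmaengine.h does roughly the following, which is why the ordering matters:

        /* dma_cookie_complete(), approximately: */
        tx->chan->completed_cookie = tx->cookie;
        tx->cookie = 0;                 /* anything printed from tx.cookie after this is 0 */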