author     Linus Torvalds <torvalds@linux-foundation.org>   2009-07-31 01:46:31 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-07-31 01:46:31 +0200
commit     db06816cb9ceb21a2bf24122407b7b4fe419c3fc
tree       25a96cbc392edfe35ea6dcfaa4b0b2a7601d779d
parent     Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jac...
parent     dmaengine: at_hdmac: add DMA slave transfers
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
dmaengine: at_hdmac: add DMA slave transfers
dmaengine: at_hdmac: new driver for the Atmel AHB DMA Controller
dmaengine: dmatest: correct thread_count while using multiple thread per channel
dmaengine: dmatest: add a maximum number of test iterations
drivers/dma: Remove unnecessary semicolons
drivers/dma/fsldma.c: Remove unnecessary semicolons
dmaengine: move HIGHMEM64G restriction to ASYNC_TX_DMA
fsldma: do not clear bandwidth control bits on the 83xx controller
fsldma: enable external start for the 83xx controller
fsldma: use PCI Read Multiple command
-rw-r--r--   arch/arm/mach-at91/include/mach/at_hdmac.h  |  102
-rw-r--r--   crypto/async_tx/async_xor.c                 |    2
-rw-r--r--   drivers/dma/Kconfig                         |   12
-rw-r--r--   drivers/dma/Makefile                        |    1
-rw-r--r--   drivers/dma/at_hdmac.c                      | 1213
-rw-r--r--   drivers/dma/at_hdmac_regs.h                 |  353
-rw-r--r--   drivers/dma/dmatest.c                       |   21
-rw-r--r--   drivers/dma/fsldma.c                        |   17
-rw-r--r--   drivers/dma/fsldma.h                        |    1
-rw-r--r--   drivers/dma/mv_xor.c                        |    2
10 files changed, 1711 insertions, 13 deletions
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h new file mode 100644 index 000000000000..187cb58345c0 --- /dev/null +++ b/arch/arm/mach-at91/include/mach/at_hdmac.h @@ -0,0 +1,102 @@ +/* + * Header file for the Atmel AHB DMA Controller driver + * + * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef AT_HDMAC_H +#define AT_HDMAC_H + +#include <linux/dmaengine.h> + +/** + * struct at_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + * @cap_mask: dma_capability flags supported by the platform + */ +struct at_dma_platform_data { + unsigned int nr_channels; + dma_cap_mask_t cap_mask; +}; + +/** + * enum at_dma_slave_width - DMA slave register access width. + * @AT_DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses + * @AT_DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses + * @AT_DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses + */ +enum at_dma_slave_width { + AT_DMA_SLAVE_WIDTH_8BIT = 0, + AT_DMA_SLAVE_WIDTH_16BIT, + AT_DMA_SLAVE_WIDTH_32BIT, +}; + +/** + * struct at_dma_slave - Controller-specific information about a slave + * @dma_dev: required DMA master device + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width + * @cfg: Platform-specific initializer for the CFG register + * @ctrla: Platform-specific initializer for the CTRLA register + */ +struct at_dma_slave { + struct device *dma_dev; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum at_dma_slave_width reg_width; + u32 cfg; + u32 ctrla; +}; + + +/* Platform-configurable bits in CFG */ +#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ +#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ +#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ +#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ +#define ATC_SRC_H2SEL_SW (0x0 << 9) +#define ATC_SRC_H2SEL_HW (0x1 << 9) +#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ +#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ +#define ATC_DST_H2SEL_SW (0x0 << 13) +#define ATC_DST_H2SEL_HW (0x1 << 13) +#define ATC_SOD (0x1 << 16) /* Stop On Done */ +#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ +#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ +#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */ +#define ATC_LOCK_IF_L_CHUNK (0x0 << 22) +#define ATC_LOCK_IF_L_BUFFER (0x1 << 22) +#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */ +#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */ +#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28) +#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) +#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) + +/* Platform-configurable bits in CTRLA */ +#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */ +#define ATC_SCSIZE_1 (0x0 << 16) +#define ATC_SCSIZE_4 (0x1 << 16) +#define ATC_SCSIZE_8 (0x2 << 16) +#define ATC_SCSIZE_16 (0x3 << 16) +#define ATC_SCSIZE_32 (0x4 << 16) +#define ATC_SCSIZE_64 (0x5 << 16) +#define 
ATC_SCSIZE_128 (0x6 << 16) +#define ATC_SCSIZE_256 (0x7 << 16) +#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */ +#define ATC_DCSIZE_1 (0x0 << 20) +#define ATC_DCSIZE_4 (0x1 << 20) +#define ATC_DCSIZE_8 (0x2 << 20) +#define ATC_DCSIZE_16 (0x3 << 20) +#define ATC_DCSIZE_32 (0x4 << 20) +#define ATC_DCSIZE_64 (0x5 << 20) +#define ATC_DCSIZE_128 (0x6 << 20) +#define ATC_DCSIZE_256 (0x7 << 20) + +#endif /* AT_HDMAC_H */ diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 95fe2c8d6c51..90dd3f8bd283 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -300,7 +300,7 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum); static int __init async_xor_init(void) { - #ifdef CONFIG_DMA_ENGINE + #ifdef CONFIG_ASYNC_TX_DMA /* To conserve stack space the input src_list (array of page pointers) * is reused to hold the array of dma addresses passed to the driver. * This conversion is only possible when dma_addr_t is less than the diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 070357aaedbc..81e1020fb514 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -4,7 +4,7 @@ menuconfig DMADEVICES bool "DMA Engine support" - depends on !HIGHMEM64G && HAS_DMA + depends on HAS_DMA help DMA engines can do asynchronous data transfers without involving the host CPU. Currently, this framework can be @@ -46,6 +46,14 @@ config DW_DMAC Support the Synopsys DesignWare AHB DMA controller. This can be integrated in chips such as the Atmel AT32ap7000. +config AT_HDMAC + tristate "Atmel AHB DMA support" + depends on ARCH_AT91SAM9RL + select DMA_ENGINE + help + Support the Atmel AHB DMA controller. This can be integrated in + chips such as the Atmel AT91SAM9RL. + config FSL_DMA tristate "Freescale Elo and Elo Plus DMA support" depends on FSL_SOC @@ -108,7 +116,7 @@ config NET_DMA config ASYNC_TX_DMA bool "Async_tx: Offload support for the async_tx api" - depends on DMA_ENGINE + depends on DMA_ENGINE && !HIGHMEM64G help This allows the async_tx api to take advantage of offload engines for memcpy, memset, xor, and raid6 p+q operations. If your platform has diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a0b6564800c4..40e1e0083571 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_DW_DMAC) += dw_dmac.o +obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c new file mode 100644 index 000000000000..9a1e5fb412ed --- /dev/null +++ b/drivers/dma/at_hdmac.c @@ -0,0 +1,1213 @@ +/* + * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) + * + * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * + * This supports the Atmel AHB DMA Controller, + * + * The driver has currently been tested with the Atmel AT91SAM9RL + * and AT91SAM9G45 series. 
+ */ + +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "at_hdmac_regs.h" + +/* + * Glossary + * -------- + * + * at_hdmac : Name of the ATmel AHB DMA Controller + * at_dma_ / atdma : ATmel DMA controller entity related + * atc_ / atchan : ATmel DMA Channel entity related + */ + +#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) +#define ATC_DEFAULT_CTRLA (0) +#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \ + |ATC_DIF(1)) + +/* + * Initial number of descriptors to allocate for each channel. This could + * be increased during dma usage. + */ +static unsigned int init_nr_desc_per_channel = 64; +module_param(init_nr_desc_per_channel, uint, 0644); +MODULE_PARM_DESC(init_nr_desc_per_channel, + "initial descriptors per channel (default: 64)"); + + +/* prototypes */ +static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); + + +/*----------------------------------------------------------------------*/ + +static struct at_desc *atc_first_active(struct at_dma_chan *atchan) +{ + return list_first_entry(&atchan->active_list, + struct at_desc, desc_node); +} + +static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) +{ + return list_first_entry(&atchan->queue, + struct at_desc, desc_node); +} + +/** + * atc_alloc_descriptor - allocate and return an initilized descriptor + * @chan: the channel to allocate descriptors for + * @gfp_flags: GFP allocation flags + * + * Note: The ack-bit is positioned in the descriptor flag at creation time + * to make initial allocation more convenient. This bit will be cleared + * and control will be given to client at usage time (during + * preparation functions). 
+ */ +static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, + gfp_t gfp_flags) +{ + struct at_desc *desc = NULL; + struct at_dma *atdma = to_at_dma(chan->device); + dma_addr_t phys; + + desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); + if (desc) { + memset(desc, 0, sizeof(struct at_desc)); + dma_async_tx_descriptor_init(&desc->txd, chan); + /* txd.flags will be overwritten in prep functions */ + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.tx_submit = atc_tx_submit; + desc->txd.phys = phys; + } + + return desc; +} + +/** + * atc_desc_get - get a unsused descriptor from free_list + * @atchan: channel we want a new descriptor for + */ +static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) +{ + struct at_desc *desc, *_desc; + struct at_desc *ret = NULL; + unsigned int i = 0; + LIST_HEAD(tmp_list); + + spin_lock_bh(&atchan->lock); + list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { + i++; + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(chan2dev(&atchan->chan_common), + "desc %p not ACKed\n", desc); + } + spin_unlock_bh(&atchan->lock); + dev_vdbg(chan2dev(&atchan->chan_common), + "scanned %u descriptors on freelist\n", i); + + /* no more descriptor available in initial pool: create one more */ + if (!ret) { + ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); + if (ret) { + spin_lock_bh(&atchan->lock); + atchan->descs_allocated++; + spin_unlock_bh(&atchan->lock); + } else { + dev_err(chan2dev(&atchan->chan_common), + "not enough descriptors available\n"); + } + } + + return ret; +} + +/** + * atc_desc_put - move a descriptor, including any children, to the free list + * @atchan: channel we work on + * @desc: descriptor, at the head of a chain, to move to free list + */ +static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) +{ + if (desc) { + struct at_desc *child; + + spin_lock_bh(&atchan->lock); + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + dev_vdbg(chan2dev(&atchan->chan_common), + "moving child desc %p to freelist\n", + child); + list_splice_init(&desc->txd.tx_list, &atchan->free_list); + dev_vdbg(chan2dev(&atchan->chan_common), + "moving desc %p to freelist\n", desc); + list_add(&desc->desc_node, &atchan->free_list); + spin_unlock_bh(&atchan->lock); + } +} + +/** + * atc_assign_cookie - compute and assign new cookie + * @atchan: channel we work on + * @desc: descriptor to asign cookie for + * + * Called with atchan->lock held and bh disabled + */ +static dma_cookie_t +atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc) +{ + dma_cookie_t cookie = atchan->chan_common.cookie; + + if (++cookie < 0) + cookie = 1; + + atchan->chan_common.cookie = cookie; + desc->txd.cookie = cookie; + + return cookie; +} + +/** + * atc_dostart - starts the DMA engine for real + * @atchan: the channel we want to start + * @first: first descriptor in the list we want to begin with + * + * Called with atchan->lock held and bh disabled + */ +static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + + /* ASSERT: channel is idle */ + if (atc_chan_is_enabled(atchan)) { + dev_err(chan2dev(&atchan->chan_common), + "BUG: Attempted to start non-idle channel\n"); + dev_err(chan2dev(&atchan->chan_common), + " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", + channel_readl(atchan, SADDR), + channel_readl(atchan, DADDR), + channel_readl(atchan, CTRLA), + 
channel_readl(atchan, CTRLB), + channel_readl(atchan, DSCR)); + + /* The tasklet will hopefully advance the queue... */ + return; + } + + vdbg_dump_regs(atchan); + + /* clear any pending interrupt */ + while (dma_readl(atdma, EBCISR)) + cpu_relax(); + + channel_writel(atchan, SADDR, 0); + channel_writel(atchan, DADDR, 0); + channel_writel(atchan, CTRLA, 0); + channel_writel(atchan, CTRLB, 0); + channel_writel(atchan, DSCR, first->txd.phys); + dma_writel(atdma, CHER, atchan->mask); + + vdbg_dump_regs(atchan); +} + +/** + * atc_chain_complete - finish work for one transaction chain + * @atchan: channel we work on + * @desc: descriptor at the head of the chain we want do complete + * + * Called with atchan->lock held and bh disabled */ +static void +atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) +{ + dma_async_tx_callback callback; + void *param; + struct dma_async_tx_descriptor *txd = &desc->txd; + + dev_vdbg(chan2dev(&atchan->chan_common), + "descriptor %u complete\n", txd->cookie); + + atchan->completed_cookie = txd->cookie; + callback = txd->callback; + param = txd->callback_param; + + /* move children to free_list */ + list_splice_init(&txd->tx_list, &atchan->free_list); + /* move myself to free_list */ + list_move(&desc->desc_node, &atchan->free_list); + + /* unmap dma addresses */ + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(chan2parent(&atchan->chan_common), + desc->lli.daddr, + desc->len, DMA_FROM_DEVICE); + else + dma_unmap_page(chan2parent(&atchan->chan_common), + desc->lli.daddr, + desc->len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(chan2parent(&atchan->chan_common), + desc->lli.saddr, + desc->len, DMA_TO_DEVICE); + else + dma_unmap_page(chan2parent(&atchan->chan_common), + desc->lli.saddr, + desc->len, DMA_TO_DEVICE); + } + + /* + * The API requires that no submissions are done from a + * callback, so we don't need to drop the lock here + */ + if (callback) + callback(param); + + dma_run_dependencies(txd); +} + +/** + * atc_complete_all - finish work for all transactions + * @atchan: channel to complete transactions for + * + * Eventually submit queued descriptors if any + * + * Assume channel is idle while calling this function + * Called with atchan->lock held and bh disabled + */ +static void atc_complete_all(struct at_dma_chan *atchan) +{ + struct at_desc *desc, *_desc; + LIST_HEAD(list); + + dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); + + BUG_ON(atc_chan_is_enabled(atchan)); + + /* + * Submit queued descriptors ASAP, i.e. before we go through + * the completed ones. 
+ */ + if (!list_empty(&atchan->queue)) + atc_dostart(atchan, atc_first_queued(atchan)); + /* empty active_list now it is completed */ + list_splice_init(&atchan->active_list, &list); + /* empty queue list by moving descriptors (if any) to active_list */ + list_splice_init(&atchan->queue, &atchan->active_list); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) + atc_chain_complete(atchan, desc); +} + +/** + * atc_cleanup_descriptors - cleanup up finished descriptors in active_list + * @atchan: channel to be cleaned up + * + * Called with atchan->lock held and bh disabled + */ +static void atc_cleanup_descriptors(struct at_dma_chan *atchan) +{ + struct at_desc *desc, *_desc; + struct at_desc *child; + + dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n"); + + list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { + if (!(desc->lli.ctrla & ATC_DONE)) + /* This one is currently in progress */ + return; + + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + if (!(child->lli.ctrla & ATC_DONE)) + /* Currently in progress */ + return; + + /* + * No descriptors so far seem to be in progress, i.e. + * this chain must be done. + */ + atc_chain_complete(atchan, desc); + } +} + +/** + * atc_advance_work - at the end of a transaction, move forward + * @atchan: channel where the transaction ended + * + * Called with atchan->lock held and bh disabled + */ +static void atc_advance_work(struct at_dma_chan *atchan) +{ + dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); + + if (list_empty(&atchan->active_list) || + list_is_singular(&atchan->active_list)) { + atc_complete_all(atchan); + } else { + atc_chain_complete(atchan, atc_first_active(atchan)); + /* advance work */ + atc_dostart(atchan, atc_first_active(atchan)); + } +} + + +/** + * atc_handle_error - handle errors reported by DMA controller + * @atchan: channel where error occurs + * + * Called with atchan->lock held and bh disabled + */ +static void atc_handle_error(struct at_dma_chan *atchan) +{ + struct at_desc *bad_desc; + struct at_desc *child; + + /* + * The descriptor currently at the head of the active list is + * broked. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to carry on. + */ + bad_desc = atc_first_active(atchan); + list_del_init(&bad_desc->desc_node); + + /* As we are stopped, take advantage to push queued descriptors + * in active_list */ + list_splice_init(&atchan->queue, atchan->active_list.prev); + + /* Try to restart the controller */ + if (!list_empty(&atchan->active_list)) + atc_dostart(atchan, atc_first_active(atchan)); + + /* + * KERN_CRITICAL may seem harsh, but since this only happens + * when someone submits a bad physical address in a + * descriptor, we should consider ourselves lucky that the + * controller flagged an error instead of scribbling over + * random memory locations. 
+ */ + dev_crit(chan2dev(&atchan->chan_common), + "Bad descriptor submitted for DMA!\n"); + dev_crit(chan2dev(&atchan->chan_common), + " cookie: %d\n", bad_desc->txd.cookie); + atc_dump_lli(atchan, &bad_desc->lli); + list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) + atc_dump_lli(atchan, &child->lli); + + /* Pretend the descriptor completed successfully */ + atc_chain_complete(atchan, bad_desc); +} + + +/*-- IRQ & Tasklet ---------------------------------------------------*/ + +static void atc_tasklet(unsigned long data) +{ + struct at_dma_chan *atchan = (struct at_dma_chan *)data; + + /* Channel cannot be enabled here */ + if (atc_chan_is_enabled(atchan)) { + dev_err(chan2dev(&atchan->chan_common), + "BUG: channel enabled in tasklet\n"); + return; + } + + spin_lock(&atchan->lock); + if (test_and_clear_bit(0, &atchan->error_status)) + atc_handle_error(atchan); + else + atc_advance_work(atchan); + + spin_unlock(&atchan->lock); +} + +static irqreturn_t at_dma_interrupt(int irq, void *dev_id) +{ + struct at_dma *atdma = (struct at_dma *)dev_id; + struct at_dma_chan *atchan; + int i; + u32 status, pending, imr; + int ret = IRQ_NONE; + + do { + imr = dma_readl(atdma, EBCIMR); + status = dma_readl(atdma, EBCISR); + pending = status & imr; + + if (!pending) + break; + + dev_vdbg(atdma->dma_common.dev, + "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", + status, imr, pending); + + for (i = 0; i < atdma->dma_common.chancnt; i++) { + atchan = &atdma->chan[i]; + if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) { + if (pending & AT_DMA_ERR(i)) { + /* Disable channel on AHB error */ + dma_writel(atdma, CHDR, atchan->mask); + /* Give information to tasklet */ + set_bit(0, &atchan->error_status); + } + tasklet_schedule(&atchan->tasklet); + ret = IRQ_HANDLED; + } + } + + } while (pending); + + return ret; +} + + +/*-- DMA Engine API --------------------------------------------------*/ + +/** + * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine + * @desc: descriptor at the head of the transaction chain + * + * Queue chain if DMA engine is working already + * + * Cookie increment and adding to active_list or queue must be atomic + */ +static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct at_desc *desc = txd_to_at_desc(tx); + struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); + dma_cookie_t cookie; + + spin_lock_bh(&atchan->lock); + cookie = atc_assign_cookie(atchan, desc); + + if (list_empty(&atchan->active_list)) { + dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", + desc->txd.cookie); + atc_dostart(atchan, desc); + list_add_tail(&desc->desc_node, &atchan->active_list); + } else { + dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", + desc->txd.cookie); + list_add_tail(&desc->desc_node, &atchan->queue); + } + + spin_unlock_bh(&atchan->lock); + + return cookie; +} + +/** + * atc_prep_dma_memcpy - prepare a memcpy operation + * @chan: the channel to prepare operation on + * @dest: operation virtual destination address + * @src: operation virtual source address + * @len: operation length + * @flags: tx descriptor status flags + */ +static struct dma_async_tx_descriptor * +atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_desc *desc = NULL; + struct at_desc *first = NULL; + struct at_desc *prev = NULL; + size_t xfer_count; + size_t offset; + unsigned int src_width; + unsigned int dst_width; + u32 ctrla; 
+ u32 ctrlb; + + dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", + dest, src, len, flags); + + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + return NULL; + } + + ctrla = ATC_DEFAULT_CTRLA; + ctrlb = ATC_DEFAULT_CTRLB + | ATC_SRC_ADDR_MODE_INCR + | ATC_DST_ADDR_MODE_INCR + | ATC_FC_MEM2MEM; + + /* + * We can be a lot more clever here, but this should take care + * of the most common optimization. + */ + if (!((src | dest | len) & 3)) { + ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; + src_width = dst_width = 2; + } else if (!((src | dest | len) & 1)) { + ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; + src_width = dst_width = 1; + } else { + ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; + src_width = dst_width = 0; + } + + for (offset = 0; offset < len; offset += xfer_count << src_width) { + xfer_count = min_t(size_t, (len - offset) >> src_width, + ATC_BTSIZE_MAX); + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + desc->lli.saddr = src + offset; + desc->lli.daddr = dest + offset; + desc->lli.ctrla = ctrla | xfer_count; + desc->lli.ctrlb = ctrlb; + + desc->txd.cookie = 0; + async_tx_ack(&desc->txd); + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + } + + /* First descriptor of the chain embedds additional information */ + first->txd.cookie = -EBUSY; + first->len = len; + + /* set end-of-link to the last link descriptor of list*/ + set_desc_eol(desc); + + desc->txd.flags = flags; /* client is in control of this ack */ + + return &first->txd; + +err_desc_get: + atc_desc_put(atchan, first); + return NULL; +} + + +/** + * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction + * @chan: DMA channel + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @scatterlist + * @direction: DMA direction + * @flags: tx descriptor status flags + */ +static struct dma_async_tx_descriptor * +atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma_slave *atslave = chan->private; + struct at_desc *first = NULL; + struct at_desc *prev = NULL; + u32 ctrla; + u32 ctrlb; + dma_addr_t reg; + unsigned int reg_width; + unsigned int mem_width; + unsigned int i; + struct scatterlist *sg; + size_t total_len = 0; + + dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n", + direction == DMA_TO_DEVICE ? 
"TO DEVICE" : "FROM DEVICE", + flags); + + if (unlikely(!atslave || !sg_len)) { + dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + return NULL; + } + + reg_width = atslave->reg_width; + + sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); + + ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; + ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; + + switch (direction) { + case DMA_TO_DEVICE: + ctrla |= ATC_DST_WIDTH(reg_width); + ctrlb |= ATC_DST_ADDR_MODE_FIXED + | ATC_SRC_ADDR_MODE_INCR + | ATC_FC_MEM2PER; + reg = atslave->tx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct at_desc *desc; + u32 len; + u32 mem; + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.saddr = mem; + desc->lli.daddr = reg; + desc->lli.ctrla = ctrla + | ATC_SRC_WIDTH(mem_width) + | len >> mem_width; + desc->lli.ctrlb = ctrlb; + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + total_len += len; + } + break; + case DMA_FROM_DEVICE: + ctrla |= ATC_SRC_WIDTH(reg_width); + ctrlb |= ATC_DST_ADDR_MODE_INCR + | ATC_SRC_ADDR_MODE_FIXED + | ATC_FC_PER2MEM; + + reg = atslave->rx_reg; + for_each_sg(sgl, sg, sg_len, i) { + struct at_desc *desc; + u32 len; + u32 mem; + + desc = atc_desc_get(atchan); + if (!desc) + goto err_desc_get; + + mem = sg_phys(sg); + len = sg_dma_len(sg); + mem_width = 2; + if (unlikely(mem & 3 || len & 3)) + mem_width = 0; + + desc->lli.saddr = reg; + desc->lli.daddr = mem; + desc->lli.ctrla = ctrla + | ATC_DST_WIDTH(mem_width) + | len >> mem_width; + desc->lli.ctrlb = ctrlb; + + if (!first) { + first = desc; + } else { + /* inform the HW lli about chaining */ + prev->lli.dscr = desc->txd.phys; + /* insert the link descriptor to the LD ring */ + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + total_len += len; + } + break; + default: + return NULL; + } + + /* set end-of-link to the last link descriptor of list*/ + set_desc_eol(prev); + + /* First descriptor of the chain embedds additional information */ + first->txd.cookie = -EBUSY; + first->len = total_len; + + /* last link descriptor of list is responsible of flags */ + prev->txd.flags = flags; /* client is in control of this ack */ + + return &first->txd; + +err_desc_get: + dev_err(chan2dev(chan), "not enough descriptors available\n"); + atc_desc_put(atchan, first); + return NULL; +} + +static void atc_terminate_all(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc, *_desc; + LIST_HEAD(list); + + /* + * This is only called when something went wrong elsewhere, so + * we don't really care about the data. Just disable the + * channel. We still have to poll the channel enable bit due + * to AHB/HSB limitations. 
+ */ + spin_lock_bh(&atchan->lock); + + dma_writel(atdma, CHDR, atchan->mask); + + /* confirm that this channel is disabled */ + while (dma_readl(atdma, CHSR) & atchan->mask) + cpu_relax(); + + /* active_list entries will end up before queued entries */ + list_splice_init(&atchan->queue, &list); + list_splice_init(&atchan->active_list, &list); + + spin_unlock_bh(&atchan->lock); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + atc_chain_complete(atchan, desc); +} + +/** + * atc_is_tx_complete - poll for transaction completion + * @chan: DMA channel + * @cookie: transaction identifier to check status of + * @done: if not %NULL, updated with last completed transaction + * @used: if not %NULL, updated with last used transaction + * + * If @done and @used are passed in, upon return they reflect the driver + * internal state and can be used with dma_async_is_complete() to check + * the status of multiple cookies without re-checking hardware state. + */ +static enum dma_status +atc_is_tx_complete(struct dma_chan *chan, + dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + enum dma_status ret; + + dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", + cookie, done ? *done : 0, used ? *used : 0); + + spin_lock_bh(atchan->lock); + + last_complete = atchan->completed_cookie; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret != DMA_SUCCESS) { + atc_cleanup_descriptors(atchan); + + last_complete = atchan->completed_cookie; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + } + + spin_unlock_bh(atchan->lock); + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return ret; +} + +/** + * atc_issue_pending - try to finish work + * @chan: target DMA channel + */ +static void atc_issue_pending(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + + dev_vdbg(chan2dev(chan), "issue_pending\n"); + + if (!atc_chan_is_enabled(atchan)) { + spin_lock_bh(&atchan->lock); + atc_advance_work(atchan); + spin_unlock_bh(&atchan->lock); + } +} + +/** + * atc_alloc_chan_resources - allocate resources for DMA channel + * @chan: allocate descriptor resources for this channel + * @client: current client requesting the channel be ready for requests + * + * return - the number of allocated descriptors + */ +static int atc_alloc_chan_resources(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc; + struct at_dma_slave *atslave; + int i; + u32 cfg; + LIST_HEAD(tmp_list); + + dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); + + /* ASSERT: channel is idle */ + if (atc_chan_is_enabled(atchan)) { + dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); + return -EIO; + } + + cfg = ATC_DEFAULT_CFG; + + atslave = chan->private; + if (atslave) { + /* + * We need controller-specific data to set up slave + * transfers. + */ + BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); + + /* if cfg configuration specified take it instad of default */ + if (atslave->cfg) + cfg = atslave->cfg; + } + + /* have we already been set up? 
+ * reconfigure channel but no need to reallocate descriptors */ + if (!list_empty(&atchan->free_list)) + return atchan->descs_allocated; + + /* Allocate initial pool of descriptors */ + for (i = 0; i < init_nr_desc_per_channel; i++) { + desc = atc_alloc_descriptor(chan, GFP_KERNEL); + if (!desc) { + dev_err(atdma->dma_common.dev, + "Only %d initial descriptors\n", i); + break; + } + list_add_tail(&desc->desc_node, &tmp_list); + } + + spin_lock_bh(&atchan->lock); + atchan->descs_allocated = i; + list_splice(&tmp_list, &atchan->free_list); + atchan->completed_cookie = chan->cookie = 1; + spin_unlock_bh(&atchan->lock); + + /* channel parameters */ + channel_writel(atchan, CFG, cfg); + + dev_dbg(chan2dev(chan), + "alloc_chan_resources: allocated %d descriptors\n", + atchan->descs_allocated); + + return atchan->descs_allocated; +} + +/** + * atc_free_chan_resources - free all channel resources + * @chan: DMA channel + */ +static void atc_free_chan_resources(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc, *_desc; + LIST_HEAD(list); + + dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", + atchan->descs_allocated); + + /* ASSERT: channel is idle */ + BUG_ON(!list_empty(&atchan->active_list)); + BUG_ON(!list_empty(&atchan->queue)); + BUG_ON(atc_chan_is_enabled(atchan)); + + list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { + dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); + list_del(&desc->desc_node); + /* free link descriptor */ + dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); + } + list_splice_init(&atchan->free_list, &list); + atchan->descs_allocated = 0; + + dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); +} + + +/*-- Module Management -----------------------------------------------*/ + +/** + * at_dma_off - disable DMA controller + * @atdma: the Atmel HDAMC device + */ +static void at_dma_off(struct at_dma *atdma) +{ + dma_writel(atdma, EN, 0); + + /* disable all interrupts */ + dma_writel(atdma, EBCIDR, -1L); + + /* confirm that all channels are disabled */ + while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) + cpu_relax(); +} + +static int __init at_dma_probe(struct platform_device *pdev) +{ + struct at_dma_platform_data *pdata; + struct resource *io; + struct at_dma *atdma; + size_t size; + int irq; + int err; + int i; + + /* get DMA Controller parameters from platform */ + pdata = pdev->dev.platform_data; + if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS) + return -EINVAL; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!io) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + size = sizeof(struct at_dma); + size += pdata->nr_channels * sizeof(struct at_dma_chan); + atdma = kzalloc(size, GFP_KERNEL); + if (!atdma) + return -ENOMEM; + + /* discover transaction capabilites from the platform data */ + atdma->dma_common.cap_mask = pdata->cap_mask; + atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; + + size = io->end - io->start + 1; + if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { + err = -EBUSY; + goto err_kfree; + } + + atdma->regs = ioremap(io->start, size); + if (!atdma->regs) { + err = -ENOMEM; + goto err_release_r; + } + + atdma->clk = clk_get(&pdev->dev, "dma_clk"); + if (IS_ERR(atdma->clk)) { + err = PTR_ERR(atdma->clk); + goto err_clk; + } + clk_enable(atdma->clk); + + /* force dma off, just in case */ + 
at_dma_off(atdma); + + err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); + if (err) + goto err_irq; + + platform_set_drvdata(pdev, atdma); + + /* create a pool of consistent memory blocks for hardware descriptors */ + atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", + &pdev->dev, sizeof(struct at_desc), + 4 /* word alignment */, 0); + if (!atdma->dma_desc_pool) { + dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + err = -ENOMEM; + goto err_pool_create; + } + + /* clear any pending interrupt */ + while (dma_readl(atdma, EBCISR)) + cpu_relax(); + + /* initialize channels related values */ + INIT_LIST_HEAD(&atdma->dma_common.channels); + for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { + struct at_dma_chan *atchan = &atdma->chan[i]; + + atchan->chan_common.device = &atdma->dma_common; + atchan->chan_common.cookie = atchan->completed_cookie = 1; + atchan->chan_common.chan_id = i; + list_add_tail(&atchan->chan_common.device_node, + &atdma->dma_common.channels); + + atchan->ch_regs = atdma->regs + ch_regs(i); + spin_lock_init(&atchan->lock); + atchan->mask = 1 << i; + + INIT_LIST_HEAD(&atchan->active_list); + INIT_LIST_HEAD(&atchan->queue); + INIT_LIST_HEAD(&atchan->free_list); + + tasklet_init(&atchan->tasklet, atc_tasklet, + (unsigned long)atchan); + atc_enable_irq(atchan); + } + + /* set base routines */ + atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; + atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; + atdma->dma_common.device_is_tx_complete = atc_is_tx_complete; + atdma->dma_common.device_issue_pending = atc_issue_pending; + atdma->dma_common.dev = &pdev->dev; + + /* set prep routines based on capability */ + if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) + atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; + + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { + atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; + atdma->dma_common.device_terminate_all = atc_terminate_all; + } + + dma_writel(atdma, EN, AT_DMA_ENABLE); + + dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", + dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", + dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? 
"slave " : "", + atdma->dma_common.chancnt); + + dma_async_device_register(&atdma->dma_common); + + return 0; + +err_pool_create: + platform_set_drvdata(pdev, NULL); + free_irq(platform_get_irq(pdev, 0), atdma); +err_irq: + clk_disable(atdma->clk); + clk_put(atdma->clk); +err_clk: + iounmap(atdma->regs); + atdma->regs = NULL; +err_release_r: + release_mem_region(io->start, size); +err_kfree: + kfree(atdma); + return err; +} + +static int __exit at_dma_remove(struct platform_device *pdev) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + struct dma_chan *chan, *_chan; + struct resource *io; + + at_dma_off(atdma); + dma_async_device_unregister(&atdma->dma_common); + + dma_pool_destroy(atdma->dma_desc_pool); + platform_set_drvdata(pdev, NULL); + free_irq(platform_get_irq(pdev, 0), atdma); + + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + device_node) { + struct at_dma_chan *atchan = to_at_dma_chan(chan); + + /* Disable interrupts */ + atc_disable_irq(atchan); + tasklet_disable(&atchan->tasklet); + + tasklet_kill(&atchan->tasklet); + list_del(&chan->device_node); + } + + clk_disable(atdma->clk); + clk_put(atdma->clk); + + iounmap(atdma->regs); + atdma->regs = NULL; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(io->start, io->end - io->start + 1); + + kfree(atdma); + + return 0; +} + +static void at_dma_shutdown(struct platform_device *pdev) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + + at_dma_off(platform_get_drvdata(pdev)); + clk_disable(atdma->clk); +} + +static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + + at_dma_off(platform_get_drvdata(pdev)); + clk_disable(atdma->clk); + return 0; +} + +static int at_dma_resume_early(struct platform_device *pdev) +{ + struct at_dma *atdma = platform_get_drvdata(pdev); + + clk_enable(atdma->clk); + dma_writel(atdma, EN, AT_DMA_ENABLE); + return 0; + +} + +static struct platform_driver at_dma_driver = { + .remove = __exit_p(at_dma_remove), + .shutdown = at_dma_shutdown, + .suspend_late = at_dma_suspend_late, + .resume_early = at_dma_resume_early, + .driver = { + .name = "at_hdmac", + }, +}; + +static int __init at_dma_init(void) +{ + return platform_driver_probe(&at_dma_driver, at_dma_probe); +} +module_init(at_dma_init); + +static void __exit at_dma_exit(void) +{ + platform_driver_unregister(&at_dma_driver); +} +module_exit(at_dma_exit); + +MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); +MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:at_hdmac"); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h new file mode 100644 index 000000000000..4c972afc49ec --- /dev/null +++ b/drivers/dma/at_hdmac_regs.h @@ -0,0 +1,353 @@ +/* + * Header file for the Atmel AHB DMA Controller driver + * + * Copyright (C) 2008 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef AT_HDMAC_REGS_H +#define AT_HDMAC_REGS_H + +#include <mach/at_hdmac.h> + +#define AT_DMA_MAX_NR_CHANNELS 8 + + +#define AT_DMA_GCFG 0x00 /* Global Configuration Register */ +#define AT_DMA_IF_BIGEND(i) (0x1 << (i)) /* AHB-Lite Interface i in Big-endian mode */ +#define AT_DMA_ARB_CFG (0x1 << 4) /* Arbiter mode. 
*/ +#define AT_DMA_ARB_CFG_FIXED (0x0 << 4) +#define AT_DMA_ARB_CFG_ROUND_ROBIN (0x1 << 4) + +#define AT_DMA_EN 0x04 /* Controller Enable Register */ +#define AT_DMA_ENABLE (0x1 << 0) + +#define AT_DMA_SREQ 0x08 /* Software Single Request Register */ +#define AT_DMA_SSREQ(x) (0x1 << ((x) << 1)) /* Request a source single transfer on channel x */ +#define AT_DMA_DSREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination single transfer on channel x */ + +#define AT_DMA_CREQ 0x0C /* Software Chunk Transfer Request Register */ +#define AT_DMA_SCREQ(x) (0x1 << ((x) << 1)) /* Request a source chunk transfer on channel x */ +#define AT_DMA_DCREQ(x) (0x1 << (1 + ((x) << 1))) /* Request a destination chunk transfer on channel x */ + +#define AT_DMA_LAST 0x10 /* Software Last Transfer Flag Register */ +#define AT_DMA_SLAST(x) (0x1 << ((x) << 1)) /* This src rq is last tx of buffer on channel x */ +#define AT_DMA_DLAST(x) (0x1 << (1 + ((x) << 1))) /* This dst rq is last tx of buffer on channel x */ + +#define AT_DMA_SYNC 0x14 /* Request Synchronization Register */ +#define AT_DMA_SYR(h) (0x1 << (h)) /* Synchronize handshake line h */ + +/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */ +#define AT_DMA_EBCIER 0x18 /* Enable register */ +#define AT_DMA_EBCIDR 0x1C /* Disable register */ +#define AT_DMA_EBCIMR 0x20 /* Mask Register */ +#define AT_DMA_EBCISR 0x24 /* Status Register */ +#define AT_DMA_CBTC_OFFSET 8 +#define AT_DMA_ERR_OFFSET 16 +#define AT_DMA_BTC(x) (0x1 << (x)) +#define AT_DMA_CBTC(x) (0x1 << (AT_DMA_CBTC_OFFSET + (x))) +#define AT_DMA_ERR(x) (0x1 << (AT_DMA_ERR_OFFSET + (x))) + +#define AT_DMA_CHER 0x28 /* Channel Handler Enable Register */ +#define AT_DMA_ENA(x) (0x1 << (x)) +#define AT_DMA_SUSP(x) (0x1 << ( 8 + (x))) +#define AT_DMA_KEEP(x) (0x1 << (24 + (x))) + +#define AT_DMA_CHDR 0x2C /* Channel Handler Disable Register */ +#define AT_DMA_DIS(x) (0x1 << (x)) +#define AT_DMA_RES(x) (0x1 << ( 8 + (x))) + +#define AT_DMA_CHSR 0x30 /* Channel Handler Status Register */ +#define AT_DMA_EMPT(x) (0x1 << (16 + (x))) +#define AT_DMA_STAL(x) (0x1 << (24 + (x))) + + +#define AT_DMA_CH_REGS_BASE 0x3C /* Channel registers base address */ +#define ch_regs(x) (AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */ + +/* Hardware register offset for each channel */ +#define ATC_SADDR_OFFSET 0x00 /* Source Address Register */ +#define ATC_DADDR_OFFSET 0x04 /* Destination Address Register */ +#define ATC_DSCR_OFFSET 0x08 /* Descriptor Address Register */ +#define ATC_CTRLA_OFFSET 0x0C /* Control A Register */ +#define ATC_CTRLB_OFFSET 0x10 /* Control B Register */ +#define ATC_CFG_OFFSET 0x14 /* Configuration Register */ +#define ATC_SPIP_OFFSET 0x18 /* Src PIP Configuration Register */ +#define ATC_DPIP_OFFSET 0x1C /* Dst PIP Configuration Register */ + + +/* Bitfield definitions */ + +/* Bitfields in DSCR */ +#define ATC_DSCR_IF(i) (0x3 & (i)) /* Dsc feched via AHB-Lite Interface i */ + +/* Bitfields in CTRLA */ +#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ +#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ +/* Chunck Tranfer size definitions are in at_hdmac.h */ +#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ +#define ATC_SRC_WIDTH(x) ((x) << 24) +#define ATC_SRC_WIDTH_BYTE (0x0 << 24) +#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) +#define ATC_SRC_WIDTH_WORD (0x2 << 24) +#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ +#define ATC_DST_WIDTH(x) 
((x) << 28) +#define ATC_DST_WIDTH_BYTE (0x0 << 28) +#define ATC_DST_WIDTH_HALFWORD (0x1 << 28) +#define ATC_DST_WIDTH_WORD (0x2 << 28) +#define ATC_DONE (0x1 << 31) /* Tx Done (only written back in descriptor) */ + +/* Bitfields in CTRLB */ +#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ +#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ +#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ +#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ +#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ +#define ATC_DST_DSCR_DIS (0x1 << 20) /* Dst Descriptor fetch disable */ +#define ATC_FC_MASK (0x7 << 21) /* Choose Flow Controller */ +#define ATC_FC_MEM2MEM (0x0 << 21) /* Mem-to-Mem (DMA) */ +#define ATC_FC_MEM2PER (0x1 << 21) /* Mem-to-Periph (DMA) */ +#define ATC_FC_PER2MEM (0x2 << 21) /* Periph-to-Mem (DMA) */ +#define ATC_FC_PER2PER (0x3 << 21) /* Periph-to-Periph (DMA) */ +#define ATC_FC_PER2MEM_PER (0x4 << 21) /* Periph-to-Mem (Peripheral) */ +#define ATC_FC_MEM2PER_PER (0x5 << 21) /* Mem-to-Periph (Peripheral) */ +#define ATC_FC_PER2PER_SRCPER (0x6 << 21) /* Periph-to-Periph (Src Peripheral) */ +#define ATC_FC_PER2PER_DSTPER (0x7 << 21) /* Periph-to-Periph (Dst Peripheral) */ +#define ATC_SRC_ADDR_MODE_MASK (0x3 << 24) +#define ATC_SRC_ADDR_MODE_INCR (0x0 << 24) /* Incrementing Mode */ +#define ATC_SRC_ADDR_MODE_DECR (0x1 << 24) /* Decrementing Mode */ +#define ATC_SRC_ADDR_MODE_FIXED (0x2 << 24) /* Fixed Mode */ +#define ATC_DST_ADDR_MODE_MASK (0x3 << 28) +#define ATC_DST_ADDR_MODE_INCR (0x0 << 28) /* Incrementing Mode */ +#define ATC_DST_ADDR_MODE_DECR (0x1 << 28) /* Decrementing Mode */ +#define ATC_DST_ADDR_MODE_FIXED (0x2 << 28) /* Fixed Mode */ +#define ATC_IEN (0x1 << 30) /* BTC interrupt enable (active low) */ +#define ATC_AUTO (0x1 << 31) /* Auto multiple buffer tx enable */ + +/* Bitfields in CFG */ +/* are in at_hdmac.h */ + +/* Bitfields in SPIP */ +#define ATC_SPIP_HOLE(x) (0xFFFFU & (x)) +#define ATC_SPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) + +/* Bitfields in DPIP */ +#define ATC_DPIP_HOLE(x) (0xFFFFU & (x)) +#define ATC_DPIP_BOUNDARY(x) ((0x3FF & (x)) << 16) + + +/*-- descriptors -----------------------------------------------------*/ + +/* LLI == Linked List Item; aka DMA buffer descriptor */ +struct at_lli { + /* values that are not changed by hardware */ + dma_addr_t saddr; + dma_addr_t daddr; + /* value that may get written back: */ + u32 ctrla; + /* more values that are not changed by hardware */ + u32 ctrlb; + dma_addr_t dscr; /* chain to next lli */ +}; + +/** + * struct at_desc - software descriptor + * @at_lli: hardware lli structure + * @txd: support for the async_tx api + * @desc_node: node on the channed descriptors list + * @len: total transaction bytecount + */ +struct at_desc { + /* FIRST values the hardware uses */ + struct at_lli lli; + + /* THEN values for driver housekeeping */ + struct dma_async_tx_descriptor txd; + struct list_head desc_node; + size_t len; +}; + +static inline struct at_desc * +txd_to_at_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct at_desc, txd); +} + + +/*-- Channels --------------------------------------------------------*/ + +/** + * struct at_dma_chan - internal representation of an Atmel HDMAC channel + * @chan_common: common dmaengine channel object members + * @device: parent device + * @ch_regs: memory mapped register base + * @mask: channel index in a mask + * @error_status: 
transmit error status information from irq handler + * to tasklet (use atomic operations) + * @tasklet: bottom half to finish transaction work + * @lock: serializes enqueue/dequeue operations to descriptors lists + * @completed_cookie: identifier for the most recently completed operation + * @active_list: list of descriptors dmaengine is being running on + * @queue: list of descriptors ready to be submitted to engine + * @free_list: list of descriptors usable by the channel + * @descs_allocated: records the actual size of the descriptor pool + */ +struct at_dma_chan { + struct dma_chan chan_common; + struct at_dma *device; + void __iomem *ch_regs; + u8 mask; + unsigned long error_status; + struct tasklet_struct tasklet; + + spinlock_t lock; + + /* these other elements are all protected by lock */ + dma_cookie_t completed_cookie; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + unsigned int descs_allocated; +}; + +#define channel_readl(atchan, name) \ + __raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET) + +#define channel_writel(atchan, name, val) \ + __raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET) + +static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) +{ + return container_of(dchan, struct at_dma_chan, chan_common); +} + + +/*-- Controller ------------------------------------------------------*/ + +/** + * struct at_dma - internal representation of an Atmel HDMA Controller + * @chan_common: common dmaengine dma_device object members + * @ch_regs: memory mapped register base + * @clk: dma controller clock + * @all_chan_mask: all channels availlable in a mask + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @chan: channels table to store at_dma_chan structures + */ +struct at_dma { + struct dma_device dma_common; + void __iomem *regs; + struct clk *clk; + + u8 all_chan_mask; + + struct dma_pool *dma_desc_pool; + /* AT THE END channels table */ + struct at_dma_chan chan[0]; +}; + +#define dma_readl(atdma, name) \ + __raw_readl((atdma)->regs + AT_DMA_##name) +#define dma_writel(atdma, name, val) \ + __raw_writel((val), (atdma)->regs + AT_DMA_##name) + +static inline struct at_dma *to_at_dma(struct dma_device *ddev) +{ + return container_of(ddev, struct at_dma, dma_common); +} + + +/*-- Helper functions ------------------------------------------------*/ + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} +static struct device *chan2parent(struct dma_chan *chan) +{ + return chan->dev->device.parent; +} + +#if defined(VERBOSE_DEBUG) +static void vdbg_dump_regs(struct at_dma_chan *atchan) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + + dev_err(chan2dev(&atchan->chan_common), + " channel %d : imr = 0x%x, chsr = 0x%x\n", + atchan->chan_common.chan_id, + dma_readl(atdma, EBCIMR), + dma_readl(atdma, CHSR)); + + dev_err(chan2dev(&atchan->chan_common), + " channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n", + channel_readl(atchan, SADDR), + channel_readl(atchan, DADDR), + channel_readl(atchan, CTRLA), + channel_readl(atchan, CTRLB), + channel_readl(atchan, CFG), + channel_readl(atchan, DSCR)); +} +#else +static void vdbg_dump_regs(struct at_dma_chan *atchan) {} +#endif + +static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) +{ + dev_printk(KERN_CRIT, chan2dev(&atchan->chan_common), + " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", + lli->saddr, lli->daddr, + lli->ctrla, lli->ctrlb, lli->dscr); +} + + +static void 
atc_setup_irq(struct at_dma_chan *atchan, int on) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + u32 ebci; + + /* enable interrupts on buffer chain completion & error */ + ebci = AT_DMA_CBTC(atchan->chan_common.chan_id) + | AT_DMA_ERR(atchan->chan_common.chan_id); + if (on) + dma_writel(atdma, EBCIER, ebci); + else + dma_writel(atdma, EBCIDR, ebci); +} + +static inline void atc_enable_irq(struct at_dma_chan *atchan) +{ + atc_setup_irq(atchan, 1); +} + +static inline void atc_disable_irq(struct at_dma_chan *atchan) +{ + atc_setup_irq(atchan, 0); +} + + +/** + * atc_chan_is_enabled - test if given channel is enabled + * @atchan: channel we want to test status + */ +static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + + return !!(dma_readl(atdma, CHSR) & atchan->mask); +} + + +/** + * set_desc_eol - set end-of-link to descriptor so it will end transfer + * @desc: descriptor, signle or at the end of a chain, to end chain on + */ +static void set_desc_eol(struct at_desc *desc) +{ + desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; + desc->lli.dscr = 0; +} + +#endif /* AT_HDMAC_REGS_H */ diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index fb7da5141e96..d93017fc7872 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO); MODULE_PARM_DESC(max_channels, "Maximum number of channels to use (default: all)"); +static unsigned int iterations; +module_param(iterations, uint, S_IRUGO); +MODULE_PARM_DESC(iterations, + "Iterations before stopping test (default: infinite)"); + static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO); MODULE_PARM_DESC(xor_sources, @@ -114,7 +119,7 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); for ( ; i < start + len; i++) buf[i] = PATTERN_SRC | PATTERN_COPY - | (~i & PATTERN_COUNT_MASK);; + | (~i & PATTERN_COUNT_MASK); for ( ; i < test_buf_size; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); buf++; @@ -270,7 +275,8 @@ static int dmatest_func(void *data) flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; - while (!kthread_should_stop()) { + while (!kthread_should_stop() + && !(iterations && total_tests >= iterations)) { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; @@ -416,6 +422,13 @@ err_srcbuf: err_srcs: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); + + if (iterations > 0) + while (!kthread_should_stop()) { + DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); + interruptible_sleep_on(&wait_dmatest_exit); + } + return ret; } @@ -495,11 +508,11 @@ static int dmatest_add_channel(struct dma_chan *chan) if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_MEMCPY); - thread_count += cnt > 0 ?: 0; + thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_XOR); - thread_count += cnt > 0 ?: 0; + thread_count += cnt > 0 ? cnt : 0; } pr_info("dmatest: Started %u threads using %s\n", diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index f18d1bde0439..ef87a8984145 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -12,6 +12,11 @@ * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. 
* The support for MPC8349 DMA contorller is also added. * + * This driver instructs the DMA controller to issue the PCI Read Multiple + * command for PCI read operations, instead of using the default PCI Read Line + * command. Please be aware that this setting may result in read pre-fetching + * on some platforms. + * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -49,9 +54,10 @@ static void dma_init(struct fsl_dma_chan *fsl_chan) case FSL_DMA_IP_83XX: /* Set the channel to below modes: * EOTIE - End-of-transfer interrupt enable + * PRC_RM - PCI read multiple */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, - 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE + | FSL_DMA_MR_PRC_RM, 32); break; } @@ -136,15 +142,16 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan) static void dma_start(struct fsl_dma_chan *fsl_chan) { - u32 mr_set = 0;; + u32 mr_set = 0; if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); mr_set |= FSL_DMA_MR_EMP_EN; - } else + } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~FSL_DMA_MR_EMP_EN, 32); + } if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) mr_set |= FSL_DMA_MR_EMS_EN; @@ -871,9 +878,9 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: - new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; case FSL_DMA_IP_83XX: + new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; } diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 4f21a512d848..dc7f26865797 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -38,6 +38,7 @@ /* Special MR definition for MPC8349 */ #define FSL_DMA_MR_EOTIE 0x00000080 +#define FSL_DMA_MR_PRC_RM 0x00000800 #define FSL_DMA_SR_CH 0x00000020 #define FSL_DMA_SR_PE 0x00000010 diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index ddab94f51224..3f23eabe09f2 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1176,7 +1176,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev) if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - dma_dev->max_xor = 8; ; + dma_dev->max_xor = 8; dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; } |
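The headline addition in this merge is the at_hdmac driver and its DMA slave interface. As a rough illustration of how platform code is expected to consume the structures added in arch/arm/mach-at91/include/mach/at_hdmac.h, here is a minimal, hypothetical board-file sketch; the register addresses, handshake interface numbers, and the at_hdmac_device / at91_add_device_hdmac names are invented for the example and are not part of this merge.

/*
 * Hypothetical board-file sketch (not from this merge): it only shows
 * how struct at_dma_platform_data and struct at_dma_slave from
 * <mach/at_hdmac.h> are meant to be filled in by platform code.
 */
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/at_hdmac.h>

static struct at_dma_platform_data at_hdmac_pdata = {
        .nr_channels    = 8,                    /* hardware supports at most 8 */
};

static struct platform_device at_hdmac_device = {
        .name   = "at_hdmac",                   /* matches the driver's platform alias */
        .dev    = {
                .platform_data  = &at_hdmac_pdata,
        },
        /* MEM and IRQ resources omitted for brevity */
};

/*
 * Per-peripheral slave description; the peripheral's driver passes this
 * through chan->private when it requests a DMA_SLAVE channel.  All
 * addresses and handshake numbers below are made up.
 */
static struct at_dma_slave ssc0_dma_slave = {
        .dma_dev        = &at_hdmac_device.dev, /* must be the DMA controller's device */
        .tx_reg         = 0xfffc0024,           /* peripheral transmit register (made up) */
        .rx_reg         = 0xfffc0020,           /* peripheral receive register (made up) */
        .reg_width      = AT_DMA_SLAVE_WIDTH_16BIT,
        .cfg            = ATC_FIFOCFG_HALFFIFO
                        | ATC_SRC_H2SEL_HW | ATC_SRC_PER(1)
                        | ATC_DST_H2SEL_HW | ATC_DST_PER(2),
};

static void __init at91_add_device_hdmac(void)
{
        /* DMA capabilities have to be set at run time */
        dma_cap_set(DMA_MEMCPY, at_hdmac_pdata.cap_mask);
        dma_cap_set(DMA_SLAVE, at_hdmac_pdata.cap_mask);
        platform_device_register(&at_hdmac_device);
}

With a slave structure like this in chan->private, atc_alloc_chan_resources() programs the channel CFG register from .cfg, and atc_prep_slave_sg() picks up tx_reg/rx_reg and reg_width when it builds the LLI chain. The dmatest change is user-visible as well: the new "iterations" module parameter (for example, loading the module with iterations=100) makes each test thread stop after a fixed number of runs instead of running until the module is unloaded.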