author    Russell King <rmk+kernel@arm.linux.org.uk>    2011-01-11 20:35:53 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2011-02-04 14:25:49 +0100
commit    c8ebae37034c0ead62eb4df8ef88e999ddb8d5cf
tree      c9925f03a9c627d7408ef483d7920e25a927e633
parent    ARM: mmci: no need for separate host->data_xfered
ARM: mmci: add dmaengine-based DMA support
Based on a patch from Linus Walleij.

Add dmaengine-based support for DMA to the MMCI driver, using the PrimeCell DMA engine interface. The changes over Linus' driver are:

- rename txsize_threshold to dmasize_threshold, as this reflects its purpose better.
- use 'mmci_dma_' as the function prefix rather than 'dma_mmci_'.
- clean up the requesting of DMA channels.
- don't release a single channel twice when it's shared between TX and RX.
- get rid of the 'dma_enable' bool - instead check whether the channel is NULL.
- detect incomplete DMA at the end of a transfer. Some DMA controllers (e.g. PL08x) are unable to be configured for scatter DMA while also listening to all four DMA request signals [BREQ, SREQ, LBREQ, LSREQ] from the MMCI; they can do one or the other, but not both. As the MMCI uses LBREQ/LSREQ for the final burst/words, the PL08x does not transfer the last few words.
- map and unmap DMA buffers using the DMA engine struct device, not the MMCI struct device - the DMA engine is doing the DMA transfer, not us.
- avoid double-unmapping of the DMA buffers on MMCI data errors.
- don't check for negative values from the dmaengine tx submission function - Dan says this must never fail.
- use the new dmaengine helper functions rather than the ugly function pointers directly (a condensed sketch of the resulting call sequence follows this list).
- allow the DMA code to be fully optimized away using dma_inprogress(), which is defined to constant 0 if DMA engine support is disabled.
- request the maximum segment size from the DMA engine struct device and set this appropriately.
- remove the checking of buffer alignment - the DMA engine should deal with its own restrictions on buffer alignment, not the individual DMA engine users.
- remove the setting of DMAREQCTL - this confuses some DMA controllers, as it causes LBREQ to be asserted for the last seven transfers rather than six SREQs and one LSREQ.
- remove the burst setting - the DMA controller should not burst past the transfer size required to complete the DMA operation.

Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
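For orientation, here is a condensed sketch (not part of the patch) of the slave-DMA call sequence the driver adopts. The helper names are the real dmaengine API as used in the diff below; mmci_dma_submit_sketch() itself is hypothetical and elides the RX/TX channel selection and transfer-size checks that mmci_dma_start_data() performs:

    /* Hypothetical condensation of mmci_dma_start_data(); error paths trimmed */
    static int mmci_dma_submit_sketch(struct dma_chan *chan,
                                      struct mmc_data *data,
                                      struct dma_slave_config *conf)
    {
            struct dma_async_tx_descriptor *desc;
            int nr_sg;

            /* Map against the DMA engine's device - it does the transfer, not us */
            nr_sg = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
                               conf->direction);
            if (!nr_sg)
                    return -EINVAL;

            /* Helper call instead of using the function pointers directly */
            dmaengine_slave_config(chan, conf);

            desc = chan->device->device_prep_slave_sg(chan, data->sg, nr_sg,
                                                      conf->direction,
                                                      DMA_CTRL_ACK);
            if (!desc) {
                    dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
                                 conf->direction);
                    return -ENOMEM;
            }

            dmaengine_submit(desc);         /* per Dan, this never fails */
            dma_async_issue_pending(chan);
            return 0;
    }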
-rw-r--r--	drivers/mmc/host/mmci.c	282
-rw-r--r--	drivers/mmc/host/mmci.h	13
-rw-r--r--	include/linux/amba/mmci.h	17
3 files changed, 304 insertions(+), 8 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index db2a358143d6..8a29c9f4a812 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
-#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -186,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface;
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_dma_setup(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+ const char *rxname, *txname;
+ dma_cap_mask_t mask;
+
+ if (!plat || !plat->dma_filter) {
+ dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /*
+ * If only an RX channel is specified, the driver will
+ * attempt to use it bidirectionally; however, if it
+ * is specified but cannot be located, DMA will be disabled.
+ */
+ if (plat->dma_rx_param) {
+ host->dma_rx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_rx_param);
+ /* E.g. if no DMA hardware is present */
+ if (!host->dma_rx_channel)
+ dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
+ }
+
+ if (plat->dma_tx_param) {
+ host->dma_tx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_tx_param);
+ if (!host->dma_tx_channel)
+ dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
+ } else {
+ host->dma_tx_channel = host->dma_rx_channel;
+ }
+
+ if (host->dma_rx_channel)
+ rxname = dma_chan_name(host->dma_rx_channel);
+ else
+ rxname = "none";
+
+ if (host->dma_tx_channel)
+ txname = dma_chan_name(host->dma_tx_channel);
+ else
+ txname = "none";
+
+ dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
+ rxname, txname);
+
+ /*
+ * Limit the maximum segment size in any SG entry according to
+ * the parameters of the DMA engine device.
+ */
+ if (host->dma_tx_channel) {
+ struct device *dev = host->dma_tx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+ if (host->dma_rx_channel) {
+ struct device *dev = host->dma_rx_channel->device->dev;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
+
+ if (max_seg_size < host->mmc->max_seg_size)
+ host->mmc->max_seg_size = max_seg_size;
+ }
+}
+
+/*
+ * This is used in __devinit or __devexit code, so inline it
+ * so that it can be discarded.
+ */
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel && plat->dma_tx_param)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_rx_channel = host->dma_tx_channel = NULL;
+}
+
+static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+ struct dma_chan *chan = host->dma_current;
+ enum dma_data_direction dir;
+ u32 status;
+ int i;
+
+ /* Wait up to 1ms for the DMA to complete */
+ for (i = 0; ; i++) {
+ status = readl(host->base + MMCISTATUS);
+ if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Check to see whether we still have some data left in the FIFO -
+ * this catches DMA controllers which are unable to monitor the
+ * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
+ * contiguous buffers. On TX, we'll get a FIFO underrun error.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dmaengine_terminate_all(chan);
+ if (!data->error)
+ data->error = -EIO;
+ }
+
+ if (data->flags & MMC_DATA_WRITE) {
+ dir = DMA_TO_DEVICE;
+ } else {
+ dir = DMA_FROM_DEVICE;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+
+ /*
+ * Use of DMA with scatter-gather is impossible.
+ * Give up on DMA and switch back to PIO mode.
+ */
+ if (status & MCI_RXDATAAVLBLMASK) {
+ dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
+ mmci_dma_release(host);
+ }
+}
+
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ struct variant_data *variant = host->variant;
+ struct dma_slave_config conf = {
+ .src_addr = host->phybase + MMCIFIFO,
+ .dst_addr = host->phybase + MMCIFIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ };
+ struct mmc_data *data = host->data;
+ struct dma_chan *chan;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor *desc;
+ int nr_sg;
+
+ host->dma_current = NULL;
+
+ if (data->flags & MMC_DATA_READ) {
+ conf.direction = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ conf.direction = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* If there's no DMA channel, fall back to PIO */
+ if (!chan)
+ return -EINVAL;
+
+ /* If less than or equal to the fifo size, don't bother with DMA */
+ if (host->size <= variant->fifosize)
+ return -EINVAL;
+
+ device = chan->device;
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ if (nr_sg == 0)
+ return -EINVAL;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+ conf.direction, DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_exit;
+
+ /* Okay, go for it. */
+ host->dma_current = chan;
+
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ datactrl |= MCI_DPSM_DMAENABLE;
+
+ /* Trigger the DMA transfer */
+ writel(datactrl, host->base + MMCIDATACTRL);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire the next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+
+unmap_exit:
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_dma_setup(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_release(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+{
+}
+
+static inline void mmci_dma_data_error(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ return -ENOSYS;
+}
+#endif
+
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
@@ -201,8 +445,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->size = data->blksz * data->blocks;
data->bytes_xfered = 0;
- mmci_init_sg(host, data);
-
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, 1000000000UL);
@@ -216,8 +458,21 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
- if (data->flags & MMC_DATA_READ) {
+
+ if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+
+ /*
+ * Attempt to use DMA operation mode; if this
+ * should fail, fall back to PIO mode
+ */
+ if (!mmci_dma_start_data(host, datactrl))
+ return;
+
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ if (data->flags & MMC_DATA_READ) {
irqmask = MCI_RXFIFOHALFFULLMASK;
/*
@@ -281,6 +536,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
+
/*
* Calculate how far we are into the transfer. Note that
* the data counter gives the number of bytes transferred
@@ -315,6 +574,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
+ if (dma_inprogress(host))
+ mmci_dma_unmap(host, data);
mmci_stop_data(host);
if (!data->error)
@@ -767,6 +1028,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+ host->phybase = dev->res.start;
host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
@@ -894,9 +1156,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
amba_set_drvdata(dev, mmc);
- dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
- mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
- (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+ dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
+ mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
+ amba_rev(dev), (unsigned long long)dev->res.start,
+ dev->irq[0], dev->irq[1]);
+
+ mmci_dma_setup(host);
mmc_add_host(mmc);
@@ -943,6 +1208,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
+ mmci_dma_release(host);
free_irq(dev->irq[0], host);
if (!host->singleirq)
free_irq(dev->irq[1], host);
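To make the fallback concrete, a brief sketch (not part of the patch) of how mmci_start_data() dispatches: any non-zero return from mmci_dma_start_data() - the stub's -ENOSYS just as much as the runtime -EINVAL for a missing channel or a FIFO-sized transfer - sends the request down the existing PIO path:

    /* Sketch of the dispatch added to mmci_start_data() above */
    if (!mmci_dma_start_data(host, datactrl))
            return;                 /* DMA engine accepted the job */

    mmci_init_sg(host, data);       /* any error code means: do PIO */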
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 164ce060fc1f..ec9a7bc6d0df 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -148,8 +148,10 @@
struct clk;
struct variant_data;
+struct dma_chan;
struct mmci_host {
+ phys_addr_t phybase;
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
@@ -179,5 +181,16 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
struct regulator *vcc;
+
+#ifdef CONFIG_DMA_ENGINE
+ /* DMA stuff */
+ struct dma_chan *dma_current;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+
+#define dma_inprogress(host) ((host)->dma_current)
+#else
+#define dma_inprogress(host) (0)
+#endif
};
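A note in passing: because dma_inprogress() expands to the constant 0 when CONFIG_DMA_ENGINE is unset, guards like the following (the pattern used in mmci_data_irq()) are discarded as dead code along with the empty inline stubs they call, so a PIO-only build carries no DMA overhead:

    /* Folds to nothing without CONFIG_DMA_ENGINE */
    if (dma_inprogress(host))
            mmci_dma_data_error(host);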
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index f4ee9acc9721..f60227088b7b 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -6,6 +6,9 @@
#include <linux/mmc/host.h>
+/* Forward declaration, so we need not pull in <linux/dmaengine.h> */
+struct dma_chan;
+
/**
* struct mmci_platform_data - platform configuration for the MMCI
* (also known as PL180) block.
@@ -27,6 +30,17 @@
* @cd_invert: true if the gpio_cd pin value is active low
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
+ * @dma_filter: function used to select an appropriate RX and TX
+ * DMA channel to be used for DMA, if and only if you're deploying the
+ * generic DMA engine
+ * @dma_rx_param: parameter passed to the DMA allocation
+ * filter in order to select an appropriate RX channel. If
+ * there is a bidirectional RX+TX channel, then just specify
+ * this and leave dma_tx_param set to NULL
+ * @dma_tx_param: parameter passed to the DMA allocation
+ * filter in order to select an appropriate TX channel. If this
+ * is NULL the driver will attempt to use the RX channel as a
+ * bidirectional channel
*/
struct mmci_platform_data {
unsigned int f_max;
@@ -38,6 +52,9 @@ struct mmci_platform_data {
int gpio_cd;
bool cd_invert;
unsigned long capabilities;
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ void *dma_rx_param;
+ void *dma_tx_param;
};
#endif
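For illustration, a minimal sketch of how a board file might populate these new fields, assuming a PL08x DMA controller (whose driver exports pl08x_filter_id()); the channel names and f_max value here are hypothetical placeholders:

    #include <linux/amba/mmci.h>
    #include <linux/amba/pl08x.h>       /* pl08x_filter_id() */

    static struct mmci_platform_data board_mmci_plat = {
            .f_max          = 48000000,         /* hypothetical board limit */
            .dma_filter     = pl08x_filter_id,
            .dma_rx_param   = "mmci_rx",        /* matched by the filter */
            .dma_tx_param   = "mmci_tx",        /* leave NULL to share the RX channel */
    };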