author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 18:32:00 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 18:32:00 +0100
commit		5f0e685f316a1de6d3af8b23eaf46651faca32ab (patch)
tree		af1ed231b7fcfc65b146be59a0aee699aa9f6353
parent		Merge tag 'dt-for-linus' of git://git.secretlab.ca/git/linux-2.6 (diff)
parent		spi/fsl-espi: Make sure pm is within 2..32 (diff)
download	linux-5f0e685f316a1de6d3af8b23eaf46651faca32ab.tar.xz
		linux-5f0e685f316a1de6d3af8b23eaf46651faca32ab.zip
Merge tag 'spi-for-linus' of git://git.secretlab.ca/git/linux-2.6
Pull SPI changes for v3.4 from Grant Likely:
 "Mostly a bunch of new drivers and driver bug fixes; but this also
  includes a few patches that create a core message queue infrastructure
  for the spi subsystem instead of making each driver open code it."

* tag 'spi-for-linus' of git://git.secretlab.ca/git/linux-2.6: (34 commits)
  spi/fsl-espi: Make sure pm is within 2..32
  spi/fsl-espi: make the clock computation easier to read
  spi: sh-hspi: modify write/read method
  spi: sh-hspi: control spi clock more correctly
  spi: sh-hspi: convert to using core message queue
  spi: s3c64xx: Fix build
  spi: s3c64xx: remove unnecessary callback msg->complete
  spi: remove redundant variable assignment
  spi: release lock on error path in spi_pump_messages()
  spi: Compatibility with direction which is used in samsung DMA operation
  spi-topcliff-pch: add recovery processing in case wait-event timeout
  spi-topcliff-pch: supports a spi mode setup and bit order setup by IO control
  spi-topcliff-pch: Fix issue for transmitting over 4KByte
  spi-topcliff-pch: Modify pci-bus number dynamically to get DMA device info
  spi/imx: simplify error handling to free gpios
  spi: Convert to DEFINE_PCI_DEVICE_TABLE
  spi: add Broadcom BCM63xx SPI controller driver
  SPI: add CSR SiRFprimaII SPI controller driver
  spi-topcliff-pch: fix -Wuninitialized warning
  spi: Mark spi_register_board_info() __devinit
  ...
-rw-r--r--  Documentation/devicetree/bindings/spi/omap-spi.txt |  20
-rw-r--r--  Documentation/spi/spi-summary                      |  58
-rw-r--r--  drivers/spi/Kconfig                                |  36
-rw-r--r--  drivers/spi/Makefile                               |   4
-rw-r--r--  drivers/spi/spi-bcm63xx.c                          | 486
-rw-r--r--  drivers/spi/spi-dw-pci.c                           |   2
-rw-r--r--  drivers/spi/spi-fsl-espi.c                         |  14
-rw-r--r--  drivers/spi/spi-imx.c                              |  11
-rw-r--r--  drivers/spi/spi-nuc900.c                           |   2
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c                      |  56
-rw-r--r--  drivers/spi/spi-pl022.c                            | 286
-rw-r--r--  drivers/spi/spi-pxa2xx-pci.c                       |   2
-rw-r--r--  drivers/spi/spi-rspi.c                             | 521
-rw-r--r--  drivers/spi/spi-s3c64xx.c                          | 232
-rw-r--r--  drivers/spi/spi-sh-hspi.c                          | 331
-rw-r--r--  drivers/spi/spi-sh.c                               |  25
-rw-r--r--  drivers/spi/spi-sirf.c                             | 687
-rw-r--r--  drivers/spi/spi-topcliff-pch.c                     | 113
-rw-r--r--  drivers/spi/spi.c                                  | 347
-rw-r--r--  include/linux/amba/pl022.h                         |   3
-rw-r--r--  include/linux/spi/sh_hspi.h                        |  23
-rw-r--r--  include/linux/spi/spi.h                            |  53
22 files changed, 2890 insertions(+), 422 deletions(-)
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
new file mode 100644
index 000000000000..81df374adbb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/omap-spi.txt
@@ -0,0 +1,20 @@
+OMAP2+ McSPI device
+
+Required properties:
+- compatible :
+ - "ti,omap2-mcspi" for OMAP2 & OMAP3.
+ - "ti,omap4-mcspi" for OMAP4+.
+- ti,spi-num-cs : Number of chipselects supported by the instance.
+- ti,hwmods : Name of the hwmod associated with the McSPI
+
+
+Example:
+
+mcspi1: mcspi@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "ti,omap4-mcspi";
+ ti,hwmods = "mcspi1";
+ ti,spi-num-cs = <4>;
+};
+
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index 4884cb33845d..7312ec14dd89 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -1,7 +1,7 @@
Overview of Linux kernel SPI support
====================================
-21-May-2007
+02-Feb-2012
What is SPI?
------------
@@ -483,9 +483,9 @@ also initialize its own internal state. (See below about bus numbering
and those methods.)
After you initialize the spi_master, then use spi_register_master() to
-publish it to the rest of the system. At that time, device nodes for
-the controller and any predeclared spi devices will be made available,
-and the driver model core will take care of binding them to drivers.
+publish it to the rest of the system. At that time, device nodes for the
+controller and any predeclared spi devices will be made available, and
+the driver model core will take care of binding them to drivers.
If you need to remove your SPI controller driver, spi_unregister_master()
will reverse the effect of spi_register_master().
@@ -521,21 +521,53 @@ SPI MASTER METHODS
** When you code setup(), ASSUME that the controller
** is actively processing transfers for another device.
- master->transfer(struct spi_device *spi, struct spi_message *message)
- This must not sleep. Its responsibility is arrange that the
- transfer happens and its complete() callback is issued. The two
- will normally happen later, after other transfers complete, and
- if the controller is idle it will need to be kickstarted.
-
master->cleanup(struct spi_device *spi)
Your controller driver may use spi_device.controller_state to hold
state it dynamically associates with that device. If you do that,
be sure to provide the cleanup() method to free that state.
+ master->prepare_transfer_hardware(struct spi_master *master)
+ This will be called by the queue mechanism to signal to the driver
+ that a message is coming in soon, so the subsystem requests the
+ driver to prepare the transfer hardware by issuing this call.
+ This may sleep.
+
+ master->unprepare_transfer_hardware(struct spi_master *master)
+ This will be called by the queue mechanism to signal to the driver
+ that there are no more messages pending in the queue and it may
+ relax the hardware (e.g. by power management calls). This may sleep.
+
+ master->transfer_one_message(struct spi_master *master,
+ struct spi_message *mesg)
+ The subsystem calls the driver to transfer a single message while
+ queuing transfers that arrive in the meantime. When the driver is
+ finished with this message, it must call
+ spi_finalize_current_message() so the subsystem can issue the next
+ transfer. This may sleep.
+
+ DEPRECATED METHODS
+
+ master->transfer(struct spi_device *spi, struct spi_message *message)
+ This must not sleep. Its responsibility is to arrange that the
+ transfer happens and its complete() callback is issued. The two
+ will normally happen later, after other transfers complete, and
+ if the controller is idle it will need to be kickstarted. This
+ method is not used on queued controllers and must be NULL if
+ transfer_one_message() and (un)prepare_transfer_hardware() are
+ implemented.
+
SPI MESSAGE QUEUE
-The bulk of the driver will be managing the I/O queue fed by transfer().
+If you are happy with the standard queueing mechanism provided by the
+SPI subsystem, just implement the queued methods specified above. Using
+the message queue has the upside of centralizing a lot of code and
+providing pure process-context execution of methods. The message queue
+can also be elevated to realtime priority on high-priority SPI traffic.
+
+Unless the queueing mechanism in the SPI subsystem is selected, the bulk
+of the driver will be managing the I/O queue fed by the now deprecated
+function transfer().
That queue could be purely conceptual. For example, a driver used only
for low-frequency sensor access might be fine using synchronous PIO.
@@ -561,4 +593,6 @@ Stephen Street
Mark Underwood
Andrew Victor
Vitaly Wool
-
+Grant Likely
+Mark Brown
+Linus Walleij
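
For illustration, a minimal controller driver built on the queued hooks documented above might look like the sketch below. The foo_* names, the foo_spi state structure and the clock handling are placeholders invented for this example, not part of the series.

    #include <linux/clk.h>
    #include <linux/spi/spi.h>

    struct foo_spi {                    /* hypothetical driver state */
            struct clk *clk;
    };

    /* hardware-specific transfer routine, left out of this sketch */
    static int foo_do_one_transfer(struct foo_spi *fs, struct spi_device *spi,
                                   struct spi_transfer *t);

    static int foo_prepare_transfer_hardware(struct spi_master *master)
    {
            struct foo_spi *fs = spi_master_get_devdata(master);

            /* may sleep: bring clocks/power up before messages arrive */
            clk_enable(fs->clk);
            return 0;
    }

    static int foo_transfer_one_message(struct spi_master *master,
                                        struct spi_message *msg)
    {
            struct foo_spi *fs = spi_master_get_devdata(master);
            struct spi_transfer *t;
            int ret = 0;

            list_for_each_entry(t, &msg->transfers, transfer_list) {
                    ret = foo_do_one_transfer(fs, msg->spi, t);
                    if (ret)
                            break;
                    msg->actual_length += t->len;
            }

            msg->status = ret;
            /* hand the message back so the core can issue the next one */
            spi_finalize_current_message(master);
            return ret;
    }

    static int foo_unprepare_transfer_hardware(struct spi_master *master)
    {
            struct foo_spi *fs = spi_master_get_devdata(master);

            /* queue drained: relax the hardware again */
            clk_disable(fs->clk);
            return 0;
    }

    /* in probe(), after spi_alloc_master(): */
    master->prepare_transfer_hardware = foo_prepare_transfer_hardware;
    master->transfer_one_message = foo_transfer_one_message;
    master->unprepare_transfer_hardware = foo_unprepare_transfer_hardware;
    /* master->transfer stays NULL when the queued interface is used */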
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8293658e7cf9..0b06e360628a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -94,6 +94,12 @@ config SPI_AU1550
If you say yes to this option, support will be included for the
PSC SPI controller found on Au1550, Au1200 and Au1300 series.
+config SPI_BCM63XX
+ tristate "Broadcom BCM63xx SPI controller"
+ depends on BCM63XX
+ help
+ Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -126,7 +132,7 @@ config SPI_COLDFIRE_QSPI
config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
- depends on SPI_MASTER && ARCH_DAVINCI
+ depends on ARCH_DAVINCI
select SPI_BITBANG
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
@@ -188,7 +194,7 @@ config SPI_MPC52xx_PSC
config SPI_MPC512x_PSC
tristate "Freescale MPC512x PSC SPI controller"
- depends on SPI_MASTER && PPC_MPC512x
+ depends on PPC_MPC512x
help
This enables using the Freescale MPC5121 Programmable Serial
Controller in SPI master mode.
@@ -238,7 +244,7 @@ config SPI_OMAP24XX
config SPI_OMAP_100K
tristate "OMAP SPI 100K"
- depends on SPI_MASTER && (ARCH_OMAP850 || ARCH_OMAP730)
+ depends on ARCH_OMAP850 || ARCH_OMAP730
help
OMAP SPI 100K master controller for omap7xx boards.
@@ -262,7 +268,7 @@ config SPI_PL022
config SPI_PPC4xx
tristate "PPC4xx SPI Controller"
- depends on PPC32 && 4xx && SPI_MASTER
+ depends on PPC32 && 4xx
select SPI_BITBANG
help
This selects a driver for the PPC4xx SPI Controller.
@@ -279,6 +285,12 @@ config SPI_PXA2XX
config SPI_PXA2XX_PCI
def_bool SPI_PXA2XX && X86_32 && PCI
+config SPI_RSPI
+ tristate "Renesas RSPI controller"
+ depends on SUPERH
+ help
+ SPI driver for Renesas RSPI blocks.
+
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -324,9 +336,22 @@ config SPI_SH_SCI
help
SPI driver for SuperH SCI blocks.
+config SPI_SH_HSPI
+ tristate "SuperH HSPI controller"
+ depends on ARCH_SHMOBILE
+ help
+ SPI driver for SuperH HSPI blocks.
+
+config SPI_SIRF
+ tristate "CSR SiRFprimaII SPI controller"
+ depends on ARCH_PRIMA2
+ select SPI_BITBANG
+ help
+ SPI driver for CSR SiRFprimaII SoCs
+
config SPI_STMP3XXX
tristate "Freescale STMP37xx/378x SPI/SSP controller"
- depends on ARCH_STMP3XXX && SPI_MASTER
+ depends on ARCH_STMP3XXX
help
SPI driver for Freescale STMP37xx/378x SoC SSP interface
@@ -384,7 +409,6 @@ config SPI_NUC900
config SPI_DESIGNWARE
tristate "DesignWare SPI controller core support"
- depends on SPI_MASTER
help
general driver for SPI controller core from DesignWare
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 61c3261c388c..a1d48e0ba3dc 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
@@ -44,13 +45,16 @@ obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
spi-s3c24xx-hw-y := spi-s3c24xx.o
spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
obj-$(CONFIG_SPI_SH) += spi-sh.o
+obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
+obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
obj-$(CONFIG_SPI_TEGRA) += spi-tegra.o
obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
new file mode 100644
index 000000000000..f01b2648452e
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx.c
@@ -0,0 +1,486 @@
+/*
+ * Broadcom BCM63xx SPI controller support
+ *
+ * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+
+#include <bcm63xx_dev_spi.h>
+
+#define PFX KBUILD_MODNAME
+#define DRV_VER "0.1.2"
+
+struct bcm63xx_spi {
+ spinlock_t lock;
+ int stopping;
+ struct completion done;
+
+ void __iomem *regs;
+ int irq;
+
+ /* Platform data */
+ u32 speed_hz;
+ unsigned fifo_size;
+
+ /* Data buffers */
+ const unsigned char *tx_ptr;
+ unsigned char *rx_ptr;
+
+ /* data iomem */
+ u8 __iomem *tx_io;
+ const u8 __iomem *rx_io;
+
+ int remaining_bytes;
+
+ struct clk *clk;
+ struct platform_device *pdev;
+};
+
+static inline u8 bcm_spi_readb(struct bcm63xx_spi *bs,
+ unsigned int offset)
+{
+ return bcm_readb(bs->regs + bcm63xx_spireg(offset));
+}
+
+static inline u16 bcm_spi_readw(struct bcm63xx_spi *bs,
+ unsigned int offset)
+{
+ return bcm_readw(bs->regs + bcm63xx_spireg(offset));
+}
+
+static inline void bcm_spi_writeb(struct bcm63xx_spi *bs,
+ u8 value, unsigned int offset)
+{
+ bcm_writeb(value, bs->regs + bcm63xx_spireg(offset));
+}
+
+static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
+ u16 value, unsigned int offset)
+{
+ bcm_writew(value, bs->regs + bcm63xx_spireg(offset));
+}
+
+static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
+ { 20000000, SPI_CLK_20MHZ },
+ { 12500000, SPI_CLK_12_50MHZ },
+ { 6250000, SPI_CLK_6_250MHZ },
+ { 3125000, SPI_CLK_3_125MHZ },
+ { 1563000, SPI_CLK_1_563MHZ },
+ { 781000, SPI_CLK_0_781MHZ },
+ { 391000, SPI_CLK_0_391MHZ }
+};
+
+static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ u8 bits_per_word;
+ u8 clk_cfg, reg;
+ u32 hz;
+ int i;
+
+ bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
+ hz = (t) ? t->speed_hz : spi->max_speed_hz;
+ if (bits_per_word != 8) {
+ dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+ __func__, bits_per_word);
+ return -EINVAL;
+ }
+
+ if (spi->chip_select > spi->master->num_chipselect) {
+ dev_err(&spi->dev, "%s, unsupported slave %d\n",
+ __func__, spi->chip_select);
+ return -EINVAL;
+ }
+
+ /* Find the closest clock configuration */
+ for (i = 0; i < SPI_CLK_MASK; i++) {
+ if (hz <= bcm63xx_spi_freq_table[i][0]) {
+ clk_cfg = bcm63xx_spi_freq_table[i][1];
+ break;
+ }
+ }
+
+ /* No matching configuration found, default to lowest */
+ if (i == SPI_CLK_MASK)
+ clk_cfg = SPI_CLK_0_391MHZ;
+
+ /* clear existing clock configuration bits of the register */
+ reg = bcm_spi_readb(bs, SPI_CLK_CFG);
+ reg &= ~SPI_CLK_MASK;
+ reg |= clk_cfg;
+
+ bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
+ dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
+ clk_cfg, hz);
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
+static int bcm63xx_spi_setup(struct spi_device *spi)
+{
+ struct bcm63xx_spi *bs;
+ int ret;
+
+ bs = spi_master_get_devdata(spi->master);
+
+ if (bs->stopping)
+ return -ESHUTDOWN;
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ if (spi->mode & ~MODEBITS) {
+ dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
+ __func__, spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
+ ret = bcm63xx_spi_setup_transfer(spi, NULL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return ret;
+ }
+
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
+ __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
+
+ return 0;
+}
+
+/* Fill the TX FIFO with as many bytes as possible */
+static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
+{
+ u8 size;
+
+ /* Fill the Tx FIFO with as many bytes as possible */
+ size = bs->remaining_bytes < bs->fifo_size ? bs->remaining_bytes :
+ bs->fifo_size;
+ memcpy_toio(bs->tx_io, bs->tx_ptr, size);
+ bs->remaining_bytes -= size;
+}
+
+static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ u16 msg_ctl;
+ u16 cmd;
+
+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
+ t->tx_buf, t->rx_buf, t->len);
+
+ /* Transmitter is inhibited */
+ bs->tx_ptr = t->tx_buf;
+ bs->rx_ptr = t->rx_buf;
+ init_completion(&bs->done);
+
+ if (t->tx_buf) {
+ bs->remaining_bytes = t->len;
+ bcm63xx_spi_fill_tx_fifo(bs);
+ }
+
+ /* Enable the command done interrupt which
+ * we use to determine completion of a command */
+ bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
+
+ /* Fill in the Message control register */
+ msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
+
+ if (t->rx_buf && t->tx_buf)
+ msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT);
+ else if (t->rx_buf)
+ msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT);
+ else if (t->tx_buf)
+ msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT);
+
+ bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
+
+ /* Issue the transfer */
+ cmd = SPI_CMD_START_IMMEDIATE;
+ cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
+ cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
+ bcm_spi_writew(bs, cmd, SPI_CMD);
+ wait_for_completion(&bs->done);
+
+ /* Disable the CMD_DONE interrupt */
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ return t->len - bs->remaining_bytes;
+}
+
+static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ struct spi_transfer *t;
+ int ret = 0;
+
+ if (unlikely(list_empty(&m->transfers)))
+ return -EINVAL;
+
+ if (bs->stopping)
+ return -ESHUTDOWN;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ ret += bcm63xx_txrx_bufs(spi, t);
+ }
+
+ m->complete(m->context);
+
+ return ret;
+}
+
+/* This driver supports single master mode only. Hence
+ * CMD_DONE is the only interrupt we care about
+ */
+static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = (struct spi_master *)dev_id;
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ u8 intr;
+ u16 cmd;
+
+ /* Read interrupts and clear them immediately */
+ intr = bcm_spi_readb(bs, SPI_INT_STATUS);
+ bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ /* A transfer completed */
+ if (intr & SPI_INTR_CMD_DONE) {
+ u8 rx_tail;
+
+ rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
+
+ /* Read out all the data */
+ if (rx_tail)
+ memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
+
+ /* See if there is more data to send */
+ if (bs->remaining_bytes > 0) {
+ bcm63xx_spi_fill_tx_fifo(bs);
+
+ /* Start the transfer */
+ bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT,
+ SPI_MSG_CTL);
+ cmd = bcm_spi_readw(bs, SPI_CMD);
+ cmd |= SPI_CMD_START_IMMEDIATE;
+ cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
+ bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
+ bcm_spi_writew(bs, cmd, SPI_CMD);
+ } else {
+ complete(&bs->done);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_spi_pdata *pdata = pdev->dev.platform_data;
+ int irq;
+ struct spi_master *master;
+ struct clk *clk;
+ struct bcm63xx_spi *bs;
+ int ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(dev, "no iomem\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ clk = clk_get(dev, "spi");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "no clock for device\n");
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+
+ master = spi_alloc_master(dev, sizeof(*bs));
+ if (!master) {
+ dev_err(dev, "out of memory\n");
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ init_completion(&bs->done);
+
+ platform_set_drvdata(pdev, master);
+ bs->pdev = pdev;
+
+ if (!devm_request_mem_region(&pdev->dev, r->start,
+ resource_size(r), PFX)) {
+ dev_err(dev, "iomem request failed\n");
+ ret = -ENXIO;
+ goto out_err;
+ }
+
+ bs->regs = devm_ioremap_nocache(&pdev->dev, r->start,
+ resource_size(r));
+ if (!bs->regs) {
+ dev_err(dev, "unable to ioremap regs\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ bs->irq = irq;
+ bs->clk = clk;
+ bs->fifo_size = pdata->fifo_size;
+
+ ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
+ pdev->name, master);
+ if (ret) {
+ dev_err(dev, "unable to request irq\n");
+ goto out_err;
+ }
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+ master->setup = bcm63xx_spi_setup;
+ master->transfer = bcm63xx_transfer;
+ bs->speed_hz = pdata->speed_hz;
+ bs->stopping = 0;
+ bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
+ bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
+ spin_lock_init(&bs->lock);
+
+ /* Initialize hardware */
+ clk_enable(bs->clk);
+ bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
+
+ /* register and we are done */
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(dev, "spi register failed\n");
+ goto out_clk_disable;
+ }
+
+ dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d) v%s\n",
+ r->start, irq, bs->fifo_size, DRV_VER);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable(clk);
+out_err:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+out_clk:
+ clk_put(clk);
+out:
+ return ret;
+}
+
+static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ /* reset spi block */
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+ spin_lock(&bs->lock);
+ bs->stopping = 1;
+
+ /* HW shutdown */
+ clk_disable(bs->clk);
+ clk_put(bs->clk);
+
+ spin_unlock(&bs->lock);
+ platform_set_drvdata(pdev, 0);
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bcm63xx_spi_suspend(struct device *dev)
+{
+ struct spi_master *master =
+ platform_get_drvdata(to_platform_device(dev));
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ clk_disable(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_spi_resume(struct device *dev)
+{
+ struct spi_master *master =
+ platform_get_drvdata(to_platform_device(dev));
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ clk_enable(bs->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
+ .suspend = bcm63xx_spi_suspend,
+ .resume = bcm63xx_spi_resume,
+};
+
+#define BCM63XX_SPI_PM_OPS (&bcm63xx_spi_pm_ops)
+#else
+#define BCM63XX_SPI_PM_OPS NULL
+#endif
+
+static struct platform_driver bcm63xx_spi_driver = {
+ .driver = {
+ .name = "bcm63xx-spi",
+ .owner = THIS_MODULE,
+ .pm = BCM63XX_SPI_PM_OPS,
+ },
+ .probe = bcm63xx_spi_probe,
+ .remove = __devexit_p(bcm63xx_spi_remove),
+};
+
+module_platform_driver(bcm63xx_spi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_spi");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>");
+MODULE_DESCRIPTION("Broadcom BCM63xx SPI Controller driver");
+MODULE_LICENSE("GPL");
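
The controller above takes its bus number, chip-select count and FIFO size from platform data and relies on the standard board-info mechanism to describe slaves. A hypothetical board file hooking a serial flash onto bus 0 of this controller could look roughly like the following; the modalias, speed and chip-select values are made up for illustration (the speed matches one of the entries in the driver's frequency table):

    #include <linux/init.h>
    #include <linux/spi/spi.h>

    static struct spi_board_info board_spi_devices[] __initdata = {
            {
                    .modalias     = "m25p80",       /* example slave driver */
                    .max_speed_hz = 781000,         /* SPI_CLK_0_781MHZ above */
                    .bus_num      = 0,              /* must match pdata->bus_num */
                    .chip_select  = 0,
                    .mode         = SPI_MODE_0,
            },
    };

    /* called from the machine setup code */
    static int __init board_register_spi(void)
    {
            return spi_register_board_info(board_spi_devices,
                                           ARRAY_SIZE(board_spi_devices));
    }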
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index f64250ea1611..14f7cc9523f0 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
#define spi_resume NULL
#endif
-static const struct pci_device_id pci_ids[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
/* Intel MID platform SPI controller 0 */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
{},
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index d770f03705c3..7523a2429d09 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -180,18 +180,20 @@ static int fsl_espi_setup_transfer(struct spi_device *spi,
if ((mpc8xxx_spi->spibrg / hz) > 64) {
cs->hw_mode |= CSMODE_DIV16;
- pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
+ pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);
- WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
+ WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. "
"Will use %d Hz instead.\n", dev_name(&spi->dev),
- hz, mpc8xxx_spi->spibrg / 1024);
- if (pm > 16)
- pm = 16;
+ hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
+ if (pm > 33)
+ pm = 33;
} else {
- pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
+ pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
}
if (pm)
pm--;
+ if (pm < 2)
+ pm = 2;
cs->hw_mode |= CSMODE_PM(pm);
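
The net effect of the divider rework above is easier to see with numbers. A worked example, assuming a hypothetical system clock (spibrg) of 66 MHz:

    /* spibrg = 66 MHz, requested hz = 100 kHz (both values assumed)   */
    /* spibrg / hz = 660 > 64                 -> CSMODE_DIV16 is set   */
    /* pm = DIV_ROUND_UP(66000000, 100000 * 16 * 4) = 11               */
    /* 11 <= 33, then pm-- -> 10, and 10 >= 2 -> CSMODE_PM(10)         */
    /* effective rate = 66000000 / (16 * 4 * (10 + 1)) = 93750 Hz      */

Rounding the divider up keeps the bus at or below the requested speed, and clamping to 2..33 before the decrement keeps the PM register field within the 2..32 range the commit title refers to.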
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c6e697f5e007..31054e3de4c1 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -793,13 +793,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
if (ret) {
- while (i > 0) {
- i--;
- if (spi_imx->chipselect[i] >= 0)
- gpio_free(spi_imx->chipselect[i]);
- }
dev_err(&pdev->dev, "can't get cs gpios\n");
- goto out_master_put;
+ goto out_gpio_free;
}
}
@@ -881,10 +876,10 @@ out_iounmap:
out_release_mem:
release_mem_region(res->start, resource_size(res));
out_gpio_free:
- for (i = 0; i < master->num_chipselect; i++)
+ while (--i >= 0) {
if (spi_imx->chipselect[i] >= 0)
gpio_free(spi_imx->chipselect[i]);
-out_master_put:
+ }
spi_master_put(master);
kfree(master);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 182e9c873822..dae8be229c5d 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -360,8 +360,6 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
}
hw = spi_master_get_devdata(master);
- memset(hw, 0, sizeof(struct nuc900_spi));
-
hw->master = spi_master_get(master);
hw->pdata = pdev->dev.platform_data;
hw->dev = &pdev->dev;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0b0dfb71c640..bb9274c2526d 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -34,6 +34,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/spi/spi.h>
@@ -1079,15 +1081,39 @@ static int omap_mcspi_runtime_resume(struct device *dev)
return 0;
}
+static struct omap2_mcspi_platform_config omap2_pdata = {
+ .regs_offset = 0,
+};
+
+static struct omap2_mcspi_platform_config omap4_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+};
+
+static const struct of_device_id omap_mcspi_of_match[] = {
+ {
+ .compatible = "ti,omap2-mcspi",
+ .data = &omap2_pdata,
+ },
+ {
+ .compatible = "ti,omap4-mcspi",
+ .data = &omap4_pdata,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
- struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
+ struct omap2_mcspi_platform_config *pdata;
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
char wq_name[20];
+ u32 regs_offset = 0;
+ static int bus_num = 1;
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1098,13 +1124,26 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- if (pdev->id != -1)
- master->bus_num = pdev->id;
-
master->setup = omap2_mcspi_setup;
master->transfer = omap2_mcspi_transfer;
master->cleanup = omap2_mcspi_cleanup;
- master->num_chipselect = pdata->num_cs;
+ master->dev.of_node = node;
+
+ match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+ if (match) {
+ u32 num_cs = 1; /* default number of chipselect */
+ pdata = match->data;
+
+ of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
+ master->num_chipselect = num_cs;
+ master->bus_num = bus_num++;
+ } else {
+ pdata = pdev->dev.platform_data;
+ master->num_chipselect = pdata->num_cs;
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+ }
+ regs_offset = pdata->regs_offset;
dev_set_drvdata(&pdev->dev, master);
@@ -1124,8 +1163,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
goto free_master;
}
- r->start += pdata->regs_offset;
- r->end += pdata->regs_offset;
+ r->start += regs_offset;
+ r->end += regs_offset;
mcspi->phys = r->start;
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
@@ -1285,7 +1324,8 @@ static struct platform_driver omap2_mcspi_driver = {
.driver = {
.name = "omap2_mcspi",
.owner = THIS_MODULE,
- .pm = &omap2_mcspi_pm_ops
+ .pm = &omap2_mcspi_pm_ops,
+ .of_match_table = omap_mcspi_of_match,
},
.remove = __exit_p(omap2_mcspi_remove),
};
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f37ad2271ad5..dc8485d1e883 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -29,7 +29,6 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
-#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -330,12 +329,13 @@ struct vendor_data {
* @clk: outgoing clock "SPICLK" for the SPI bus
* @master: SPI framework hookup
* @master_info: controller-specific data from machine setup
- * @workqueue: a workqueue on which any spi_message request is queued
- * @pump_messages: work struct for scheduling work to the workqueue
+ * @kworker: thread struct for message pump
+ * @kworker_task: pointer to task for message pump kworker thread
+ * @pump_messages: work struct for scheduling work to the message pump
* @queue_lock: spinlock to syncronise access to message queue
* @queue: message queue
- * @busy: workqueue is busy
- * @running: workqueue is running
+ * @busy: message pump is busy
+ * @running: message pump is running
* @pump_transfers: Tasklet used in Interrupt Transfer mode
* @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
@@ -365,14 +365,7 @@ struct pl022 {
struct clk *clk;
struct spi_master *master;
struct pl022_ssp_controller *master_info;
- /* Driver message queue */
- struct workqueue_struct *workqueue;
- struct work_struct pump_messages;
- spinlock_t queue_lock;
- struct list_head queue;
- bool busy;
- bool running;
- /* Message transfer pump */
+ /* Message per-transfer pump */
struct tasklet_struct pump_transfers;
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
@@ -394,6 +387,7 @@ struct pl022 {
struct sg_table sgt_rx;
struct sg_table sgt_tx;
char *dummypage;
+ bool dma_running;
#endif
};
@@ -448,8 +442,6 @@ static void null_cs_control(u32 command)
static void giveback(struct pl022 *pl022)
{
struct spi_transfer *last_transfer;
- unsigned long flags;
- struct spi_message *msg;
pl022->next_msg_cs_active = false;
last_transfer = list_entry(pl022->cur_msg->transfers.prev,
@@ -477,15 +469,8 @@ static void giveback(struct pl022 *pl022)
* sent the current message could be unloaded, which
* could invalidate the cs_control() callback...
*/
-
/* get a pointer to the next message, if any */
- spin_lock_irqsave(&pl022->queue_lock, flags);
- if (list_empty(&pl022->queue))
- next_msg = NULL;
- else
- next_msg = list_entry(pl022->queue.next,
- struct spi_message, queue);
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ next_msg = spi_get_next_queued_message(pl022->master);
/*
* see if the next and current messages point
@@ -497,19 +482,13 @@ static void giveback(struct pl022 *pl022)
pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
else
pl022->next_msg_cs_active = true;
+
}
- spin_lock_irqsave(&pl022->queue_lock, flags);
- msg = pl022->cur_msg;
pl022->cur_msg = NULL;
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
- queue_work(pl022->workqueue, &pl022->pump_messages);
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
- msg->state = NULL;
- if (msg->complete)
- msg->complete(msg->context);
+ spi_finalize_current_message(pl022->master);
}
/**
@@ -1063,6 +1042,7 @@ static int configure_dma(struct pl022 *pl022)
dmaengine_submit(txdesc);
dma_async_issue_pending(rxchan);
dma_async_issue_pending(txchan);
+ pl022->dma_running = true;
return 0;
@@ -1141,11 +1121,12 @@ static void terminate_dma(struct pl022 *pl022)
dmaengine_terminate_all(rxchan);
dmaengine_terminate_all(txchan);
unmap_free_dma_scatter(pl022);
+ pl022->dma_running = false;
}
static void pl022_dma_remove(struct pl022 *pl022)
{
- if (pl022->busy)
+ if (pl022->dma_running)
terminate_dma(pl022);
if (pl022->dma_tx_channel)
dma_release_channel(pl022->dma_tx_channel);
@@ -1493,73 +1474,20 @@ out:
return;
}
-/**
- * pump_messages - Workqueue function which processes spi message queue
- * @data: pointer to private data of SSP driver
- *
- * This function checks if there is any spi message in the queue that
- * needs processing and delegate control to appropriate function
- * do_polling_transfer()/do_interrupt_dma_transfer()
- * based on the kind of the transfer
- *
- */
-static void pump_messages(struct work_struct *work)
+static int pl022_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct pl022 *pl022 =
- container_of(work, struct pl022, pump_messages);
- unsigned long flags;
- bool was_busy = false;
-
- /* Lock queue and check for queue work */
- spin_lock_irqsave(&pl022->queue_lock, flags);
- if (list_empty(&pl022->queue) || !pl022->running) {
- if (pl022->busy) {
- /* nothing more to do - disable spi/ssp and power off */
- writew((readw(SSP_CR1(pl022->virtbase)) &
- (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
-
- if (pl022->master_info->autosuspend_delay > 0) {
- pm_runtime_mark_last_busy(&pl022->adev->dev);
- pm_runtime_put_autosuspend(&pl022->adev->dev);
- } else {
- pm_runtime_put(&pl022->adev->dev);
- }
- }
- pl022->busy = false;
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
- return;
- }
-
- /* Make sure we are not already running a message */
- if (pl022->cur_msg) {
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
- return;
- }
- /* Extract head of queue */
- pl022->cur_msg =
- list_entry(pl022->queue.next, struct spi_message, queue);
-
- list_del_init(&pl022->cur_msg->queue);
- if (pl022->busy)
- was_busy = true;
- else
- pl022->busy = true;
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
+ struct pl022 *pl022 = spi_master_get_devdata(master);
/* Initial message state */
- pl022->cur_msg->state = STATE_START;
- pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
- struct spi_transfer, transfer_list);
+ pl022->cur_msg = msg;
+ msg->state = STATE_START;
+
+ pl022->cur_transfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
/* Setup the SPI using the per chip configuration */
- pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
- if (!was_busy)
- /*
- * We enable the core voltage and clocks here, then the clocks
- * and core will be disabled when this workqueue is run again
- * and there is no more work to be done.
- */
- pm_runtime_get_sync(&pl022->adev->dev);
+ pl022->cur_chip = spi_get_ctldata(msg->spi);
restore_state(pl022);
flush(pl022);
@@ -1568,95 +1496,37 @@ static void pump_messages(struct work_struct *work)
do_polling_transfer(pl022);
else
do_interrupt_dma_transfer(pl022);
-}
-
-static int __init init_queue(struct pl022 *pl022)
-{
- INIT_LIST_HEAD(&pl022->queue);
- spin_lock_init(&pl022->queue_lock);
-
- pl022->running = false;
- pl022->busy = false;
-
- tasklet_init(&pl022->pump_transfers, pump_transfers,
- (unsigned long)pl022);
-
- INIT_WORK(&pl022->pump_messages, pump_messages);
- pl022->workqueue = create_singlethread_workqueue(
- dev_name(pl022->master->dev.parent));
- if (pl022->workqueue == NULL)
- return -EBUSY;
return 0;
}
-static int start_queue(struct pl022 *pl022)
+static int pl022_prepare_transfer_hardware(struct spi_master *master)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pl022->queue_lock, flags);
-
- if (pl022->running || pl022->busy) {
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
- return -EBUSY;
- }
-
- pl022->running = true;
- pl022->cur_msg = NULL;
- pl022->cur_transfer = NULL;
- pl022->cur_chip = NULL;
- pl022->next_msg_cs_active = false;
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
- queue_work(pl022->workqueue, &pl022->pump_messages);
+ struct pl022 *pl022 = spi_master_get_devdata(master);
+ /*
+ * Just make sure we have all we need to run the transfer by syncing
+ * with the runtime PM framework.
+ */
+ pm_runtime_get_sync(&pl022->adev->dev);
return 0;
}
-static int stop_queue(struct pl022 *pl022)
+static int pl022_unprepare_transfer_hardware(struct spi_master *master)
{
- unsigned long flags;
- unsigned limit = 500;
- int status = 0;
+ struct pl022 *pl022 = spi_master_get_devdata(master);
- spin_lock_irqsave(&pl022->queue_lock, flags);
+ /* nothing more to do - disable spi/ssp and power off */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
- /* This is a bit lame, but is optimized for the common execution path.
- * A wait_queue on the pl022->busy could be used, but then the common
- * execution path (pump_messages) would be required to call wake_up or
- * friends on every SPI message. Do this instead */
- while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
- msleep(10);
- spin_lock_irqsave(&pl022->queue_lock, flags);
+ if (pl022->master_info->autosuspend_delay > 0) {
+ pm_runtime_mark_last_busy(&pl022->adev->dev);
+ pm_runtime_put_autosuspend(&pl022->adev->dev);
+ } else {
+ pm_runtime_put(&pl022->adev->dev);
}
- if (!list_empty(&pl022->queue) || pl022->busy)
- status = -EBUSY;
- else
- pl022->running = false;
-
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
- return status;
-}
-
-static int destroy_queue(struct pl022 *pl022)
-{
- int status;
-
- status = stop_queue(pl022);
- /* we are unloading the module or failing to load (only two calls
- * to this routine), and neither call can handle a return value.
- * However, destroy_workqueue calls flush_workqueue, and that will
- * block until all work is done. If the reason that stop_queue
- * timed out is that the work will never finish, then it does no
- * good to call destroy_workqueue, so return anyway. */
- if (status != 0)
- return status;
-
- destroy_workqueue(pl022->workqueue);
-
return 0;
}
@@ -1776,38 +1646,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
return 0;
}
-/**
- * pl022_transfer - transfer function registered to SPI master framework
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- *
- * This function is registered to the SPI framework for this SPI master
- * controller. It will queue the spi_message in the queue of driver if
- * the queue is not stopped and return.
- */
-static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
-{
- struct pl022 *pl022 = spi_master_get_devdata(spi->master);
- unsigned long flags;
-
- spin_lock_irqsave(&pl022->queue_lock, flags);
-
- if (!pl022->running) {
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
- return -ESHUTDOWN;
- }
- msg->actual_length = 0;
- msg->status = -EINPROGRESS;
- msg->state = STATE_START;
-
- list_add_tail(&msg->queue, &pl022->queue);
- if (pl022->running && !pl022->busy)
- queue_work(pl022->workqueue, &pl022->pump_messages);
-
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
- return 0;
-}
-
static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
{
return rate / (cpsdvsr * (1 + scr));
@@ -2170,7 +2008,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
master->num_chipselect = platform_info->num_chipselect;
master->cleanup = pl022_cleanup;
master->setup = pl022_setup;
- master->transfer = pl022_transfer;
+ master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
+ master->transfer_one_message = pl022_transfer_one_message;
+ master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
+ master->rt = platform_info->rt;
/*
* Supports mode 0-3, loopback, and active low CS. Transfers are
@@ -2214,6 +2055,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk_en;
}
+ /* Initialize transfer pump */
+ tasklet_init(&pl022->pump_transfers, pump_transfers,
+ (unsigned long)pl022);
+
/* Disable SSP */
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
@@ -2233,17 +2078,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
platform_info->enable_dma = 0;
}
- /* Initialize and start queue */
- status = init_queue(pl022);
- if (status != 0) {
- dev_err(&adev->dev, "probe - problem initializing queue\n");
- goto err_init_queue;
- }
- status = start_queue(pl022);
- if (status != 0) {
- dev_err(&adev->dev, "probe - problem starting queue\n");
- goto err_start_queue;
- }
/* Register with the SPI framework */
amba_set_drvdata(adev, pl022);
status = spi_register_master(master);
@@ -2269,9 +2103,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_spi_register:
- err_start_queue:
- err_init_queue:
- destroy_queue(pl022);
if (platform_info->enable_dma)
pl022_dma_remove(pl022);
@@ -2307,9 +2138,6 @@ pl022_remove(struct amba_device *adev)
*/
pm_runtime_get_noresume(&adev->dev);
- /* Remove the queue */
- if (destroy_queue(pl022) != 0)
- dev_err(&adev->dev, "queue remove failed\n");
load_ssp_default_config(pl022);
if (pl022->master_info->enable_dma)
pl022_dma_remove(pl022);
@@ -2331,12 +2159,12 @@ pl022_remove(struct amba_device *adev)
static int pl022_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- int status = 0;
+ int ret;
- status = stop_queue(pl022);
- if (status) {
- dev_warn(dev, "suspend cannot stop queue\n");
- return status;
+ ret = spi_master_suspend(pl022->master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
}
dev_dbg(dev, "suspended\n");
@@ -2346,16 +2174,16 @@ static int pl022_suspend(struct device *dev)
static int pl022_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- int status = 0;
+ int ret;
/* Start the queue running */
- status = start_queue(pl022);
- if (status)
- dev_err(dev, "problem starting queue (%d)\n", status);
+ ret = spi_master_resume(pl022->master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
else
dev_dbg(dev, "resumed\n");
- return status;
+ return ret;
}
#endif /* CONFIG_PM */
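
The suspend/resume conversion above generalizes to any driver moved onto the core queue. A minimal sketch of system PM hooks built on the new spi_master_suspend()/spi_master_resume() helpers; the foo_* names are placeholders and the sketch assumes probe() stored the master pointer with dev_set_drvdata():

    #include <linux/clk.h>
    #include <linux/pm.h>
    #include <linux/spi/spi.h>

    struct foo_spi {                    /* hypothetical driver state */
            struct clk *clk;
    };

    static int foo_spi_suspend(struct device *dev)
    {
            struct spi_master *master = dev_get_drvdata(dev);
            struct foo_spi *fs = spi_master_get_devdata(master);
            int ret;

            /* drain and stop the core message queue first */
            ret = spi_master_suspend(master);
            if (ret)
                    return ret;

            clk_disable(fs->clk);
            return 0;
    }

    static int foo_spi_resume(struct device *dev)
    {
            struct spi_master *master = dev_get_drvdata(dev);
            struct foo_spi *fs = spi_master_get_devdata(master);

            clk_enable(fs->clk);

            /* restart the queue once the hardware is usable again */
            return spi_master_resume(master);
    }

    static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);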
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 8caa07d58e69..3fb44afe27b4 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -151,7 +151,7 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
kfree(spi_info);
}
-static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
{ },
};
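
The two DEFINE_PCI_DEVICE_TABLE conversions in this series (here and in spi-dw-pci.c above) replace open-coded ID arrays with the canonical constified form. At the time of this series the macro expands to roughly the following, so the table is also placed in the const devinit section:

    /* from include/linux/pci.h, paraphrased */
    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[] __devinitconst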
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
new file mode 100644
index 000000000000..354f170eab95
--- /dev/null
+++ b/drivers/spi/spi-rspi.c
@@ -0,0 +1,521 @@
+/*
+ * SH RSPI driver
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * Based on spi-sh.c:
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+
+#define RSPI_SPCR 0x00
+#define RSPI_SSLP 0x01
+#define RSPI_SPPCR 0x02
+#define RSPI_SPSR 0x03
+#define RSPI_SPDR 0x04
+#define RSPI_SPSCR 0x08
+#define RSPI_SPSSR 0x09
+#define RSPI_SPBR 0x0a
+#define RSPI_SPDCR 0x0b
+#define RSPI_SPCKD 0x0c
+#define RSPI_SSLND 0x0d
+#define RSPI_SPND 0x0e
+#define RSPI_SPCR2 0x0f
+#define RSPI_SPCMD0 0x10
+#define RSPI_SPCMD1 0x12
+#define RSPI_SPCMD2 0x14
+#define RSPI_SPCMD3 0x16
+#define RSPI_SPCMD4 0x18
+#define RSPI_SPCMD5 0x1a
+#define RSPI_SPCMD6 0x1c
+#define RSPI_SPCMD7 0x1e
+
+/* SPCR */
+#define SPCR_SPRIE 0x80
+#define SPCR_SPE 0x40
+#define SPCR_SPTIE 0x20
+#define SPCR_SPEIE 0x10
+#define SPCR_MSTR 0x08
+#define SPCR_MODFEN 0x04
+#define SPCR_TXMD 0x02
+#define SPCR_SPMS 0x01
+
+/* SSLP */
+#define SSLP_SSL1P 0x02
+#define SSLP_SSL0P 0x01
+
+/* SPPCR */
+#define SPPCR_MOIFE 0x20
+#define SPPCR_MOIFV 0x10
+#define SPPCR_SPOM 0x04
+#define SPPCR_SPLP2 0x02
+#define SPPCR_SPLP 0x01
+
+/* SPSR */
+#define SPSR_SPRF 0x80
+#define SPSR_SPTEF 0x20
+#define SPSR_PERF 0x08
+#define SPSR_MODF 0x04
+#define SPSR_IDLNF 0x02
+#define SPSR_OVRF 0x01
+
+/* SPSCR */
+#define SPSCR_SPSLN_MASK 0x07
+
+/* SPSSR */
+#define SPSSR_SPECM_MASK 0x70
+#define SPSSR_SPCP_MASK 0x07
+
+/* SPDCR */
+#define SPDCR_SPLW 0x20
+#define SPDCR_SPRDTD 0x10
+#define SPDCR_SLSEL1 0x08
+#define SPDCR_SLSEL0 0x04
+#define SPDCR_SLSEL_MASK 0x0c
+#define SPDCR_SPFC1 0x02
+#define SPDCR_SPFC0 0x01
+
+/* SPCKD */
+#define SPCKD_SCKDL_MASK 0x07
+
+/* SSLND */
+#define SSLND_SLNDL_MASK 0x07
+
+/* SPND */
+#define SPND_SPNDL_MASK 0x07
+
+/* SPCR2 */
+#define SPCR2_PTE 0x08
+#define SPCR2_SPIE 0x04
+#define SPCR2_SPOE 0x02
+#define SPCR2_SPPE 0x01
+
+/* SPCMDn */
+#define SPCMD_SCKDEN 0x8000
+#define SPCMD_SLNDEN 0x4000
+#define SPCMD_SPNDEN 0x2000
+#define SPCMD_LSBF 0x1000
+#define SPCMD_SPB_MASK 0x0f00
+#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
+#define SPCMD_SPB_20BIT 0x0000
+#define SPCMD_SPB_24BIT 0x0100
+#define SPCMD_SPB_32BIT 0x0200
+#define SPCMD_SSLKP 0x0080
+#define SPCMD_SSLA_MASK 0x0030
+#define SPCMD_BRDV_MASK 0x000c
+#define SPCMD_CPOL 0x0002
+#define SPCMD_CPHA 0x0001
+
+struct rspi_data {
+ void __iomem *addr;
+ u32 max_speed_hz;
+ struct spi_master *master;
+ struct list_head queue;
+ struct work_struct ws;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+ struct clk *clk;
+ unsigned char spsr;
+};
+
+static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
+{
+ iowrite8(data, rspi->addr + offset);
+}
+
+static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
+{
+ iowrite16(data, rspi->addr + offset);
+}
+
+static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
+{
+ return ioread8(rspi->addr + offset);
+}
+
+static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
+{
+ return ioread16(rspi->addr + offset);
+}
+
+static unsigned char rspi_calc_spbr(struct rspi_data *rspi)
+{
+ int tmp;
+ unsigned char spbr;
+
+ tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
+ spbr = clamp(tmp, 0, 255);
+
+ return spbr;
+}
+
+static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
+}
+
+static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
+}
+
+static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
+ u8 enable_bit)
+{
+ int ret;
+
+ rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
+ rspi_enable_irq(rspi, enable_bit);
+ ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
+ if (ret == 0 && !(rspi->spsr & wait_mask))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void rspi_assert_ssl(struct rspi_data *rspi)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
+}
+
+static void rspi_negate_ssl(struct rspi_data *rspi)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+}
+
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
+ rspi_write8(rspi, 0x00, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR);
+
+ /* Sets number of frames to be used: 1 frame */
+ rspi_write8(rspi, 0x00, RSPI_SPDCR);
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets parity, interrupt mask */
+ rspi_write8(rspi, 0x00, RSPI_SPCR2);
+
+ /* Sets SPCMD */
+ rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
+ RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int remain = t->len;
+ u8 *data;
+
+ data = (u8 *)t->tx_buf;
+ while (remain > 0) {
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
+ RSPI_SPCR);
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: tx empty timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rspi_write16(rspi, *data, RSPI_SPDR);
+ data++;
+ remain--;
+ }
+
+ /* Waiting for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
+static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int remain = t->len;
+ u8 *data;
+ unsigned char spsr;
+
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read16(rspi, RSPI_SPDR); /* dummy read */
+ if (spsr & SPSR_OVRF)
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
+ RSPI_SPCR);
+
+ data = (u8 *)t->rx_buf;
+ while (remain > 0) {
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
+ RSPI_SPCR);
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: tx empty timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ /* dummy write to generate the clock */
+ rspi_write16(rspi, 0x00, RSPI_SPDR);
+
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev,
+ "%s: receive timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ /* SPDR allows 16 or 32-bit access only */
+ *data = (u8)rspi_read16(rspi, RSPI_SPDR);
+
+ data++;
+ remain--;
+ }
+
+ return 0;
+}
+
+static void rspi_work(struct work_struct *work)
+{
+ struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
+ struct spi_message *mesg;
+ struct spi_transfer *t;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&rspi->lock, flags);
+ while (!list_empty(&rspi->queue)) {
+ mesg = list_entry(rspi->queue.next, struct spi_message, queue);
+ list_del_init(&mesg->queue);
+ spin_unlock_irqrestore(&rspi->lock, flags);
+
+ rspi_assert_ssl(rspi);
+
+ list_for_each_entry(t, &mesg->transfers, transfer_list) {
+ if (t->tx_buf) {
+ ret = rspi_send_pio(rspi, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ if (t->rx_buf) {
+ ret = rspi_receive_pio(rspi, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ mesg->actual_length += t->len;
+ }
+ rspi_negate_ssl(rspi);
+
+ mesg->status = 0;
+ mesg->complete(mesg->context);
+
+ spin_lock_irqsave(&rspi->lock, flags);
+ }
+
+ return;
+
+error:
+ mesg->status = ret;
+ mesg->complete(mesg->context);
+}
+
+static int rspi_setup(struct spi_device *spi)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(spi->master);
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+ rspi->max_speed_hz = spi->max_speed_hz;
+
+ rspi_set_config_register(rspi, 8);
+
+ return 0;
+}
+
+static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ mesg->actual_length = 0;
+ mesg->status = -EINPROGRESS;
+
+ spin_lock_irqsave(&rspi->lock, flags);
+ list_add_tail(&mesg->queue, &rspi->queue);
+ schedule_work(&rspi->ws);
+ spin_unlock_irqrestore(&rspi->lock, flags);
+
+ return 0;
+}
+
+static void rspi_cleanup(struct spi_device *spi)
+{
+}
+
+static irqreturn_t rspi_irq(int irq, void *_sr)
+{
+ struct rspi_data *rspi = (struct rspi_data *)_sr;
+ unsigned long spsr;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned char disable_irq = 0;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ disable_irq |= SPCR_SPRIE;
+ if (spsr & SPSR_SPTEF)
+ disable_irq |= SPCR_SPTIE;
+
+ if (disable_irq) {
+ ret = IRQ_HANDLED;
+ rspi_disable_irq(rspi, disable_irq);
+ wake_up(&rspi->wait);
+ }
+
+ return ret;
+}
+
+static int __devexit rspi_remove(struct platform_device *pdev)
+{
+ struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
+
+ spi_unregister_master(rspi->master);
+ free_irq(platform_get_irq(pdev, 0), rspi);
+ clk_put(rspi->clk);
+ iounmap(rspi->addr);
+ spi_master_put(rspi->master);
+
+ return 0;
+}
+
+static int __devinit rspi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct rspi_data *rspi;
+ int ret, irq;
+ char clk_name[16];
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ rspi = spi_master_get_devdata(master);
+ dev_set_drvdata(&pdev->dev, rspi);
+
+ rspi->master = master;
+ rspi->addr = ioremap(res->start, resource_size(res));
+ if (rspi->addr == NULL) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id);
+ rspi->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(rspi->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(rspi->clk);
+ goto error2;
+ }
+ clk_enable(rspi->clk);
+
+ INIT_LIST_HEAD(&rspi->queue);
+ spin_lock_init(&rspi->lock);
+ INIT_WORK(&rspi->ws, rspi_work);
+ init_waitqueue_head(&rspi->wait);
+
+ master->num_chipselect = 2;
+ master->bus_num = pdev->id;
+ master->setup = rspi_setup;
+ master->transfer = rspi_transfer;
+ master->cleanup = rspi_cleanup;
+
+ ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq error\n");
+ goto error3;
+ }
+
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto error4;
+ }
+
+ dev_info(&pdev->dev, "probed\n");
+
+ return 0;
+
+error4:
+ free_irq(irq, rspi);
+error3:
+ clk_put(rspi->clk);
+error2:
+ iounmap(rspi->addr);
+error1:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static struct platform_driver rspi_driver = {
+ .probe = rspi_probe,
+ .remove = __devexit_p(rspi_remove),
+ .driver = {
+ .name = "rspi",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(rspi_driver);
+
+MODULE_DESCRIPTION("Renesas RSPI bus driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:rspi");
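
rspi_calc_spbr() above derives the SPBR divider directly from the peripheral clock. A worked example with an assumed 48 MHz parent clock:

    /* clk_get_rate(rspi->clk) = 48 MHz (assumed), max_speed_hz = 1 MHz */
    /* tmp  = 48000000 / (2 * 1000000) - 1 = 23                         */
    /* spbr = clamp(23, 0, 255)             = 23                        */
    /* bit rate = 48000000 / (2 * (23 + 1)) = 1 MHz exactly             */

Requests faster than half the parent clock clamp to spbr = 0 (the fastest setting), and very slow requests clamp to spbr = 255 (the slowest).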
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index dcf7e1006426..972a94c58be3 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -20,10 +20,12 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <mach/dma.h>
@@ -126,8 +128,6 @@
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-#define SUSPND (1<<0)
-#define SPIBUSY (1<<1)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
@@ -142,10 +142,8 @@ struct s3c64xx_spi_dma_data {
* @clk: Pointer to the spi clock.
* @src_clk: Pointer to the clock used to generate SPI signals.
* @master: Pointer to the SPI Protocol master.
- * @workqueue: Work queue for the SPI xfer requests.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
- * @work: Work
* @queue: To log SPI xfer requests.
* @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status.
@@ -153,6 +151,7 @@ struct s3c64xx_spi_dma_data {
* @tx_dmach: Controller's DMA channel for Tx.
* @sfr_start: BUS address of SPI controller regs.
* @regs: Pointer to ioremap'ed controller registers.
+ * @irq: Interrupt line used by the controller.
* @xfer_completion: To indicate completion of xfer task.
* @cur_mode: Stores the active configuration of the controller.
* @cur_bpw: Stores the active bits per word settings.
@@ -164,10 +163,8 @@ struct s3c64xx_spi_driver_data {
struct clk *src_clk;
struct platform_device *pdev;
struct spi_master *master;
- struct workqueue_struct *workqueue;
struct s3c64xx_spi_info *cntrlr_info;
struct spi_device *tgl_spi;
- struct work_struct work;
struct list_head queue;
spinlock_t lock;
unsigned long sfr_start;
@@ -239,7 +236,7 @@ static void s3c64xx_spi_dmacb(void *data)
struct s3c64xx_spi_dma_data *dma = data;
unsigned long flags;
- if (dma->direction == DMA_FROM_DEVICE)
+ if (dma->direction == DMA_DEV_TO_MEM)
sdd = container_of(data,
struct s3c64xx_spi_driver_data, rx_dma);
else
@@ -248,7 +245,7 @@ static void s3c64xx_spi_dmacb(void *data)
spin_lock_irqsave(&sdd->lock, flags);
- if (dma->direction == DMA_FROM_DEVICE) {
+ if (dma->direction == DMA_DEV_TO_MEM) {
sdd->state &= ~RXBUSY;
if (!(sdd->state & TXBUSY))
complete(&sdd->xfer_completion);
@@ -267,7 +264,7 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
struct s3c64xx_spi_driver_data *sdd;
struct samsung_dma_prep_info info;
- if (dma->direction == DMA_FROM_DEVICE)
+ if (dma->direction == DMA_DEV_TO_MEM)
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
else
@@ -634,9 +631,10 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
}
}
-static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
+static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
@@ -766,73 +764,33 @@ out:
msg->status = status;
- if (msg->complete)
- msg->complete(msg->context);
+ spi_finalize_current_message(master);
+
+ return 0;
}
-static void s3c64xx_spi_work(struct work_struct *work)
+static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
- struct s3c64xx_spi_driver_data *sdd = container_of(work,
- struct s3c64xx_spi_driver_data, work);
- unsigned long flags;
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/* Acquire DMA channels */
while (!acquire_dma(sdd))
msleep(10);
- spin_lock_irqsave(&sdd->lock, flags);
-
- while (!list_empty(&sdd->queue)
- && !(sdd->state & SUSPND)) {
-
- struct spi_message *msg;
-
- msg = container_of(sdd->queue.next, struct spi_message, queue);
-
- list_del_init(&msg->queue);
-
- /* Set Xfer busy flag */
- sdd->state |= SPIBUSY;
-
- spin_unlock_irqrestore(&sdd->lock, flags);
-
- handle_msg(sdd, msg);
+ pm_runtime_get_sync(&sdd->pdev->dev);
- spin_lock_irqsave(&sdd->lock, flags);
-
- sdd->state &= ~SPIBUSY;
- }
+ return 0;
+}
- spin_unlock_irqrestore(&sdd->lock, flags);
+static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/* Free DMA channels */
sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
-}
-static int s3c64xx_spi_transfer(struct spi_device *spi,
- struct spi_message *msg)
-{
- struct s3c64xx_spi_driver_data *sdd;
- unsigned long flags;
-
- sdd = spi_master_get_devdata(spi->master);
-
- spin_lock_irqsave(&sdd->lock, flags);
-
- if (sdd->state & SUSPND) {
- spin_unlock_irqrestore(&sdd->lock, flags);
- return -ESHUTDOWN;
- }
-
- msg->status = -EINPROGRESS;
- msg->actual_length = 0;
-
- list_add_tail(&msg->queue, &sdd->queue);
-
- queue_work(sdd->workqueue, &sdd->work);
-
- spin_unlock_irqrestore(&sdd->lock, flags);
+ pm_runtime_put(&sdd->pdev->dev);
return 0;
}
@@ -872,13 +830,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
}
- if (sdd->state & SUSPND) {
- spin_unlock_irqrestore(&sdd->lock, flags);
- dev_err(&spi->dev,
- "setup: SPI-%d not active!\n", spi->master->bus_num);
- return -ESHUTDOWN;
- }
-
spin_unlock_irqrestore(&sdd->lock, flags);
if (spi->bits_per_word != 8
@@ -890,6 +841,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
goto setup_exit;
}
+ pm_runtime_get_sync(&sdd->pdev->dev);
+
/* Check if we can provide the requested rate */
if (!sci->clk_from_cmu) {
u32 psr, speed;
@@ -922,6 +875,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
err = -EINVAL;
}
+ pm_runtime_put(&sdd->pdev->dev);
+
setup_exit:
/* setup() returns with device de-selected */
@@ -930,6 +885,33 @@ setup_exit:
return err;
}
+static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
+{
+ struct s3c64xx_spi_driver_data *sdd = data;
+ struct spi_master *spi = sdd->master;
+ unsigned int val;
+
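+	/* Latch the pending FIFO error sources and write them back to clear them */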
+ val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR);
+
+ val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
+ S3C64XX_SPI_PND_TX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+
+ writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+
+ if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
+ dev_err(&spi->dev, "RX overrun\n");
+ if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR)
+ dev_err(&spi->dev, "RX underrun\n");
+ if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR)
+ dev_err(&spi->dev, "TX overrun\n");
+ if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR)
+ dev_err(&spi->dev, "TX underrun\n");
+
+ return IRQ_HANDLED;
+}
+
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
@@ -970,7 +952,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci;
struct spi_master *master;
- int ret;
+ int ret, irq;
char clk_name[16];
if (pdev->id < 0) {
@@ -1006,6 +988,12 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
return -ENXIO;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
master = spi_alloc_master(&pdev->dev,
sizeof(struct s3c64xx_spi_driver_data));
if (master == NULL) {
@@ -1021,15 +1009,17 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
sdd->tx_dma.dmach = dmatx_res->start;
- sdd->tx_dma.direction = DMA_TO_DEVICE;
+ sdd->tx_dma.direction = DMA_MEM_TO_DEV;
sdd->rx_dma.dmach = dmarx_res->start;
- sdd->rx_dma.direction = DMA_FROM_DEVICE;
+ sdd->rx_dma.direction = DMA_DEV_TO_MEM;
sdd->cur_bpw = 8;
master->bus_num = pdev->id;
master->setup = s3c64xx_spi_setup;
- master->transfer = s3c64xx_spi_transfer;
+ master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
+ master->transfer_one_message = s3c64xx_spi_transfer_one_message;
+ master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
/* the spi->mode bits understood by this driver: */
@@ -1084,22 +1074,24 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err6;
}
- sdd->workqueue = create_singlethread_workqueue(
- dev_name(master->dev.parent));
- if (sdd->workqueue == NULL) {
- dev_err(&pdev->dev, "Unable to create workqueue\n");
- ret = -ENOMEM;
- goto err7;
- }
-
 	/* Setup Default Mode */
s3c64xx_spi_hwinit(sdd, pdev->id);
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
- INIT_WORK(&sdd->work, s3c64xx_spi_work);
INIT_LIST_HEAD(&sdd->queue);
+ ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
+ irq, ret);
+ goto err7;
+ }
+
+ writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
+ S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
+ sdd->regs + S3C64XX_SPI_INT_EN);
+
if (spi_register_master(master)) {
dev_err(&pdev->dev, "cannot register SPI master\n");
ret = -EBUSY;
@@ -1113,10 +1105,12 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
mem_res->end, mem_res->start,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
+ pm_runtime_enable(&pdev->dev);
+
return 0;
err8:
- destroy_workqueue(sdd->workqueue);
+ free_irq(irq, sdd);
err7:
clk_disable(sdd->src_clk);
err6:
@@ -1142,18 +1136,14 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct resource *mem_res;
- unsigned long flags;
-
- spin_lock_irqsave(&sdd->lock, flags);
- sdd->state |= SUSPND;
- spin_unlock_irqrestore(&sdd->lock, flags);
- while (sdd->state & SPIBUSY)
- msleep(10);
+ pm_runtime_disable(&pdev->dev);
spi_unregister_master(master);
- destroy_workqueue(sdd->workqueue);
+ writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
+
+ free_irq(platform_get_irq(pdev, 0), sdd);
clk_disable(sdd->src_clk);
clk_put(sdd->src_clk);
@@ -1174,18 +1164,12 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
+static int s3c64xx_spi_suspend(struct device *dev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- unsigned long flags;
-
- spin_lock_irqsave(&sdd->lock, flags);
- sdd->state |= SUSPND;
- spin_unlock_irqrestore(&sdd->lock, flags);
- while (sdd->state & SPIBUSY)
- msleep(10);
+ spi_master_suspend(master);
/* Disable the clock */
clk_disable(sdd->src_clk);
@@ -1196,12 +1180,12 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int s3c64xx_spi_resume(struct platform_device *pdev)
+static int s3c64xx_spi_resume(struct device *dev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
- unsigned long flags;
sci->cfg_gpio(pdev);
@@ -1211,25 +1195,49 @@ static int s3c64xx_spi_resume(struct platform_device *pdev)
s3c64xx_spi_hwinit(sdd, pdev->id);
- spin_lock_irqsave(&sdd->lock, flags);
- sdd->state &= ~SUSPND;
- spin_unlock_irqrestore(&sdd->lock, flags);
+ spi_master_resume(master);
return 0;
}
-#else
-#define s3c64xx_spi_suspend NULL
-#define s3c64xx_spi_resume NULL
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_RUNTIME
+static int s3c64xx_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ clk_disable(sdd->clk);
+ clk_disable(sdd->src_clk);
+
+ return 0;
+}
+
+static int s3c64xx_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ clk_enable(sdd->src_clk);
+ clk_enable(sdd->clk);
+
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops s3c64xx_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
+ SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
+ s3c64xx_spi_runtime_resume, NULL)
+};
+
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
.name = "s3c64xx-spi",
.owner = THIS_MODULE,
+ .pm = &s3c64xx_spi_pm,
},
.remove = s3c64xx_spi_remove,
- .suspend = s3c64xx_spi_suspend,
- .resume = s3c64xx_spi_resume,
};
MODULE_ALIAS("platform:s3c64xx-spi");
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
new file mode 100644
index 000000000000..934138c7b3d3
--- /dev/null
+++ b/drivers/spi/spi-sh-hspi.c
@@ -0,0 +1,331 @@
+/*
+ * SuperH HSPI bus driver
+ *
+ * Copyright (C) 2011 Kuninori Morimoto
+ *
+ * Based on spi-sh.c:
+ * Based on pxa2xx_spi.c:
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/sh_hspi.h>
+
+#define SPCR 0x00
+#define SPSR 0x04
+#define SPSCR 0x08
+#define SPTBR 0x0C
+#define SPRBR 0x10
+#define SPCR2 0x14
+
+/* SPSR */
+#define RXFL (1 << 2)
+
+#define hspi2info(h) (h->dev->platform_data)
+
+struct hspi_priv {
+ void __iomem *addr;
+ struct spi_master *master;
+ struct device *dev;
+ struct clk *clk;
+};
+
+/*
+ * basic function
+ */
+static void hspi_write(struct hspi_priv *hspi, int reg, u32 val)
+{
+ iowrite32(val, hspi->addr + reg);
+}
+
+static u32 hspi_read(struct hspi_priv *hspi, int reg)
+{
+ return ioread32(hspi->addr + reg);
+}
+
+/*
+ * transfer function
+ */
+static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
+{
+ int t = 256;
+
+ while (t--) {
+ if ((mask & hspi_read(hspi, SPSR)) == val)
+ return 0;
+
+ msleep(20);
+ }
+
+ dev_err(hspi->dev, "timeout\n");
+ return -ETIMEDOUT;
+}
+
+/*
+ * spi master function
+ */
+static int hspi_prepare_transfer(struct spi_master *master)
+{
+ struct hspi_priv *hspi = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(hspi->dev);
+ return 0;
+}
+
+static int hspi_unprepare_transfer(struct spi_master *master)
+{
+ struct hspi_priv *hspi = spi_master_get_devdata(master);
+
+ pm_runtime_put_sync(hspi->dev);
+ return 0;
+}
+
+static void hspi_hw_setup(struct hspi_priv *hspi,
+ struct spi_message *msg,
+ struct spi_transfer *t)
+{
+ struct spi_device *spi = msg->spi;
+ struct device *dev = hspi->dev;
+ u32 target_rate;
+ u32 spcr, idiv_clk;
+ u32 rate, best_rate, min, tmp;
+
+ target_rate = t ? t->speed_hz : 0;
+ if (!target_rate)
+ target_rate = spi->max_speed_hz;
+
+ /*
+ * find best IDIV/CLKCx settings
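+	 * The effective rate is the input clock divided by 16 or 128
+	 * (the IDIV bit) and then by ((CLKCx + 1) * 2); all 64 encodings
+	 * are scanned and the closest match to the target rate is kept.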
+ */
+ min = ~0;
+ best_rate = 0;
+ spcr = 0;
+ for (idiv_clk = 0x00; idiv_clk <= 0x3F; idiv_clk++) {
+ rate = clk_get_rate(hspi->clk);
+
+ /* IDIV calculation */
+ if (idiv_clk & (1 << 5))
+ rate /= 128;
+ else
+ rate /= 16;
+
+ /* CLKCx calculation */
+		rate /= (((idiv_clk & 0x1F) + 1) * 2);
+
+ /* save best settings */
+ tmp = abs(target_rate - rate);
+ if (tmp < min) {
+ min = tmp;
+ spcr = idiv_clk;
+ best_rate = rate;
+ }
+ }
+
+ if (spi->mode & SPI_CPHA)
+ spcr |= 1 << 7;
+ if (spi->mode & SPI_CPOL)
+ spcr |= 1 << 6;
+
+ dev_dbg(dev, "speed %d/%d\n", target_rate, best_rate);
+
+ hspi_write(hspi, SPCR, spcr);
+ hspi_write(hspi, SPSR, 0x0);
+ hspi_write(hspi, SPSCR, 0x1); /* master mode */
+}
+
+static int hspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct hspi_priv *hspi = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ u32 tx;
+ u32 rx;
+ int ret, i;
+
+ dev_dbg(hspi->dev, "%s\n", __func__);
+
+ ret = 0;
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ hspi_hw_setup(hspi, msg, t);
+
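+		/* PIO transfer: shift one byte out, then read the reply back */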
+ for (i = 0; i < t->len; i++) {
+
+			/* wait until the transmit register is free */
+ ret = hspi_status_check_timeout(hspi, 0x1, 0);
+ if (ret < 0)
+ break;
+
+ tx = 0;
+ if (t->tx_buf)
+ tx = (u32)((u8 *)t->tx_buf)[i];
+
+ hspi_write(hspi, SPTBR, tx);
+
+			/* wait for receive data */
+ ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
+ if (ret < 0)
+ break;
+
+ rx = hspi_read(hspi, SPRBR);
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[i] = (u8)rx;
+
+ }
+
+ msg->actual_length += t->len;
+ }
+
+ msg->status = ret;
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+static int hspi_setup(struct spi_device *spi)
+{
+ struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
+ struct device *dev = hspi->dev;
+
+ if (8 != spi->bits_per_word) {
+ dev_err(dev, "bits_per_word should be 8\n");
+ return -EIO;
+ }
+
+ dev_dbg(dev, "%s setup\n", spi->modalias);
+
+ return 0;
+}
+
+static void hspi_cleanup(struct spi_device *spi)
+{
+ struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
+ struct device *dev = hspi->dev;
+
+ dev_dbg(dev, "%s cleanup\n", spi->modalias);
+}
+
+static int __devinit hspi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct hspi_priv *hspi;
+ struct clk *clk;
+ int ret;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hspi));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ clk = clk_get(NULL, "shyway_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "shyway_clk is required\n");
+		ret = PTR_ERR(clk);
+ goto error0;
+ }
+
+ hspi = spi_master_get_devdata(master);
+ dev_set_drvdata(&pdev->dev, hspi);
+
+ /* init hspi */
+ hspi->master = master;
+ hspi->dev = &pdev->dev;
+ hspi->clk = clk;
+ hspi->addr = devm_ioremap(hspi->dev,
+ res->start, resource_size(res));
+ if (!hspi->addr) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ master->num_chipselect = 1;
+ master->bus_num = pdev->id;
+ master->setup = hspi_setup;
+ master->cleanup = hspi_cleanup;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->prepare_transfer_hardware = hspi_prepare_transfer;
+ master->transfer_one_message = hspi_transfer_one_message;
+ master->unprepare_transfer_hardware = hspi_unprepare_transfer;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto error2;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ dev_info(&pdev->dev, "probed\n");
+
+ return 0;
+
+ error2:
+ devm_iounmap(hspi->dev, hspi->addr);
+ error1:
+ clk_put(clk);
+ error0:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int __devexit hspi_remove(struct platform_device *pdev)
+{
+ struct hspi_priv *hspi = dev_get_drvdata(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_put(hspi->clk);
+ spi_unregister_master(hspi->master);
+ devm_iounmap(hspi->dev, hspi->addr);
+
+ return 0;
+}
+
+static struct platform_driver hspi_driver = {
+ .probe = hspi_probe,
+ .remove = __devexit_p(hspi_remove),
+ .driver = {
+ .name = "sh-hspi",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(hspi_driver);
+
+MODULE_DESCRIPTION("SuperH HSPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_ALIAS("platform:sh_spi");
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 70c8af9f7ccc..79442c31bcd9 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -92,17 +92,26 @@ struct spi_sh_data {
unsigned long cr1;
wait_queue_head_t wait;
spinlock_t lock;
+ int width;
};
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
unsigned long offset)
{
- writel(data, ss->addr + offset);
+ if (ss->width == 8)
+ iowrite8(data, ss->addr + (offset >> 2));
+ else if (ss->width == 32)
+ iowrite32(data, ss->addr + offset);
}
static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
- return readl(ss->addr + offset);
+ if (ss->width == 8)
+ return ioread8(ss->addr + (offset >> 2));
+ else if (ss->width == 32)
+ return ioread32(ss->addr + offset);
+ else
+ return 0;
}
static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
@@ -464,6 +473,18 @@ static int __devinit spi_sh_probe(struct platform_device *pdev)
ss = spi_master_get_devdata(master);
dev_set_drvdata(&pdev->dev, ss);
+ switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_8BIT:
+ ss->width = 8;
+ break;
+ case IORESOURCE_MEM_32BIT:
+ ss->width = 32;
+ break;
+ default:
+		dev_err(&pdev->dev, "unsupported resource width\n");
+ ret = -ENODEV;
+ goto error1;
+ }
ss->irq = irq;
ss->master = master;
ss->addr = ioremap(res->start, resource_size(res));
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
new file mode 100644
index 000000000000..52fe495bb32a
--- /dev/null
+++ b/drivers/spi/spi-sirf.c
@@ -0,0 +1,687 @@
+/*
+ * SPI bus driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/pinctrl/pinmux.h>
+
+#define DRIVER_NAME "sirfsoc_spi"
+
+#define SIRFSOC_SPI_CTRL 0x0000
+#define SIRFSOC_SPI_CMD 0x0004
+#define SIRFSOC_SPI_TX_RX_EN 0x0008
+#define SIRFSOC_SPI_INT_EN 0x000C
+#define SIRFSOC_SPI_INT_STATUS 0x0010
+#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
+#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
+#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
+#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
+#define SIRFSOC_SPI_TXFIFO_OP 0x0110
+#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
+#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
+#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
+#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
+#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
+#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
+#define SIRFSOC_SPI_RXFIFO_OP 0x0130
+#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
+#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
+#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
+
+/* SPI CTRL register defines */
+#define SIRFSOC_SPI_SLV_MODE BIT(16)
+#define SIRFSOC_SPI_CMD_MODE BIT(17)
+#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
+#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
+#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
+#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
+#define SIRFSOC_SPI_TRAN_MSB BIT(22)
+#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
+#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
+#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
+#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
+#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
+#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
+
+/* Interrupt Enable */
+#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
+#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
+#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
+#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
+#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
+#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
+#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
+#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
+#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
+#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
+#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
+
+#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
+
+/* Interrupt status */
+#define SIRFSOC_SPI_RX_DONE BIT(0)
+#define SIRFSOC_SPI_TX_DONE BIT(1)
+#define SIRFSOC_SPI_RX_OFLOW BIT(2)
+#define SIRFSOC_SPI_TX_UFLOW BIT(3)
+#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
+#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
+#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
+#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
+#define SIRFSOC_SPI_FRM_END BIT(10)
+
+/* TX RX enable */
+#define SIRFSOC_SPI_RX_EN BIT(0)
+#define SIRFSOC_SPI_TX_EN BIT(1)
+#define SIRFSOC_SPI_CMD_TX_EN BIT(2)
+
+#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
+#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)
+
+/* FIFO OPs */
+#define SIRFSOC_SPI_FIFO_RESET BIT(0)
+#define SIRFSOC_SPI_FIFO_START BIT(1)
+
+/* FIFO CTRL */
+#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
+#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
+#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
+
+/* FIFO Status */
+#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF
+#define SIRFSOC_SPI_FIFO_FULL BIT(8)
+#define SIRFSOC_SPI_FIFO_EMPTY BIT(9)
+
+/* 256 bytes rx/tx FIFO */
+#define SIRFSOC_SPI_FIFO_SIZE 256
+#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024)
+
+#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F)
+#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10)
+#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
+#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
+
+struct sirfsoc_spi {
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ u32 ctrl_freq; /* SPI controller clock speed */
+ struct clk *clk;
+ struct pinmux *pmx;
+
+ /* rx & tx bufs from the spi_transfer */
+ const void *tx;
+ void *rx;
+
+ /* place received word into rx buffer */
+ void (*rx_word) (struct sirfsoc_spi *);
+ /* get word from tx buffer for sending */
+ void (*tx_word) (struct sirfsoc_spi *);
+
+	/* number of words left to be transmitted/received */
+ unsigned int left_tx_cnt;
+ unsigned int left_rx_cnt;
+
+ /* tasklet to push tx msg into FIFO */
+ struct tasklet_struct tasklet_tx;
+
+ int chipselect[0];
+};
+
+static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
+{
+ u32 data;
+ u8 *rx = sspi->rx;
+
+ data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+
+ if (rx) {
+ *rx++ = (u8) data;
+ sspi->rx = rx;
+ }
+
+ sspi->left_rx_cnt--;
+}
+
+static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
+{
+ u32 data = 0;
+ const u8 *tx = sspi->tx;
+
+ if (tx) {
+ data = *tx++;
+ sspi->tx = tx;
+ }
+
+ writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ sspi->left_tx_cnt--;
+}
+
+static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
+{
+ u32 data;
+ u16 *rx = sspi->rx;
+
+ data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+
+ if (rx) {
+ *rx++ = (u16) data;
+ sspi->rx = rx;
+ }
+
+ sspi->left_rx_cnt--;
+}
+
+static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
+{
+ u32 data = 0;
+ const u16 *tx = sspi->tx;
+
+ if (tx) {
+ data = *tx++;
+ sspi->tx = tx;
+ }
+
+ writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ sspi->left_tx_cnt--;
+}
+
+static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
+{
+ u32 data;
+ u32 *rx = sspi->rx;
+
+ data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+
+ if (rx) {
+ *rx++ = (u32) data;
+ sspi->rx = rx;
+ }
+
+ sspi->left_rx_cnt--;
+
+}
+
+static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
+{
+ u32 data = 0;
+ const u32 *tx = sspi->tx;
+
+ if (tx) {
+ data = *tx++;
+ sspi->tx = tx;
+ }
+
+ writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ sspi->left_tx_cnt--;
+}
+
+static void spi_sirfsoc_tasklet_tx(unsigned long arg)
+{
+ struct sirfsoc_spi *sspi = (struct sirfsoc_spi *)arg;
+
+	/* Fill the Tx FIFO while there are words left to transmit */
+ while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) &
+ SIRFSOC_SPI_FIFO_FULL)) &&
+ sspi->left_tx_cnt)
+ sspi->tx_word(sspi);
+}
+
+static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
+{
+ struct sirfsoc_spi *sspi = dev_id;
+ u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
+
+ writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
+
+ /* Error Conditions */
+ if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
+ spi_stat & SIRFSOC_SPI_TX_UFLOW) {
+ complete(&sspi->done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ }
+
+ if (spi_stat & SIRFSOC_SPI_FRM_END) {
+ while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
+ & SIRFSOC_SPI_FIFO_EMPTY)) &&
+ sspi->left_rx_cnt)
+ sspi->rx_word(sspi);
+
+ /* Received all words */
+ if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) {
+ complete(&sspi->done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ }
+ }
+
+ if (spi_stat & SIRFSOC_SPI_RXFIFO_THD_REACH ||
+ spi_stat & SIRFSOC_SPI_TXFIFO_THD_REACH ||
+ spi_stat & SIRFSOC_SPI_RX_FIFO_FULL ||
+ spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
+ tasklet_schedule(&sspi->tasklet_tx);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ int timeout = t->len * 10;
+ sspi = spi_master_get_devdata(spi->master);
+
+ sspi->tx = t->tx_buf;
+ sspi->rx = t->rx_buf;
+ sspi->left_tx_cnt = sspi->left_rx_cnt = t->len;
+ INIT_COMPLETION(sspi->done);
+
+ writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
+
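+	/*
+	 * Pick the transfer mode: a single word uses the auto-clear mode,
+	 * multi-word transfers below SIRFSOC_SPI_DAT_FRM_LEN_MAX also program
+	 * the TX/RX I/O length registers, anything longer falls back to plain
+	 * FIFO-driven operation.
+	 */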
+ if (t->len == 1) {
+ writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
+ SIRFSOC_SPI_ENA_AUTO_CLR,
+ sspi->base + SIRFSOC_SPI_CTRL);
+ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ } else if ((t->len > 1) && (t->len < SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
+ writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
+ SIRFSOC_SPI_MUL_DAT_MODE |
+ SIRFSOC_SPI_ENA_AUTO_CLR,
+ sspi->base + SIRFSOC_SPI_CTRL);
+ writel(t->len - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(t->len - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ } else {
+ writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
+ sspi->base + SIRFSOC_SPI_CTRL);
+ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ }
+
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+
+ /* Send the first word to trigger the whole tx/rx process */
+ sspi->tx_word(sspi);
+
+ writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
+ SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
+ SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
+ SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
+
+ if (wait_for_completion_timeout(&sspi->done, timeout) == 0)
+ dev_err(&spi->dev, "transfer timeout\n");
+
+ /* TX, RX FIFO stop */
+ writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
+
+ return t->len - sspi->left_rx_cnt;
+}
+
+static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
+{
+ struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
+
+ if (sspi->chipselect[spi->chip_select] == 0) {
+ u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
+ regval |= SIRFSOC_SPI_CS_IO_OUT;
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval |= SIRFSOC_SPI_CS_IO_OUT;
+ else
+ regval &= ~SIRFSOC_SPI_CS_IO_OUT;
+ break;
+ case BITBANG_CS_INACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval &= ~SIRFSOC_SPI_CS_IO_OUT;
+ else
+ regval |= SIRFSOC_SPI_CS_IO_OUT;
+ break;
+ }
+ writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
+ } else {
+ int gpio = sspi->chipselect[spi->chip_select];
+ gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ }
+}
+
+static int
+spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ u8 bits_per_word = 0;
+ int hz = 0;
+ u32 regval;
+ u32 txfifo_ctrl, rxfifo_ctrl;
+ u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
+
+ sspi = spi_master_get_devdata(spi->master);
+
+ bits_per_word = t && t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word;
+ hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+
+ /* Enable IO mode for RX, TX */
+ writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
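+	/* The bit rate is ctrl_freq / (2 * (divider + 1)), so derive the divider */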
+ regval = (sspi->ctrl_freq / (2 * hz)) - 1;
+
+ if (regval > 0xFFFF || regval < 0) {
+ dev_err(&spi->dev, "Speed %d not supported\n", hz);
+ return -EINVAL;
+ }
+
+ switch (bits_per_word) {
+ case 8:
+ regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
+ sspi->rx_word = spi_sirfsoc_rx_word_u8;
+ sspi->tx_word = spi_sirfsoc_tx_word_u8;
+ txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ SIRFSOC_SPI_FIFO_WIDTH_BYTE;
+ rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ SIRFSOC_SPI_FIFO_WIDTH_BYTE;
+ break;
+ case 12:
+ case 16:
+ regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
+ SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
+ sspi->rx_word = spi_sirfsoc_rx_word_u16;
+ sspi->tx_word = spi_sirfsoc_tx_word_u16;
+ txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ SIRFSOC_SPI_FIFO_WIDTH_WORD;
+ rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ SIRFSOC_SPI_FIFO_WIDTH_WORD;
+ break;
+ case 32:
+ regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
+ sspi->rx_word = spi_sirfsoc_rx_word_u32;
+ sspi->tx_word = spi_sirfsoc_tx_word_u32;
+ txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ SIRFSOC_SPI_FIFO_WIDTH_DWORD;
+ rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ SIRFSOC_SPI_FIFO_WIDTH_DWORD;
+ break;
+ default:
+ dev_err(&spi->dev, "Bits per word %d not supported\n",
+ bits_per_word);
+ return -EINVAL;
+ }
+
+ if (!(spi->mode & SPI_CS_HIGH))
+ regval |= SIRFSOC_SPI_CS_IDLE_STAT;
+ if (!(spi->mode & SPI_LSB_FIRST))
+ regval |= SIRFSOC_SPI_TRAN_MSB;
+ if (spi->mode & SPI_CPOL)
+ regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
+
+ /*
+	 * Data should be driven at least 1/2 cycle before the fetch edge
+	 * so that it is stable by the time it is sampled.
+ */
+ if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
+ (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
+ regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
+ else
+ regval |= SIRFSOC_SPI_DRV_POS_EDGE;
+
+ writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
+ SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
+ SIRFSOC_SPI_FIFO_HC(2),
+ sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
+ writel(SIRFSOC_SPI_FIFO_SC(2) |
+ SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
+ SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
+ sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
+ writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
+ writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
+
+ writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
+ return 0;
+}
+
+static int spi_sirfsoc_setup(struct spi_device *spi)
+{
+ struct sirfsoc_spi *sspi;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ sspi = spi_master_get_devdata(spi->master);
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ return spi_sirfsoc_setup_transfer(spi, NULL);
+}
+
+static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
+{
+ struct sirfsoc_spi *sspi;
+ struct spi_master *master;
+ struct resource *mem_res;
+ int num_cs, cs_gpio, irq;
+ int i;
+ int ret;
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "sirf,spi-num-chipselects", &num_cs);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get chip select number\n");
+ goto err_cs;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI master\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev, "Unable to get IO resource\n");
+ ret = -ENODEV;
+ goto free_master;
+ }
+ master->num_chipselect = num_cs;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i);
+ if (cs_gpio < 0) {
+ dev_err(&pdev->dev, "can't get cs gpio from DT\n");
+ ret = -ENODEV;
+ goto free_master;
+ }
+
+ sspi->chipselect[i] = cs_gpio;
+ if (cs_gpio == 0)
+ continue; /* use cs from spi controller */
+
+ ret = gpio_request(cs_gpio, DRIVER_NAME);
+ if (ret) {
+ while (i > 0) {
+ i--;
+ if (sspi->chipselect[i] > 0)
+ gpio_free(sspi->chipselect[i]);
+ }
+			dev_err(&pdev->dev, "failed to request cs gpios\n");
+ goto free_master;
+ }
+ }
+
+ sspi->base = devm_request_and_ioremap(&pdev->dev, mem_res);
+ if (!sspi->base) {
+ dev_err(&pdev->dev, "IO remap failed!\n");
+ ret = -ENOMEM;
+ goto free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto free_master;
+ }
+ ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
+ DRIVER_NAME, sspi);
+ if (ret)
+ goto free_master;
+
+ sspi->bitbang.master = spi_master_get(master);
+ sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
+ sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
+ sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
+ sspi->bitbang.master->setup = spi_sirfsoc_setup;
+ master->bus_num = pdev->id;
+ sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+
+ sspi->pmx = pinmux_get(&pdev->dev, NULL);
+	if (IS_ERR(sspi->pmx)) {
+		ret = PTR_ERR(sspi->pmx);
+		goto free_master;
+	}
+
+ pinmux_enable(sspi->pmx);
+
+ sspi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sspi->clk)) {
+ ret = -EINVAL;
+ goto free_pmx;
+ }
+ clk_enable(sspi->clk);
+ sspi->ctrl_freq = clk_get_rate(sspi->clk);
+
+ init_completion(&sspi->done);
+
+ tasklet_init(&sspi->tasklet_tx, spi_sirfsoc_tasklet_tx,
+ (unsigned long)sspi);
+
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ /* We are not using dummy delay between command and data */
+ writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
+
+ ret = spi_bitbang_start(&sspi->bitbang);
+ if (ret)
+ goto free_clk;
+
+	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);
+
+ return 0;
+
+free_clk:
+ clk_disable(sspi->clk);
+ clk_put(sspi->clk);
+free_pmx:
+ pinmux_disable(sspi->pmx);
+ pinmux_put(sspi->pmx);
+free_master:
+ spi_master_put(master);
+err_cs:
+ return ret;
+}
+
+static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sirfsoc_spi *sspi;
+ int i;
+
+ master = platform_get_drvdata(pdev);
+ sspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&sspi->bitbang);
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (sspi->chipselect[i] > 0)
+ gpio_free(sspi->chipselect[i]);
+ }
+ clk_disable(sspi->clk);
+ clk_put(sspi->clk);
+ pinmux_disable(sspi->pmx);
+ pinmux_put(sspi->pmx);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_sirfsoc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
+
+ clk_disable(sspi->clk);
+ return 0;
+}
+
+static int spi_sirfsoc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
+
+ clk_enable(sspi->clk);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+
+ return 0;
+}
+
+static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
+ .suspend = spi_sirfsoc_suspend,
+ .resume = spi_sirfsoc_resume,
+};
+#endif
+
+static const struct of_device_id spi_sirfsoc_of_match[] = {
+ { .compatible = "sirf,prima2-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
+
+static struct platform_driver spi_sirfsoc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &spi_sirfsoc_pm_ops,
+#endif
+ .of_match_table = spi_sirfsoc_of_match,
+ },
+ .probe = spi_sirfsoc_probe,
+ .remove = __devexit_p(spi_sirfsoc_remove),
+};
+module_platform_driver(spi_sirfsoc_driver);
+
+MODULE_DESCRIPTION("SiRF SoC SPI master driver");
+MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, "
+ "Barry Song <Baohua.Song@csr.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 10182eb50068..5c6fa5ed3366 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -196,6 +196,7 @@ struct pch_spi_data {
struct pch_spi_dma_ctrl dma;
int use_dma;
u8 irq_reg_sts;
+ int save_total_len;
};
/**
@@ -216,7 +217,7 @@ struct pch_pd_dev_save {
struct pch_spi_board_data *board_dat;
};
-static struct pci_device_id pch_spi_pcidev_id[] = {
+static DEFINE_PCI_DEVICE_TABLE(pch_spi_pcidev_id) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
@@ -318,22 +319,23 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
data->tx_index = tx_index;
data->rx_index = rx_index;
- }
-
- /* if transfer complete interrupt */
- if (reg_spsr_val & SPSR_FI_BIT) {
- if ((tx_index == bpw_len) && (rx_index == tx_index)) {
- /* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
-
- /* transfer is completed;
- inform pch_spi_process_messages */
- data->transfer_complete = true;
- data->transfer_active = false;
- wake_up(&data->wait);
- } else {
- dev_err(&data->master->dev,
- "%s : Transfer is not completed", __func__);
+ /* if transfer complete interrupt */
+ if (reg_spsr_val & SPSR_FI_BIT) {
+ if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ PCH_ALL);
+
+ /* transfer is completed;
+ inform pch_spi_process_messages */
+ data->transfer_complete = true;
+ data->transfer_active = false;
+ wake_up(&data->wait);
+ } else {
+ dev_err(&data->master->dev,
+ "%s : Transfer is not completed",
+ __func__);
+ }
}
}
}
@@ -822,11 +824,13 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
rx_dma_buf = data->dma.rx_buf_virt;
for (j = 0; j < data->bpw_len; j++)
*rx_buf++ = *rx_dma_buf++ & 0xFF;
+ data->cur_trans->rx_buf = rx_buf;
} else {
rx_sbuf = data->cur_trans->rx_buf;
rx_dma_sbuf = data->dma.rx_buf_virt;
for (j = 0; j < data->bpw_len; j++)
*rx_sbuf++ = *rx_dma_sbuf++;
+ data->cur_trans->rx_buf = rx_sbuf;
}
}
@@ -852,6 +856,9 @@ static int pch_spi_start_transfer(struct pch_spi_data *data)
rtn = wait_event_interruptible_timeout(data->wait,
data->transfer_complete,
msecs_to_jiffies(2 * HZ));
+ if (!rtn)
+ dev_err(&data->master->dev,
+ "%s wait-event timeout\n", __func__);
dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
DMA_FROM_DEVICE);
@@ -923,7 +930,8 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
dma_cap_set(DMA_SLAVE, mask);
/* Get DMA's dev information */
- dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0));
+ dma_dev = pci_get_bus_and_slot(data->board_dat->pdev->bus->number,
+ PCI_DEVFN(12, 0));
/* Set Tx DMA */
param = &dma->param_tx;
@@ -987,6 +995,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
int i;
int size;
int rem;
+ int head;
unsigned long flags;
struct pch_spi_dma_ctrl *dma;
@@ -1015,6 +1024,11 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
data->bpw_len = data->cur_trans->len / (*bpw / 8);
+ if (data->bpw_len > PCH_BUF_SIZE) {
+ data->bpw_len = PCH_BUF_SIZE;
+ data->cur_trans->len -= PCH_BUF_SIZE;
+ }
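+	/*
+	 * Transfers larger than PCH_BUF_SIZE are sent one buffer at a time;
+	 * pch_spi_process_messages() loops over the chunks and restores the
+	 * original length once the whole transfer is done.
+	 */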
+
/* copy Tx Data */
if (data->cur_trans->tx_buf != NULL) {
if (*bpw == 8) {
@@ -1029,10 +1043,17 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
*tx_dma_sbuf++ = *tx_sbuf++;
}
}
+
+	/* Calculate the Rx scatterlist parameters for the DMA transfer */
if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
- num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
+ } else {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ rem = PCH_DMA_TRANS_SIZE;
+ }
size = PCH_DMA_TRANS_SIZE;
- rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
} else {
num = 1;
size = data->bpw_len;
@@ -1092,15 +1113,23 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
dma->nent = num;
dma->desc_rx = desc_rx;
- /* TX */
- if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
- num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+	/* Calculate the Tx scatterlist parameters for the DMA transfer */
+ if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
+ head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
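+		/*
+		 * 'head' pads the first scatterlist entry up to
+		 * PCH_MAX_FIFO_DEPTH (size + head below) so the FIFO can be
+		 * filled at the start; the last entry then carries only 'rem'.
+		 */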
+ if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
+ } else {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
+ PCH_DMA_TRANS_SIZE - head;
+ }
size = PCH_DMA_TRANS_SIZE;
- rem = 16;
} else {
num = 1;
size = data->bpw_len;
rem = data->bpw_len;
+ head = 0;
}
dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
@@ -1110,11 +1139,17 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
for (i = 0; i < num; i++, sg++) {
if (i == 0) {
sg->offset = 0;
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
+ sg->offset);
+ sg_dma_len(sg) = size + head;
+ } else if (i == (num - 1)) {
+ sg->offset = head + size * i;
+ sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
sg->offset);
sg_dma_len(sg) = rem;
} else {
- sg->offset = rem + size * (i - 1);
+ sg->offset = head + size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
sg->offset);
@@ -1202,6 +1237,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
data->current_msg->spi->bits_per_word);
pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
do {
+ int cnt;
/* If we are already processing a message get the next
transfer structure from the message otherwise retrieve
the 1st transfer request from the message. */
@@ -1221,11 +1257,28 @@ static void pch_spi_process_messages(struct work_struct *pwork)
}
spin_unlock(&data->lock);
+ if (!data->cur_trans->len)
+ goto out;
+ cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
+ data->save_total_len = data->cur_trans->len;
if (data->use_dma) {
- pch_spi_handle_dma(data, &bpw);
- if (!pch_spi_start_transfer(data))
- goto out;
- pch_spi_copy_rx_data_for_dma(data, bpw);
+ int i;
+ char *save_rx_buf = data->cur_trans->rx_buf;
+			for (i = 0; i < cnt; i++) {
+ pch_spi_handle_dma(data, &bpw);
+ if (!pch_spi_start_transfer(data)) {
+ data->transfer_complete = true;
+ data->current_msg->status = -EIO;
+ data->current_msg->complete
+ (data->current_msg->context);
+ data->bcurrent_msg_processing = false;
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+ goto out;
+ }
+ pch_spi_copy_rx_data_for_dma(data, bpw);
+ }
+ data->cur_trans->rx_buf = save_rx_buf;
} else {
pch_spi_set_tx(data, &bpw);
pch_spi_set_ir(data);
@@ -1236,6 +1289,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
data->pkt_tx_buff = NULL;
}
/* increment message count */
+ data->cur_trans->len = data->save_total_len;
data->current_msg->actual_length += data->cur_trans->len;
dev_dbg(&data->master->dev,
@@ -1388,6 +1442,7 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
master->num_chipselect = PCH_MAX_CS;
master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b2ccdea30cb9..3d8f662e4fe9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,6 +30,9 @@
#include <linux/of_spi.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
static void spidev_release(struct device *dev)
{
@@ -481,7 +484,7 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
*/
-int __init
+int __devinit
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
struct boardinfo *bi;
@@ -507,6 +510,294 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
/*-------------------------------------------------------------------------*/
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ *
+ * This function checks if there is any SPI message in the queue that
+ * needs processing and, if so, calls out to the driver to initialize the
+ * hardware and transfer each message.
+ *
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+ struct spi_master *master =
+ container_of(work, struct spi_master, pump_messages);
+ unsigned long flags;
+ bool was_busy = false;
+ int ret;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&master->queue_lock, flags);
+ if (list_empty(&master->queue) || !master->running) {
+ if (master->busy) {
+ ret = master->unprepare_transfer_hardware(master);
+ if (ret) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ dev_err(&master->dev,
+ "failed to unprepare transfer hardware\n");
+ return;
+ }
+ }
+ master->busy = false;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (master->cur_msg) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+ /* Extract head of queue */
+ master->cur_msg =
+ list_entry(master->queue.next, struct spi_message, queue);
+
+ list_del_init(&master->cur_msg->queue);
+ if (master->busy)
+ was_busy = true;
+ else
+ master->busy = true;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
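+	/* Only prepare the hardware on an idle -> busy transition */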
+ if (!was_busy) {
+ ret = master->prepare_transfer_hardware(master);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to prepare transfer hardware\n");
+ return;
+ }
+ }
+
+ ret = master->transfer_one_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to transfer one message from queue\n");
+ return;
+ }
+}
+
+static int spi_init_queue(struct spi_master *master)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+ INIT_LIST_HEAD(&master->queue);
+ spin_lock_init(&master->queue_lock);
+
+ master->running = false;
+ master->busy = false;
+
+ init_kthread_worker(&master->kworker);
+ master->kworker_task = kthread_run(kthread_worker_fn,
+ &master->kworker,
+ dev_name(&master->dev));
+ if (IS_ERR(master->kworker_task)) {
+ dev_err(&master->dev, "failed to create message pump task\n");
+ return -ENOMEM;
+ }
+ init_kthread_work(&master->pump_messages, spi_pump_messages);
+
+ /*
+ * Master config will indicate if this controller should run the
+ * message pump with high (realtime) priority to reduce the transfer
+ * latency on the bus by minimising the delay between a transfer
+ * request and the scheduling of the message pump thread. Without this
+ * setting the message pump thread will remain at default priority.
+ */
+ if (master->rt) {
+ dev_info(&master->dev,
+ "will run message pump with realtime priority\n");
+ sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+ }
+
+ return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @master: the master to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+{
+ struct spi_message *next;
+ unsigned long flags;
+
+ /* get a pointer to the next message, if any */
+ spin_lock_irqsave(&master->queue_lock, flags);
+ if (list_empty(&master->queue))
+ next = NULL;
+ else
+ next = list_entry(master->queue.next,
+ struct spi_message, queue);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @master: the master to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_master *master)
+{
+ struct spi_message *mesg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+ mesg = master->cur_msg;
+ master->cur_msg = NULL;
+
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
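+	/* The pump was kicked above for any remaining work; now complete this message */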
+ mesg->state = NULL;
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_master *master)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ if (master->running || master->busy) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return -EBUSY;
+ }
+
+ master->running = true;
+ master->cur_msg = NULL;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+
+ return 0;
+}
+
+static int spi_stop_queue(struct spi_master *master)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int ret = 0;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the master->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead.
+ */
+ while ((!list_empty(&master->queue) || master->busy) && limit--) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&master->queue_lock, flags);
+ }
+
+ if (!list_empty(&master->queue) || master->busy)
+ ret = -EBUSY;
+ else
+ master->running = false;
+
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ if (ret) {
+ dev_warn(&master->dev,
+ "could not stop message queue\n");
+ return ret;
+ }
+ return ret;
+}
+
+static int spi_destroy_queue(struct spi_master *master)
+{
+ int ret;
+
+ ret = spi_stop_queue(master);
+
+ /*
+ * flush_kthread_worker will block until all work is done.
+ * If the reason that stop_queue timed out is that the work will never
+ * finish, then it does no good to call flush/stop thread, so
+ * return anyway.
+ */
+ if (ret) {
+ dev_err(&master->dev, "problem destroying queue\n");
+ return ret;
+ }
+
+ flush_kthread_worker(&master->kworker);
+ kthread_stop(master->kworker_task);
+
+ return 0;
+}
+
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: SPI message to be queued onto the driver's message queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct spi_master *master = spi->master;
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ if (!master->running) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+
+ list_add_tail(&msg->queue, &master->queue);
+ if (master->running && !master->busy)
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return 0;
+}
+
+static int spi_master_initialize_queue(struct spi_master *master)
+{
+ int ret;
+
+ master->queued = true;
+ master->transfer = spi_queued_transfer;
+
+ /* Initialize and start queue */
+ ret = spi_init_queue(master);
+ if (ret) {
+ dev_err(&master->dev, "problem initializing queue\n");
+ goto err_init_queue;
+ }
+ ret = spi_start_queue(master);
+ if (ret) {
+ dev_err(&master->dev, "problem starting queue\n");
+ goto err_start_queue;
+ }
+
+ return 0;
+
+err_start_queue:
+err_init_queue:
+ spi_destroy_queue(master);
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
static void spi_master_release(struct device *dev)
{
struct spi_master *master;
@@ -522,6 +813,7 @@ static struct class spi_master_class = {
};
+
/**
* spi_alloc_master - allocate SPI master controller
* @dev: the controller, possibly using the platform_bus
@@ -539,7 +831,8 @@ static struct class spi_master_class = {
*
* The caller is responsible for assigning the bus number and initializing
* the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() to prevent a memory leak.
+ * adding the device) calling spi_master_put() and kfree() to prevent a memory
+ * leak.
*/
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
@@ -621,14 +914,23 @@ int spi_register_master(struct spi_master *master)
dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
dynamic ? " (dynamic)" : "");
+ /* If we're using a queued driver, start the queue */
+ if (master->transfer)
+ dev_info(dev, "master is unqueued, this is deprecated\n");
+ else {
+ status = spi_master_initialize_queue(master);
+ if (status) {
+ device_unregister(&master->dev);
+ goto done;
+ }
+ }
+
mutex_lock(&board_lock);
list_add_tail(&master->list, &spi_master_list);
list_for_each_entry(bi, &board_list, list)
spi_match_master_to_boardinfo(master, &bi->board_info);
mutex_unlock(&board_lock);
- status = 0;
-
/* Register devices from the device tree */
of_register_spi_devices(master);
done:
@@ -636,7 +938,6 @@ done:
}
EXPORT_SYMBOL_GPL(spi_register_master);
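As a rough illustration of the registration path above, a hypothetical controller probe (foo_spi and the foo_* hooks are assumed names) opts in to the core queue simply by leaving master->transfer unset and supplying the transfer_one_message() family of callbacks, so spi_register_master() calls spi_master_initialize_queue() instead of warning about the deprecated unqueued path.

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev,
				  sizeof(struct foo_spi) /* assumed private state */);
	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 1;
	master->prepare_transfer_hardware = foo_prepare_hardware;
	master->transfer_one_message = foo_transfer_one_message;
	master->unprepare_transfer_hardware = foo_unprepare_hardware;
	/* master->transfer is deliberately left NULL: use the queued core */

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);
	return ret;
}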
-
static int __unregister(struct device *dev, void *null)
{
spi_unregister_device(to_spi_device(dev));
@@ -657,6 +958,11 @@ void spi_unregister_master(struct spi_master *master)
{
int dummy;
+ if (master->queued) {
+ if (spi_destroy_queue(master))
+ dev_err(&master->dev, "queue remove failed\n");
+ }
+
mutex_lock(&board_lock);
list_del(&master->list);
mutex_unlock(&board_lock);
@@ -666,6 +972,37 @@ void spi_unregister_master(struct spi_master *master)
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
+int spi_master_suspend(struct spi_master *master)
+{
+ int ret;
+
+ /* Basically a no-op for non-queued masters */
+ if (!master->queued)
+ return 0;
+
+ ret = spi_stop_queue(master);
+ if (ret)
+ dev_err(&master->dev, "queue stop failed\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_suspend);
+
+int spi_master_resume(struct spi_master *master)
+{
+ int ret;
+
+ if (!master->queued)
+ return 0;
+
+ ret = spi_start_queue(master);
+ if (ret)
+ dev_err(&master->dev, "queue restart failed\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_resume);
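A hedged sketch of how a controller driver would wire these helpers into its power management callbacks (foo_* names are hypothetical; the driver is assumed to have stored the master as drvdata in probe):

static int foo_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	/* stops the message pump; fails if messages are still queued */
	return spi_master_suspend(master);
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	/* restarts the message pump kthread */
	return spi_master_resume(master);
}

static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);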
+
static int __spi_master_match(struct device *dev, void *data)
{
struct spi_master *m;
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 572f637299c9..3672f40f3455 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -241,6 +241,8 @@ struct dma_chan;
* @autosuspend_delay: delay in ms following transfer completion before the
* runtime power management system suspends the device. A setting of 0
* indicates no delay and the device will be suspended immediately.
+ * @rt: indicates the controller should run the message pump with realtime
+ * priority to minimise the transfer latency on the bus.
*/
struct pl022_ssp_controller {
u16 bus_id;
@@ -250,6 +252,7 @@ struct pl022_ssp_controller {
void *dma_rx_param;
void *dma_tx_param;
int autosuspend_delay;
+ bool rt;
};
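For illustration, a hypothetical board file could request realtime pumping through this platform data (names and values are made up):

static struct pl022_ssp_controller foo_ssp0_plat = {
	.bus_id			= 0,
	.autosuspend_delay	= 100,	/* ms of idle before runtime suspend */
	.rt			= true,	/* run the message pump at RT priority */
};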
/**
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
new file mode 100644
index 000000000000..a1121f872ac1
--- /dev/null
+++ b/include/linux/spi/sh_hspi.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2011 Kuninori Morimoto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef SH_HSPI_H
+#define SH_HSPI_H
+
+struct sh_hspi_info {
+};
+
+#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 176fce9cc6b1..98679b061b63 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
+#include <linux/kthread.h>
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -235,6 +236,27 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* the device whose settings are being modified.
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
+ * @queued: whether this master is providing an internal message queue
+ * @kworker: thread struct for message pump
+ * @kworker_task: pointer to task for message pump kworker thread
+ * @pump_messages: work struct for scheduling work to the message pump
+ * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
+ * @cur_msg: the currently in-flight message
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_transfer_hardware: a message will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the transfer hardware
+ * by issuing this call
+ * @transfer_one_message: the subsystem calls the driver to transfer a single
+ * message while queuing transfers that arrive in the meantime. When the
+ * driver is finished with this message, it must call
+ * spi_finalize_current_message() so the subsystem can issue the next
+ * transfer
+ * @unprepare_transfer_hardware: there are currently no more messages on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
*
* Each SPI master controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -318,6 +340,28 @@ struct spi_master {
/* called on release() to free memory provided by spi_master */
void (*cleanup)(struct spi_device *spi);
+
+ /*
+ * These hooks are for drivers that want to use the generic
+ * master transfer queueing mechanism. If these are used, the
+ * transfer() function above must NOT be specified by the driver.
+ * Over time we expect SPI drivers to be phased over to this API.
+ */
+ bool queued;
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+ struct kthread_work pump_messages;
+ spinlock_t queue_lock;
+ struct list_head queue;
+ struct spi_message *cur_msg;
+ bool busy;
+ bool running;
+ bool rt;
+
+ int (*prepare_transfer_hardware)(struct spi_master *master);
+ int (*transfer_one_message)(struct spi_master *master,
+ struct spi_message *mesg);
+ int (*unprepare_transfer_hardware)(struct spi_master *master);
};
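A minimal sketch of the queued hooks documented above, for a hypothetical foo driver; foo_do_one_transfer() stands in for the hardware-specific work. Whatever the outcome, the driver must call spi_finalize_current_message() so the pump can move on to the next message:

static int foo_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *t;
	int status = 0;

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		status = foo_do_one_transfer(master, msg->spi, t);
		if (status)
			break;
		msg->actual_length += t->len;
	}

	msg->status = status;
	spi_finalize_current_message(master);

	return status;
}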
static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -343,6 +387,13 @@ static inline void spi_master_put(struct spi_master *master)
put_device(&master->dev);
}
+/* PM calls that need to be issued by the driver */
+extern int spi_master_suspend(struct spi_master *master);
+extern int spi_master_resume(struct spi_master *master);
+
+/* Calls the driver makes to interact with the message queue */
+extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
+extern void spi_finalize_current_message(struct spi_master *master);
/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
@@ -549,7 +600,7 @@ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags
+ ntrans * sizeof(struct spi_transfer),
flags);
if (m) {
- int i;
+ unsigned i;
struct spi_transfer *t = (struct spi_transfer *)(m + 1);
INIT_LIST_HEAD(&m->transfers);