Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig | 20
-rw-r--r--  drivers/dma/Makefile | 3
-rw-r--r--  drivers/dma/acpi-dma.c | 4
-rw-r--r--  drivers/dma/altera-msgdma.c | 4
-rw-r--r--  drivers/dma/amba-pl08x.c | 2
-rw-r--r--  drivers/dma/amd/Kconfig | 14
-rw-r--r--  drivers/dma/amd/Makefile | 3
-rw-r--r--  drivers/dma/amd/qdma/Makefile | 5
-rw-r--r--  drivers/dma/amd/qdma/qdma-comm-regs.c | 64
-rw-r--r--  drivers/dma/amd/qdma/qdma.c | 1143
-rw-r--r--  drivers/dma/amd/qdma/qdma.h | 266
-rw-r--r--  drivers/dma/at_hdmac.c | 6
-rw-r--r--  drivers/dma/bcm-sba-raid.c | 4
-rw-r--r--  drivers/dma/bcm2835-dma.c | 2
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 2
-rw-r--r--  drivers/dma/dw-edma/dw-hdma-v0-core.c | 26
-rw-r--r--  drivers/dma/dw/core.c | 131
-rw-r--r--  drivers/dma/dw/dw.c | 40
-rw-r--r--  drivers/dma/dw/idma32.c | 19
-rw-r--r--  drivers/dma/dw/platform.c | 20
-rw-r--r--  drivers/dma/dw/regs.h | 1
-rw-r--r--  drivers/dma/ep93xx_dma.c | 4
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h | 6
-rw-r--r--  drivers/dma/fsl-edma-main.c | 27
-rw-r--r--  drivers/dma/hisi_dma.c | 2
-rw-r--r--  drivers/dma/idma64.c | 8
-rw-r--r--  drivers/dma/idxd/idxd.h | 7
-rw-r--r--  drivers/dma/idxd/init.c | 9
-rw-r--r--  drivers/dma/idxd/perfmon.c | 102
-rw-r--r--  drivers/dma/idxd/submit.c | 2
-rw-r--r--  drivers/dma/imx-dma.c | 3
-rw-r--r--  drivers/dma/ioat/init.c | 2
-rw-r--r--  drivers/dma/lgm/lgm-dma.c | 2
-rw-r--r--  drivers/dma/loongson1-apb-dma.c | 660
-rw-r--r--  drivers/dma/lpc32xx-dmamux.c | 195
-rw-r--r--  drivers/dma/ls2x-apb-dma.c | 4
-rw-r--r--  drivers/dma/mediatek/mtk-cqdma.c | 4
-rw-r--r--  drivers/dma/mediatek/mtk-hsdma.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 4
-rw-r--r--  drivers/dma/mv_xor.h | 2
-rw-r--r--  drivers/dma/mv_xor_v2.c | 2
-rw-r--r--  drivers/dma/nbpfaxi.c | 2
-rw-r--r--  drivers/dma/of-dma.c | 4
-rw-r--r--  drivers/dma/owl-dma.c | 2
-rw-r--r--  drivers/dma/pl330.c | 5
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 2
-rw-r--r--  drivers/dma/ppc4xx/dma.h | 2
-rw-r--r--  drivers/dma/ptdma/ptdma.h | 2
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 10
-rw-r--r--  drivers/dma/qcom/gpi.c | 2
-rw-r--r--  drivers/dma/qcom/qcom_adm.c | 2
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 4
-rw-r--r--  drivers/dma/sh/shdmac.c | 2
-rw-r--r--  drivers/dma/ste_dma40.c | 6
-rw-r--r--  drivers/dma/ste_dma40.h | 2
-rw-r--r--  drivers/dma/ste_dma40_ll.h | 2
-rw-r--r--  drivers/dma/stm32/stm32-dma3.c | 2
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 2
-rw-r--r--  drivers/dma/ti/k3-udma.h | 1
-rw-r--r--  drivers/dma/ti/omap-dma.c | 6
-rw-r--r--  drivers/dma/xgene-dma.c | 2
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 101
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 27
64 files changed, 2747 insertions, 271 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cc0a62c34861..d9ec1e69e428 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -369,6 +369,15 @@ config K3_DMA
Support the DMA engine for Hisilicon K3 platform
devices.
+config LOONGSON1_APB_DMA
+ tristate "Loongson1 APB DMA support"
+ depends on MACH_LOONGSON32 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This selects support for the APB DMA controller in Loongson1 SoCs,
+ which is required by Loongson1 NAND and audio support.
+
config LPC18XX_DMAMUX
bool "NXP LPC18xx/43xx DMA MUX for PL080"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -378,6 +387,15 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config LPC32XX_DMAMUX
+ bool "NXP LPC32xx DMA MUX for PL080"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on OF && AMBA_PL08X
+ select MFD_SYSCON
+ help
+ Support for PL080 multiplexed DMA request lines on the
+ LPC32XX platform.
+
config LS2X_APB_DMA
tristate "Loongson LS2X APB DMA support"
depends on LOONGARCH || COMPILE_TEST
@@ -716,6 +734,8 @@ config XILINX_ZYNQMP_DPDMA
display driver.
# driver files
+source "drivers/dma/amd/Kconfig"
+
source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 374ea98faf43..ad6a03c052ec 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -49,7 +49,9 @@ obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o
obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
@@ -83,6 +85,7 @@ obj-$(CONFIG_ST_FDMA) += st_fdma.o
obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-$(CONFIG_INTEL_LDMA) += lgm/
+obj-y += amd/
obj-y += mediatek/
obj-y += qcom/
obj-y += stm32/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5906eae26e2a..a58a1600dd65 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -112,7 +112,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
}
/**
- * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources
+ * acpi_dma_parse_csrt - parse CSRT to extract additional DMA resources
* @adev: ACPI device to match with
* @adma: struct acpi_dma of the given DMA controller
*
@@ -305,7 +305,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
* found.
*
* Return:
- * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
*/
static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
struct acpi_dma_spec *dma_spec)
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 0968176f323d..e6a6566b309e 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -153,7 +153,7 @@ struct msgdma_extended_desc {
/**
* struct msgdma_sw_desc - implements a sw descriptor
* @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
* @node: node to move from the free list to the tx list
* @tx_list: transmit list node
*/
@@ -511,7 +511,7 @@ static void msgdma_copy_one(struct msgdma_device *mdev,
* of the DMA controller. The descriptor will get flushed to the
* FIFO, once the last word (control word) is written. Since we
* are not 100% sure that memcpy() writes all word in the "correct"
- * oder (address from low to high) on all architectures, we make
+ * order (address from low to high) on all architectures, we make
* sure this control word is written last by single coding it and
* adding some write-barriers here.
*/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 73a5cfb4da8a..38cdbca59485 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2006 ARM Ltd.
* Copyright (c) 2010 ST-Ericsson SA
- * Copyirght (c) 2017 Linaro Ltd.
+ * Copyright (c) 2017 Linaro Ltd.
*
* Author: Peter Pearse <peter.pearse@arm.com>
* Author: Linus Walleij <linus.walleij@linaro.org>
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
new file mode 100644
index 000000000000..7d1f51d69675
--- /dev/null
+++ b/drivers/dma/amd/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config AMD_QDMA
+ tristate "AMD Queue-based DMA"
+ depends on HAS_IOMEM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select REGMAP_MMIO
+ help
+ Enable support for the AMD Queue-based DMA subsystem. The primary
+ mechanism to transfer data using the QDMA is for the QDMA engine to
+ operate on instructions (descriptors) provided by the host operating
+ system. Using the descriptors, the QDMA can move data in either the
+ Host to Card (H2C) direction or the Card to Host (C2H) direction.
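
For context, the sketch below shows how a client driver might drive one of these H2C queues through the generic dmaengine slave API; it is illustrative only and not part of this patch. The channel lookup name "h2c-queue0" and the card address are assumptions — real channel names come from the dma_slave_map supplied in qdma_platdata->device_map.

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical consumer sketch; "h2c-queue0" must match an entry in the
 * board's qdma_platdata->device_map. The scatterlist is assumed to be
 * already mapped with dma_map_sg().
 */
static int example_qdma_h2c_xfer(struct device *dev, struct scatterlist *sgl,
                                 int nents, u64 card_addr)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_slave_config cfg = { };
        struct dma_chan *chan;
        int ret;

        chan = dma_request_chan(dev, "h2c-queue0");
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* Card-side destination address for the H2C (mem-to-dev) transfer */
        cfg.dst_addr = card_addr;
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                goto out;

        tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                     DMA_PREP_INTERRUPT);
        if (!tx) {
                ret = -ENOMEM;
                goto out;
        }

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        /* In real code, wait for the descriptor callback before releasing. */
out:
        dma_release_channel(chan);
        return ret;
}
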
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
new file mode 100644
index 000000000000..37212be9364f
--- /dev/null
+++ b/drivers/dma/amd/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/qdma/Makefile b/drivers/dma/amd/qdma/Makefile
new file mode 100644
index 000000000000..011268fef377
--- /dev/null
+++ b/drivers/dma/amd/qdma/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_AMD_QDMA) += amd-qdma.o
+
+amd-qdma-$(CONFIG_AMD_QDMA) := qdma.o qdma-comm-regs.o
diff --git a/drivers/dma/amd/qdma/qdma-comm-regs.c b/drivers/dma/amd/qdma/qdma-comm-regs.c
new file mode 100644
index 000000000000..9162f9d367cc
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma-comm-regs.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_REGS_DEF_H
+#define __QDMA_REGS_DEF_H
+
+#include "qdma.h"
+
+const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX] = {
+ [QDMA_REGO_CTXT_DATA] = QDMA_REGO(0x804, 8),
+ [QDMA_REGO_CTXT_CMD] = QDMA_REGO(0x844, 1),
+ [QDMA_REGO_CTXT_MASK] = QDMA_REGO(0x824, 8),
+ [QDMA_REGO_MM_H2C_CTRL] = QDMA_REGO(0x1004, 1),
+ [QDMA_REGO_MM_C2H_CTRL] = QDMA_REGO(0x1204, 1),
+ [QDMA_REGO_QUEUE_COUNT] = QDMA_REGO(0x120, 1),
+ [QDMA_REGO_RING_SIZE] = QDMA_REGO(0x204, 1),
+ [QDMA_REGO_H2C_PIDX] = QDMA_REGO(0x18004, 1),
+ [QDMA_REGO_C2H_PIDX] = QDMA_REGO(0x18008, 1),
+ [QDMA_REGO_INTR_CIDX] = QDMA_REGO(0x18000, 1),
+ [QDMA_REGO_FUNC_ID] = QDMA_REGO(0x12c, 1),
+ [QDMA_REGO_ERR_INT] = QDMA_REGO(0xb04, 1),
+ [QDMA_REGO_ERR_STAT] = QDMA_REGO(0x248, 1),
+};
+
+const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX] = {
+ /* QDMA_REGO_CTXT_DATA fields */
+ [QDMA_REGF_IRQ_ENABLE] = QDMA_REGF(53, 53),
+ [QDMA_REGF_WBK_ENABLE] = QDMA_REGF(52, 52),
+ [QDMA_REGF_WBI_CHECK] = QDMA_REGF(34, 34),
+ [QDMA_REGF_IRQ_ARM] = QDMA_REGF(16, 16),
+ [QDMA_REGF_IRQ_VEC] = QDMA_REGF(138, 128),
+ [QDMA_REGF_IRQ_AGG] = QDMA_REGF(139, 139),
+ [QDMA_REGF_WBI_INTVL_ENABLE] = QDMA_REGF(35, 35),
+ [QDMA_REGF_MRKR_DISABLE] = QDMA_REGF(62, 62),
+ [QDMA_REGF_QUEUE_ENABLE] = QDMA_REGF(32, 32),
+ [QDMA_REGF_QUEUE_MODE] = QDMA_REGF(63, 63),
+ [QDMA_REGF_DESC_BASE] = QDMA_REGF(127, 64),
+ [QDMA_REGF_DESC_SIZE] = QDMA_REGF(49, 48),
+ [QDMA_REGF_RING_ID] = QDMA_REGF(47, 44),
+ [QDMA_REGF_QUEUE_BASE] = QDMA_REGF(11, 0),
+ [QDMA_REGF_QUEUE_MAX] = QDMA_REGF(44, 32),
+ [QDMA_REGF_FUNCTION_ID] = QDMA_REGF(24, 17),
+ [QDMA_REGF_INTR_AGG_BASE] = QDMA_REGF(66, 15),
+ [QDMA_REGF_INTR_VECTOR] = QDMA_REGF(11, 1),
+ [QDMA_REGF_INTR_SIZE] = QDMA_REGF(69, 67),
+ [QDMA_REGF_INTR_VALID] = QDMA_REGF(0, 0),
+ [QDMA_REGF_INTR_COLOR] = QDMA_REGF(14, 14),
+ [QDMA_REGF_INTR_FUNCTION_ID] = QDMA_REGF(125, 114),
+ /* QDMA_REGO_CTXT_CMD fields */
+ [QDMA_REGF_CMD_INDX] = QDMA_REGF(19, 7),
+ [QDMA_REGF_CMD_CMD] = QDMA_REGF(6, 5),
+ [QDMA_REGF_CMD_TYPE] = QDMA_REGF(4, 1),
+ [QDMA_REGF_CMD_BUSY] = QDMA_REGF(0, 0),
+ /* QDMA_REGO_QUEUE_COUNT fields */
+ [QDMA_REGF_QUEUE_COUNT] = QDMA_REGF(11, 0),
+ /* QDMA_REGO_ERR_INT fields */
+ [QDMA_REGF_ERR_INT_FUNC] = QDMA_REGF(11, 0),
+ [QDMA_REGF_ERR_INT_VEC] = QDMA_REGF(22, 12),
+ [QDMA_REGF_ERR_INT_ARM] = QDMA_REGF(24, 24),
+};
+
+#endif /* __QDMA_REGS_DEF_H */
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
new file mode 100644
index 000000000000..b0a1f3ad851b
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-map-ops.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/amd_qdma.h>
+#include <linux/regmap.h>
+
+#include "qdma.h"
+
+#define CHAN_STR(q) (((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
+#define QDMA_REG_OFF(d, r) ((d)->roffs[r].off)
+
+/* MMIO regmap config for all QDMA registers */
+static const struct regmap_config qdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static inline struct qdma_queue *to_qdma_queue(struct dma_chan *chan)
+{
+ return container_of(chan, struct qdma_queue, vchan.chan);
+}
+
+static inline struct qdma_mm_vdesc *to_qdma_vdesc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct qdma_mm_vdesc, vdesc);
+}
+
+static inline u32 qdma_get_intr_ring_idx(struct qdma_device *qdev)
+{
+ u32 idx;
+
+ idx = qdev->qintr_rings[qdev->qintr_ring_idx++].ridx;
+ qdev->qintr_ring_idx %= qdev->qintr_ring_num;
+
+ return idx;
+}
+
+static u64 qdma_get_field(const struct qdma_device *qdev, const u32 *data,
+ enum qdma_reg_fields field)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit, hi_bit;
+ u64 value = 0, mask;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+
+ if (low_pos == hi_pos) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, low_bit);
+ value = (data[low_pos] & mask) >> low_bit;
+ } else if (hi_pos == low_pos + 1) {
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+ hi_bit = low_bit + (f->msb - f->lsb);
+ value = ((u64)data[hi_pos] << BITS_PER_TYPE(*data)) |
+ data[low_pos];
+ mask = GENMASK_ULL(hi_bit, low_bit);
+ value = (value & mask) >> low_bit;
+ } else {
+ hi_bit = f->msb % BITS_PER_TYPE(*data);
+ mask = GENMASK(hi_bit, 0);
+ value = data[hi_pos] & mask;
+ low_bit = f->msb - f->lsb - hi_bit;
+ value <<= low_bit;
+ low_bit -= 32;
+ value |= (u64)data[hi_pos - 1] << low_bit;
+ mask = GENMASK(31, 32 - low_bit);
+ value |= (data[hi_pos - 2] & mask) >> low_bit;
+ }
+
+ return value;
+}
+
+static void qdma_set_field(const struct qdma_device *qdev, u32 *data,
+ enum qdma_reg_fields field, u64 value)
+{
+ const struct qdma_reg_field *f = &qdev->rfields[field];
+ u16 low_pos, hi_pos, low_bit;
+
+ low_pos = f->lsb / BITS_PER_TYPE(*data);
+ hi_pos = f->msb / BITS_PER_TYPE(*data);
+ low_bit = f->lsb % BITS_PER_TYPE(*data);
+
+ data[low_pos++] |= value << low_bit;
+ if (low_pos <= hi_pos)
+ data[low_pos++] |= (u32)(value >> (32 - low_bit));
+ if (low_pos <= hi_pos)
+ data[low_pos] |= (u32)(value >> (64 - low_bit));
+}
+
+static inline int qdma_reg_write(const struct qdma_device *qdev,
+ const u32 *data, enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_write(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_write(qdev->regmap, r->off, *data);
+
+ return ret;
+}
+
+static inline int qdma_reg_read(const struct qdma_device *qdev, u32 *data,
+ enum qdma_regs reg)
+{
+ const struct qdma_reg *r = &qdev->roffs[reg];
+ int ret;
+
+ if (r->count > 1)
+ ret = regmap_bulk_read(qdev->regmap, r->off, data, r->count);
+ else
+ ret = regmap_read(qdev->regmap, r->off, data);
+
+ return ret;
+}
+
+static int qdma_context_cmd_execute(const struct qdma_device *qdev,
+ enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index)
+{
+ u32 value = 0;
+ int ret;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_INDX, index);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_CMD, cmd);
+ qdma_set_field(qdev, &value, QDMA_REGF_CMD_TYPE, type);
+
+ ret = qdma_reg_write(qdev, &value, QDMA_REGO_CTXT_CMD);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(qdev->regmap,
+ QDMA_REG_OFF(qdev, QDMA_REGO_CTXT_CMD),
+ value,
+ !qdma_get_field(qdev, &value,
+ QDMA_REGF_CMD_BUSY),
+ QDMA_POLL_INTRVL_US,
+ QDMA_POLL_TIMEOUT_US);
+ if (ret) {
+ qdma_err(qdev, "Context command execution timed out");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qdma_context_write_data(const struct qdma_device *qdev,
+ const u32 *data)
+{
+ u32 mask[QDMA_CTXT_REGMAP_LEN];
+ int ret;
+
+ memset(mask, ~0, sizeof(mask));
+
+ ret = qdma_reg_write(qdev, mask, QDMA_REGO_CTXT_MASK);
+ if (ret)
+ return ret;
+
+ ret = qdma_reg_write(qdev, data, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qdma_prep_sw_desc_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_BASE, ctxt->desc_base);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_VEC, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_FUNCTION_ID, qdev->fid);
+
+ qdma_set_field(qdev, data, QDMA_REGF_DESC_SIZE, QDMA_DESC_SIZE_32B);
+ qdma_set_field(qdev, data, QDMA_REGF_RING_ID, QDMA_DEFAULT_RING_ID);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MODE, QDMA_QUEUE_OP_MM);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBK_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_CHECK, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_ARM, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_IRQ_AGG, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_WBI_INTVL_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_ENABLE, 1);
+ qdma_set_field(qdev, data, QDMA_REGF_MRKR_DISABLE, 1);
+}
+
+static void qdma_prep_intr_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_intr *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_AGG_BASE, ctxt->agg_base);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VECTOR, ctxt->vec);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_SIZE, ctxt->size);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_VALID, ctxt->valid);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_COLOR, ctxt->color);
+ qdma_set_field(qdev, data, QDMA_REGF_INTR_FUNCTION_ID, qdev->fid);
+}
+
+static void qdma_prep_fmap_context(const struct qdma_device *qdev,
+ const struct qdma_ctxt_fmap *ctxt,
+ u32 *data)
+{
+ memset(data, 0, QDMA_CTXT_REGMAP_LEN * sizeof(*data));
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_BASE, ctxt->qbase);
+ qdma_set_field(qdev, data, QDMA_REGF_QUEUE_MAX, ctxt->qmax);
+}
+
+/*
+ * Program the indirect context register space
+ *
+ * Once the queue is enabled, context is dynamically updated by hardware. Any
+ * modification of the context through this API when the queue is enabled can
+ * result in unexpected behavior. Reading the context when the queue is enabled
+ * is not recommended as it can result in reduced performance.
+ */
+static int qdma_prog_context(struct qdma_device *qdev, enum qdma_ctxt_type type,
+ enum qdma_ctxt_cmd cmd, u16 index, u32 *ctxt)
+{
+ int ret;
+
+ mutex_lock(&qdev->ctxt_lock);
+ if (cmd == QDMA_CTXT_WRITE) {
+ ret = qdma_context_write_data(qdev, ctxt);
+ if (ret)
+ goto failed;
+ }
+
+ ret = qdma_context_cmd_execute(qdev, type, cmd, index);
+ if (ret)
+ goto failed;
+
+ if (cmd == QDMA_CTXT_READ) {
+ ret = qdma_reg_read(qdev, ctxt, QDMA_REGO_CTXT_DATA);
+ if (ret)
+ goto failed;
+ }
+
+failed:
+ mutex_unlock(&qdev->ctxt_lock);
+
+ return ret;
+}
+
+static int qdma_check_queue_status(struct qdma_device *qdev,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 status, data[QDMA_CTXT_REGMAP_LEN] = {0};
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
+ if (ret)
+ return ret;
+
+ status = qdma_get_field(qdev, data, QDMA_REGF_QUEUE_ENABLE);
+ if (status) {
+ qdma_err(qdev, "queue %d already in use", qid);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qdma_clear_queue_context(const struct qdma_queue *queue)
+{
+ enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH, };
+ enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH, };
+ struct qdma_device *qdev = queue->qdev;
+ enum qdma_ctxt_type *type;
+ int ret, num, i;
+
+ if (queue->dir == DMA_MEM_TO_DEV) {
+ type = h2c_types;
+ num = ARRAY_SIZE(h2c_types);
+ } else {
+ type = c2h_types;
+ num = ARRAY_SIZE(c2h_types);
+ }
+ for (i = 0; i < num; i++) {
+ ret = qdma_prog_context(qdev, type[i], QDMA_CTXT_CLEAR,
+ queue->qid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed to clear ctxt %d", type[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_setup_fmap_context(struct qdma_device *qdev)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_ctxt_fmap fmap;
+ int ret;
+
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_CLEAR,
+ qdev->fid, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clearing context");
+ return ret;
+ }
+
+ fmap.qbase = 0;
+ fmap.qmax = qdev->chan_num * 2;
+ qdma_prep_fmap_context(qdev, &fmap, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_FMAP, QDMA_CTXT_WRITE,
+ qdev->fid, ctxt);
+ if (ret)
+ qdma_err(qdev, "Failed setup fmap, ret %d", ret);
+
+ return ret;
+}
+
+static int qdma_setup_queue_context(struct qdma_device *qdev,
+ const struct qdma_ctxt_sw_desc *sw_desc,
+ enum dma_transfer_direction dir, u16 qid)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ enum qdma_ctxt_type type;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV)
+ type = QDMA_CTXT_DESC_SW_H2C;
+ else
+ type = QDMA_CTXT_DESC_SW_C2H;
+
+ qdma_prep_sw_desc_context(qdev, sw_desc, ctxt);
+ /* Setup SW descriptor context */
+ ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
+ if (ret)
+ qdma_err(qdev, "Failed setup SW desc ctxt for queue: %d", qid);
+
+ return ret;
+}
+
+/*
+ * Enable or disable memory-mapped DMA engines
+ * 1: enable, 0: disable
+ */
+static int qdma_sgdma_control(struct qdma_device *qdev, u32 ctrl)
+{
+ int ret;
+
+ ret = qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_H2C_CTRL);
+ ret |= qdma_reg_write(qdev, &ctrl, QDMA_REGO_MM_C2H_CTRL);
+
+ return ret;
+}
+
+static int qdma_get_hw_info(struct qdma_device *qdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
+ u32 value = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &value, QDMA_REGO_QUEUE_COUNT);
+ if (ret)
+ return ret;
+
+ value = qdma_get_field(qdev, &value, QDMA_REGF_QUEUE_COUNT) + 1;
+ if (pdata->max_mm_channels * 2 > value) {
+ qdma_err(qdev, "not enough hw queues %d", value);
+ return -EINVAL;
+ }
+ qdev->chan_num = pdata->max_mm_channels;
+
+ ret = qdma_reg_read(qdev, &qdev->fid, QDMA_REGO_FUNC_ID);
+ if (ret)
+ return ret;
+
+ qdma_info(qdev, "max channel %d, function id %d",
+ qdev->chan_num, qdev->fid);
+
+ return 0;
+}
+
+static inline int qdma_update_pidx(const struct qdma_queue *queue, u16 pidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
+ return regmap_write(qdev->regmap, queue->pidx_reg,
+ pidx | QDMA_QUEUE_ARM_BIT);
+}
+
+static inline int qdma_update_cidx(const struct qdma_queue *queue,
+ u16 ridx, u16 cidx)
+{
+ struct qdma_device *qdev = queue->qdev;
+
+ return regmap_write(qdev->regmap, queue->cidx_reg,
+ ((u32)ridx << 16) | cidx);
+}
+
+/**
+ * qdma_free_vdesc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ */
+static void qdma_free_vdesc(struct virt_dma_desc *vdesc)
+{
+ struct qdma_mm_vdesc *vd = to_qdma_vdesc(vdesc);
+
+ kfree(vd);
+}
+
+static int qdma_alloc_queues(struct qdma_device *qdev,
+ enum dma_transfer_direction dir)
+{
+ struct qdma_queue *q, **queues;
+ u32 i, pidx_base;
+ int ret;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ queues = &qdev->h2c_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_H2C_PIDX);
+ } else {
+ queues = &qdev->c2h_queues;
+ pidx_base = QDMA_REG_OFF(qdev, QDMA_REGO_C2H_PIDX);
+ }
+
+ *queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
+ GFP_KERNEL);
+ if (!*queues)
+ return -ENOMEM;
+
+ for (i = 0; i < qdev->chan_num; i++) {
+ ret = qdma_check_queue_status(qdev, dir, i);
+ if (ret)
+ return ret;
+
+ q = &(*queues)[i];
+ q->ring_size = QDMA_DEFAULT_RING_SIZE;
+ q->idx_mask = q->ring_size - 2;
+ q->qdev = qdev;
+ q->dir = dir;
+ q->qid = i;
+ q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
+ q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
+ i * QDMA_DMAP_REG_STRIDE;
+ q->vchan.desc_free = qdma_free_vdesc;
+ vchan_init(&q->vchan, &qdev->dma_dev);
+ }
+
+ return 0;
+}
+
+static int qdma_device_verify(struct qdma_device *qdev)
+{
+ u32 value;
+ int ret;
+
+ ret = regmap_read(qdev->regmap, QDMA_IDENTIFIER_REGOFF, &value);
+ if (ret)
+ return ret;
+
+ value = FIELD_GET(QDMA_IDENTIFIER_MASK, value);
+ if (value != QDMA_IDENTIFIER) {
+ qdma_err(qdev, "Invalid identifier");
+ return -ENODEV;
+ }
+ qdev->rfields = qdma_regfs_default;
+ qdev->roffs = qdma_regos_default;
+
+ return 0;
+}
+
+static int qdma_device_setup(struct qdma_device *qdev)
+{
+ struct device *dev = &qdev->pdev->dev;
+ u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
+ int ret = 0;
+
+ while (dev && get_dma_ops(dev))
+ dev = dev->parent;
+ if (!dev) {
+ qdma_err(qdev, "dma device not found");
+ return -EINVAL;
+ }
+ set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
+
+ ret = qdma_setup_fmap_context(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed setup fmap context");
+ return ret;
+ }
+
+ /* Setup global ring buffer size at QDMA_DEFAULT_RING_ID index */
+ ret = qdma_reg_write(qdev, &ring_sz, QDMA_REGO_RING_SIZE);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup ring %d of size %ld",
+ QDMA_DEFAULT_RING_ID, QDMA_DEFAULT_RING_SIZE);
+ return ret;
+ }
+
+ /* Enable memory-mapped DMA engine in both directions */
+ ret = qdma_sgdma_control(qdev, 1);
+ if (ret) {
+ qdma_err(qdev, "Failed to SGDMA with error %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc H2C queues, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdma_alloc_queues(qdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ qdma_err(qdev, "Failed to alloc C2H queues, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * qdma_free_queue_resources() - Free queue resources
+ * @chan: DMA channel
+ */
+static void qdma_free_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct device *dev = qdev->dma_dev.dev;
+
+ qdma_clear_queue_context(queue);
+ vchan_free_chan_resources(&queue->vchan);
+ dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ queue->desc_base, queue->dma_desc_base);
+}
+
+/**
+ * qdma_alloc_queue_resources() - Allocate queue resources
+ * @chan: DMA channel
+ */
+static int qdma_alloc_queue_resources(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_device *qdev = queue->qdev;
+ struct qdma_ctxt_sw_desc desc;
+ size_t size;
+ int ret;
+
+ ret = qdma_clear_queue_context(queue);
+ if (ret)
+ return ret;
+
+ size = queue->ring_size * QDMA_MM_DESC_SIZE;
+ queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+ &queue->dma_desc_base,
+ GFP_KERNEL);
+ if (!queue->desc_base) {
+ qdma_err(qdev, "Failed to allocate descriptor ring");
+ return -ENOMEM;
+ }
+
+ /* Setup SW descriptor queue context for DMA memory map */
+ desc.vec = qdma_get_intr_ring_idx(qdev);
+ desc.desc_base = queue->dma_desc_base;
+ ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
+ if (ret) {
+ qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
+ chan->name);
+ dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+ queue->dma_desc_base);
+ return ret;
+ }
+
+ queue->pidx = 0;
+ queue->cidx = 0;
+
+ return 0;
+}
+
+static bool qdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ struct qdma_queue_info *info = param;
+
+ return info->dir == queue->dir;
+}
+
+static int qdma_xfer_start(struct qdma_queue *queue)
+{
+ struct qdma_device *qdev = queue->qdev;
+ int ret;
+
+ if (!vchan_next_desc(&queue->vchan))
+ return 0;
+
+ qdma_dbg(qdev, "Tnx kickoff with P: %d for %s%d",
+ queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);
+
+ ret = qdma_update_pidx(queue, queue->issued_vdesc->pidx);
+ if (ret) {
+ qdma_err(qdev, "Failed to update PIDX to %d for %s queue: %d",
+ queue->pidx, CHAN_STR(queue), queue->qid);
+ }
+
+ return ret;
+}
+
+static void qdma_issue_pending(struct dma_chan *chan)
+{
+ struct qdma_queue *queue = to_qdma_queue(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->vchan.lock, flags);
+ if (vchan_issue_pending(&queue->vchan)) {
+ if (queue->submitted_vdesc) {
+ queue->issued_vdesc = queue->submitted_vdesc;
+ queue->submitted_vdesc = NULL;
+ }
+ qdma_xfer_start(queue);
+ }
+
+ spin_unlock_irqrestore(&queue->vchan.lock, flags);
+}
+
+static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
+{
+ struct qdma_mm_desc *desc;
+
+ if (((q->pidx + 1) & q->idx_mask) == q->cidx)
+ return NULL;
+
+ desc = q->desc_base + q->pidx;
+ q->pidx = (q->pidx + 1) & q->idx_mask;
+
+ return desc;
+}
+
+static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
+{
+ struct qdma_mm_desc *desc;
+ struct scatterlist *sg;
+ u64 addr, *src, *dst;
+ u32 rest, len;
+ int ret = 0;
+ u32 i;
+
+ if (!vdesc->sg_len)
+ return 0;
+
+ if (q->dir == DMA_MEM_TO_DEV) {
+ dst = &vdesc->dev_addr;
+ src = &addr;
+ } else {
+ dst = &addr;
+ src = &vdesc->dev_addr;
+ }
+
+ for_each_sg(vdesc->sgl, sg, vdesc->sg_len, i) {
+ addr = sg_dma_address(sg) + vdesc->sg_off;
+ rest = sg_dma_len(sg) - vdesc->sg_off;
+ while (rest) {
+ len = min_t(u32, rest, QDMA_MM_DESC_MAX_LEN);
+ desc = qdma_get_desc(q);
+ if (!desc) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ desc->src_addr = cpu_to_le64(*src);
+ desc->dst_addr = cpu_to_le64(*dst);
+ desc->len = cpu_to_le32(len);
+
+ vdesc->dev_addr += len;
+ vdesc->sg_off += len;
+ vdesc->pending_descs++;
+ addr += len;
+ rest -= len;
+ }
+ vdesc->sg_off = 0;
+ }
+out:
+ vdesc->sg_len -= i;
+ vdesc->pidx = q->pidx;
+ return ret;
+}
+
+static void qdma_fill_pending_vdesc(struct qdma_queue *q)
+{
+ struct virt_dma_chan *vc = &q->vchan;
+ struct qdma_mm_vdesc *vdesc = NULL;
+ struct virt_dma_desc *vd;
+ int ret;
+
+ if (!list_empty(&vc->desc_issued)) {
+ vd = &q->issued_vdesc->vdesc;
+ list_for_each_entry_from(vd, &vc->desc_issued, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret) {
+ q->issued_vdesc = vdesc;
+ return;
+ }
+ }
+ q->issued_vdesc = vdesc;
+ }
+
+ if (list_empty(&vc->desc_submitted))
+ return;
+
+ if (q->submitted_vdesc)
+ vd = &q->submitted_vdesc->vdesc;
+ else
+ vd = list_first_entry(&vc->desc_submitted, typeof(*vd), node);
+
+ list_for_each_entry_from(vd, &vc->desc_submitted, node) {
+ vdesc = to_qdma_vdesc(vd);
+ ret = qdma_hw_enqueue(q, vdesc);
+ if (ret)
+ break;
+ }
+ q->submitted_vdesc = vdesc;
+}
+
+static dma_cookie_t qdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct qdma_queue *q = to_qdma_queue(&vc->chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ vd = container_of(tx, struct virt_dma_desc, tx);
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ list_move_tail(&vd->node, &vc->desc_submitted);
+ qdma_fill_pending_vdesc(q);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+qdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+ struct dma_async_tx_descriptor *tx;
+ struct qdma_mm_vdesc *vdesc;
+
+ vdesc = kzalloc(sizeof(*vdesc), GFP_NOWAIT);
+ if (!vdesc)
+ return NULL;
+ vdesc->sgl = sgl;
+ vdesc->sg_len = sg_len;
+ if (dir == DMA_MEM_TO_DEV)
+ vdesc->dev_addr = q->cfg.dst_addr;
+ else
+ vdesc->dev_addr = q->cfg.src_addr;
+
+ tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
+ tx->tx_submit = qdma_tx_submit;
+
+ return tx;
+}
+
+static int qdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct qdma_queue *q = to_qdma_queue(chan);
+
+ memcpy(&q->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int qdma_arm_err_intr(const struct qdma_device *qdev)
+{
+ u32 value = 0;
+
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_FUNC, qdev->fid);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_VEC, qdev->err_irq_idx);
+ qdma_set_field(qdev, &value, QDMA_REGF_ERR_INT_ARM, 1);
+
+ return qdma_reg_write(qdev, &value, QDMA_REGO_ERR_INT);
+}
+
+static irqreturn_t qdma_error_isr(int irq, void *data)
+{
+ struct qdma_device *qdev = data;
+ u32 err_stat = 0;
+ int ret;
+
+ ret = qdma_reg_read(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret) {
+ qdma_err(qdev, "read error state failed, ret %d", ret);
+ goto out;
+ }
+
+ qdma_err(qdev, "global error %d", err_stat);
+ ret = qdma_reg_write(qdev, &err_stat, QDMA_REGO_ERR_STAT);
+ if (ret)
+ qdma_err(qdev, "clear error state failed, ret %d", ret);
+
+out:
+ qdma_arm_err_intr(qdev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qdma_queue_isr(int irq, void *data)
+{
+ struct qdma_intr_ring *intr = data;
+ struct qdma_queue *q = NULL;
+ struct qdma_device *qdev;
+ u32 index, comp_desc;
+ u64 intr_ent;
+ u8 color;
+ int ret;
+ u16 qid;
+
+ qdev = intr->qdev;
+ index = intr->cidx;
+ while (1) {
+ struct virt_dma_desc *vd;
+ struct qdma_mm_vdesc *vdesc;
+ unsigned long flags;
+ u32 cidx;
+
+ intr_ent = le64_to_cpu(intr->base[index]);
+ color = FIELD_GET(QDMA_INTR_MASK_COLOR, intr_ent);
+ if (color != intr->color)
+ break;
+
+ qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent);
+ if (FIELD_GET(QDMA_INTR_MASK_TYPE, intr_ent))
+ q = qdev->c2h_queues;
+ else
+ q = qdev->h2c_queues;
+ q += qid;
+
+ cidx = FIELD_GET(QDMA_INTR_MASK_CIDX, intr_ent);
+
+ spin_lock_irqsave(&q->vchan.lock, flags);
+ comp_desc = (cidx - q->cidx) & q->idx_mask;
+
+ vd = vchan_next_desc(&q->vchan);
+ if (!vd)
+ goto skip;
+
+ vdesc = to_qdma_vdesc(vd);
+ while (comp_desc > vdesc->pending_descs) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ comp_desc -= vdesc->pending_descs;
+ vd = vchan_next_desc(&q->vchan);
+ vdesc = to_qdma_vdesc(vd);
+ }
+ vdesc->pending_descs -= comp_desc;
+ if (!vdesc->pending_descs && QDMA_VDESC_QUEUED(vdesc)) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ q->cidx = cidx;
+
+ qdma_fill_pending_vdesc(q);
+ qdma_xfer_start(q);
+
+skip:
+ spin_unlock_irqrestore(&q->vchan.lock, flags);
+
+ /*
+ * Wrap the index value and flip the expected color value if
+ * interrupt aggregation PIDX has wrapped around.
+ */
+ index++;
+ index &= QDMA_INTR_RING_IDX_MASK;
+ if (!index)
+ intr->color = !intr->color;
+ }
+
+ /*
+ * Update the software interrupt aggregation ring CIDX if a valid entry
+ * was found.
+ */
+ if (q) {
+ qdma_dbg(qdev, "update intr ring%d %d", intr->ridx, index);
+
+ /*
+ * Record the last read index of status descriptor from the
+ * interrupt aggregation ring.
+ */
+ intr->cidx = index;
+
+ ret = qdma_update_cidx(q, intr->ridx, index);
+ if (ret) {
+ qdma_err(qdev, "Failed to update IRQ CIDX");
+ return IRQ_NONE;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int qdma_init_error_irq(struct qdma_device *qdev)
+{
+ struct device *dev = &qdev->pdev->dev;
+ int ret;
+ u32 vec;
+
+ vec = qdev->queue_irq_start - 1;
+
+ ret = devm_request_threaded_irq(dev, vec, NULL, qdma_error_isr,
+ IRQF_ONESHOT, "amd-qdma-error", qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to request error IRQ vector: %d", vec);
+ return ret;
+ }
+
+ ret = qdma_arm_err_intr(qdev);
+ if (ret)
+ qdma_err(qdev, "Failed to arm err interrupt, ret %d", ret);
+
+ return ret;
+}
+
+static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
+{
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct device *dev = &qdev->pdev->dev;
+ struct qdma_intr_ring *ring;
+ struct qdma_ctxt_intr intr_ctxt;
+ u32 vector;
+ int ret, i;
+
+ qdev->qintr_ring_num = qdev->queue_irq_num;
+ qdev->qintr_rings = devm_kcalloc(dev, qdev->qintr_ring_num,
+ sizeof(*qdev->qintr_rings),
+ GFP_KERNEL);
+ if (!qdev->qintr_rings)
+ return -ENOMEM;
+
+ vector = qdev->queue_irq_start;
+ for (i = 0; i < qdev->qintr_ring_num; i++, vector++) {
+ ring = &qdev->qintr_rings[i];
+ ring->qdev = qdev;
+ ring->msix_id = qdev->err_irq_idx + i + 1;
+ ring->ridx = i;
+ ring->color = 1;
+ ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+ &ring->dev_base, GFP_KERNEL);
+ if (!ring->base) {
+ qdma_err(qdev, "Failed to alloc intr ring %d", i);
+ return -ENOMEM;
+ }
+ intr_ctxt.agg_base = QDMA_INTR_RING_BASE(ring->dev_base);
+ intr_ctxt.size = (QDMA_INTR_RING_SIZE - 1) / 4096;
+ intr_ctxt.vec = ring->msix_id;
+ intr_ctxt.valid = true;
+ intr_ctxt.color = true;
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_CLEAR, ring->ridx, NULL);
+ if (ret) {
+ qdma_err(qdev, "Failed clear intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ qdma_prep_intr_context(qdev, &intr_ctxt, ctxt);
+ ret = qdma_prog_context(qdev, QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_WRITE, ring->ridx, ctxt);
+ if (ret) {
+ qdma_err(qdev, "Failed setup intr ctx, ret %d", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, vector, NULL,
+ qdma_queue_isr, IRQF_ONESHOT,
+ "amd-qdma-queue", ring);
+ if (ret) {
+ qdma_err(qdev, "Failed to request irq %d", vector);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int qdma_intr_init(struct qdma_device *qdev)
+{
+ int ret;
+
+ ret = qdma_init_error_irq(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init error IRQs, ret %d", ret);
+ return ret;
+ }
+
+ ret = qdmam_alloc_qintr_rings(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to init queue IRQs, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void amd_qdma_remove(struct platform_device *pdev)
+{
+ struct qdma_device *qdev = platform_get_drvdata(pdev);
+
+ qdma_sgdma_control(qdev, 0);
+ dma_async_device_unregister(&qdev->dma_dev);
+
+ mutex_destroy(&qdev->ctxt_lock);
+}
+
+static int amd_qdma_probe(struct platform_device *pdev)
+{
+ struct qdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct qdma_device *qdev;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
+ if (!qdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, qdev);
+ qdev->pdev = pdev;
+ mutex_init(&qdev->ctxt_lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ qdma_err(qdev, "Failed to get IRQ resource");
+ ret = -ENODEV;
+ goto failed;
+ }
+ qdev->err_irq_idx = pdata->irq_index;
+ qdev->queue_irq_start = res->start + 1;
+ qdev->queue_irq_num = resource_size(res) - 1;
+
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ qdma_err(qdev, "Failed to map IO resource, err %d", ret);
+ goto failed;
+ }
+
+ qdev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &qdma_regmap_config);
+ if (IS_ERR(qdev->regmap)) {
+ ret = PTR_ERR(qdev->regmap);
+ qdma_err(qdev, "Regmap init failed, err %d", ret);
+ goto failed;
+ }
+
+ ret = qdma_device_verify(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_get_hw_info(qdev);
+ if (ret)
+ goto failed;
+
+ INIT_LIST_HEAD(&qdev->dma_dev.channels);
+
+ ret = qdma_device_setup(qdev);
+ if (ret)
+ goto failed;
+
+ ret = qdma_intr_init(qdev);
+ if (ret) {
+ qdma_err(qdev, "Failed to initialize IRQs %d", ret);
+ goto failed_disable_engine;
+ }
+
+ dma_cap_set(DMA_SLAVE, qdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, qdev->dma_dev.cap_mask);
+
+ qdev->dma_dev.dev = &pdev->dev;
+ qdev->dma_dev.filter.map = pdata->device_map;
+ qdev->dma_dev.filter.mapcnt = qdev->chan_num * 2;
+ qdev->dma_dev.filter.fn = qdma_filter_fn;
+ qdev->dma_dev.device_alloc_chan_resources = qdma_alloc_queue_resources;
+ qdev->dma_dev.device_free_chan_resources = qdma_free_queue_resources;
+ qdev->dma_dev.device_prep_slave_sg = qdma_prep_device_sg;
+ qdev->dma_dev.device_config = qdma_device_config;
+ qdev->dma_dev.device_issue_pending = qdma_issue_pending;
+ qdev->dma_dev.device_tx_status = dma_cookie_status;
+ qdev->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&qdev->dma_dev);
+ if (ret) {
+ qdma_err(qdev, "Failed to register AMD QDMA: %d", ret);
+ goto failed_disable_engine;
+ }
+
+ return 0;
+
+failed_disable_engine:
+ qdma_sgdma_control(qdev, 0);
+failed:
+ mutex_destroy(&qdev->ctxt_lock);
+ qdma_err(qdev, "Failed to probe AMD QDMA driver");
+ return ret;
+}
+
+static struct platform_driver amd_qdma_driver = {
+ .driver = {
+ .name = "amd-qdma",
+ },
+ .probe = amd_qdma_probe,
+ .remove_new = amd_qdma_remove,
+};
+
+module_platform_driver(amd_qdma_driver);
+
+MODULE_DESCRIPTION("AMD QDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/amd/qdma/qdma.h b/drivers/dma/amd/qdma/qdma.h
new file mode 100644
index 000000000000..94089f1f0c11
--- /dev/null
+++ b/drivers/dma/amd/qdma/qdma.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * DMA header for AMD Queue-based DMA Subsystem
+ *
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __QDMA_H
+#define __QDMA_H
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../../virt-dma.h"
+
+#define DISABLE 0
+#define ENABLE 1
+
+#define QDMA_MIN_IRQ 3
+#define QDMA_INTR_NAME_MAX_LEN 30
+#define QDMA_INTR_PREFIX "amd-qdma"
+
+#define QDMA_IDENTIFIER 0x1FD3
+#define QDMA_DEFAULT_RING_SIZE (BIT(10) + 1)
+#define QDMA_DEFAULT_RING_ID 0
+#define QDMA_POLL_INTRVL_US 10 /* 10us */
+#define QDMA_POLL_TIMEOUT_US (500 * 1000) /* 500ms */
+#define QDMA_DMAP_REG_STRIDE 16
+#define QDMA_CTXT_REGMAP_LEN 8 /* 8 regs */
+#define QDMA_MM_DESC_SIZE 32 /* Bytes */
+#define QDMA_MM_DESC_LEN_BITS 28
+#define QDMA_MM_DESC_MAX_LEN (BIT(QDMA_MM_DESC_LEN_BITS) - 1)
+#define QDMA_MIN_DMA_ALLOC_SIZE 4096
+#define QDMA_INTR_RING_SIZE BIT(13)
+#define QDMA_INTR_RING_IDX_MASK GENMASK(9, 0)
+#define QDMA_INTR_RING_BASE(_addr) ((_addr) >> 12)
+
+#define QDMA_IDENTIFIER_REGOFF 0x0
+#define QDMA_IDENTIFIER_MASK GENMASK(31, 16)
+#define QDMA_QUEUE_ARM_BIT BIT(16)
+
+#define qdma_err(qdev, fmt, args...) \
+ dev_err(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_dbg(qdev, fmt, args...) \
+ dev_dbg(&(qdev)->pdev->dev, fmt, ##args)
+
+#define qdma_info(qdev, fmt, args...) \
+ dev_info(&(qdev)->pdev->dev, fmt, ##args)
+
+enum qdma_reg_fields {
+ QDMA_REGF_IRQ_ENABLE,
+ QDMA_REGF_WBK_ENABLE,
+ QDMA_REGF_WBI_CHECK,
+ QDMA_REGF_IRQ_ARM,
+ QDMA_REGF_IRQ_VEC,
+ QDMA_REGF_IRQ_AGG,
+ QDMA_REGF_WBI_INTVL_ENABLE,
+ QDMA_REGF_MRKR_DISABLE,
+ QDMA_REGF_QUEUE_ENABLE,
+ QDMA_REGF_QUEUE_MODE,
+ QDMA_REGF_DESC_BASE,
+ QDMA_REGF_DESC_SIZE,
+ QDMA_REGF_RING_ID,
+ QDMA_REGF_CMD_INDX,
+ QDMA_REGF_CMD_CMD,
+ QDMA_REGF_CMD_TYPE,
+ QDMA_REGF_CMD_BUSY,
+ QDMA_REGF_QUEUE_COUNT,
+ QDMA_REGF_QUEUE_MAX,
+ QDMA_REGF_QUEUE_BASE,
+ QDMA_REGF_FUNCTION_ID,
+ QDMA_REGF_INTR_AGG_BASE,
+ QDMA_REGF_INTR_VECTOR,
+ QDMA_REGF_INTR_SIZE,
+ QDMA_REGF_INTR_VALID,
+ QDMA_REGF_INTR_COLOR,
+ QDMA_REGF_INTR_FUNCTION_ID,
+ QDMA_REGF_ERR_INT_FUNC,
+ QDMA_REGF_ERR_INT_VEC,
+ QDMA_REGF_ERR_INT_ARM,
+ QDMA_REGF_MAX
+};
+
+enum qdma_regs {
+ QDMA_REGO_CTXT_DATA,
+ QDMA_REGO_CTXT_CMD,
+ QDMA_REGO_CTXT_MASK,
+ QDMA_REGO_MM_H2C_CTRL,
+ QDMA_REGO_MM_C2H_CTRL,
+ QDMA_REGO_QUEUE_COUNT,
+ QDMA_REGO_RING_SIZE,
+ QDMA_REGO_H2C_PIDX,
+ QDMA_REGO_C2H_PIDX,
+ QDMA_REGO_INTR_CIDX,
+ QDMA_REGO_FUNC_ID,
+ QDMA_REGO_ERR_INT,
+ QDMA_REGO_ERR_STAT,
+ QDMA_REGO_MAX
+};
+
+struct qdma_reg_field {
+ u16 lsb; /* Least significant bit of field */
+ u16 msb; /* Most significant bit of field */
+};
+
+struct qdma_reg {
+ u32 off;
+ u32 count;
+};
+
+#define QDMA_REGF(_msb, _lsb) { \
+ .lsb = (_lsb), \
+ .msb = (_msb), \
+}
+
+#define QDMA_REGO(_off, _count) { \
+ .off = (_off), \
+ .count = (_count), \
+}
+
+enum qdma_desc_size {
+ QDMA_DESC_SIZE_8B,
+ QDMA_DESC_SIZE_16B,
+ QDMA_DESC_SIZE_32B,
+ QDMA_DESC_SIZE_64B,
+};
+
+enum qdma_queue_op_mode {
+ QDMA_QUEUE_OP_STREAM,
+ QDMA_QUEUE_OP_MM,
+};
+
+enum qdma_ctxt_type {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_WRB,
+ QDMA_CTXT_PFTCH,
+ QDMA_CTXT_INTR_COAL,
+ QDMA_CTXT_RSVD,
+ QDMA_CTXT_HOST_PROFILE,
+ QDMA_CTXT_TIMER,
+ QDMA_CTXT_FMAP,
+ QDMA_CTXT_FNC_STS,
+};
+
+enum qdma_ctxt_cmd {
+ QDMA_CTXT_CLEAR,
+ QDMA_CTXT_WRITE,
+ QDMA_CTXT_READ,
+ QDMA_CTXT_INVALIDATE,
+ QDMA_CTXT_MAX
+};
+
+struct qdma_ctxt_sw_desc {
+ u64 desc_base;
+ u16 vec;
+};
+
+struct qdma_ctxt_intr {
+ u64 agg_base;
+ u16 vec;
+ u32 size;
+ bool valid;
+ bool color;
+};
+
+struct qdma_ctxt_fmap {
+ u16 qbase;
+ u16 qmax;
+};
+
+struct qdma_device;
+
+struct qdma_mm_desc {
+ __le64 src_addr;
+ __le32 len;
+ __le32 reserved1;
+ __le64 dst_addr;
+ __le64 reserved2;
+} __packed;
+
+struct qdma_mm_vdesc {
+ struct virt_dma_desc vdesc;
+ struct qdma_queue *queue;
+ struct scatterlist *sgl;
+ u64 sg_off;
+ u32 sg_len;
+ u64 dev_addr;
+ u32 pidx;
+ u32 pending_descs;
+ struct dma_slave_config cfg;
+};
+
+#define QDMA_VDESC_QUEUED(vdesc) (!(vdesc)->sg_len)
+
+struct qdma_queue {
+ struct qdma_device *qdev;
+ struct virt_dma_chan vchan;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ struct qdma_mm_desc *desc_base;
+ struct qdma_mm_vdesc *submitted_vdesc;
+ struct qdma_mm_vdesc *issued_vdesc;
+ dma_addr_t dma_desc_base;
+ u32 pidx_reg;
+ u32 cidx_reg;
+ u32 ring_size;
+ u32 idx_mask;
+ u16 qid;
+ u32 pidx;
+ u32 cidx;
+};
+
+struct qdma_intr_ring {
+ struct qdma_device *qdev;
+ __le64 *base;
+ dma_addr_t dev_base;
+ char msix_name[QDMA_INTR_NAME_MAX_LEN];
+ u32 msix_vector;
+ u16 msix_id;
+ u32 ring_size;
+ u16 ridx;
+ u16 cidx;
+ u8 color;
+};
+
+#define QDMA_INTR_MASK_PIDX GENMASK_ULL(15, 0)
+#define QDMA_INTR_MASK_CIDX GENMASK_ULL(31, 16)
+#define QDMA_INTR_MASK_DESC_COLOR GENMASK_ULL(32, 32)
+#define QDMA_INTR_MASK_STATE GENMASK_ULL(34, 33)
+#define QDMA_INTR_MASK_ERROR GENMASK_ULL(36, 35)
+#define QDMA_INTR_MASK_TYPE GENMASK_ULL(38, 38)
+#define QDMA_INTR_MASK_QID GENMASK_ULL(62, 39)
+#define QDMA_INTR_MASK_COLOR GENMASK_ULL(63, 63)
+
+struct qdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *regmap;
+ struct mutex ctxt_lock; /* protect ctxt registers */
+ const struct qdma_reg_field *rfields;
+ const struct qdma_reg *roffs;
+ struct qdma_queue *h2c_queues;
+ struct qdma_queue *c2h_queues;
+ struct qdma_intr_ring *qintr_rings;
+ u32 qintr_ring_num;
+ u32 qintr_ring_idx;
+ u32 chan_num;
+ u32 queue_irq_start;
+ u32 queue_irq_num;
+ u32 err_irq_idx;
+ u32 fid;
+};
+
+extern const struct qdma_reg qdma_regos_default[QDMA_REGO_MAX];
+extern const struct qdma_reg_field qdma_regfs_default[QDMA_REGF_MAX];
+
+#endif /* __QDMA_H */
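
The QDMA_REGF(msb, lsb) descriptors above name bit ranges inside the 8-word (256-bit) context image written through QDMA_REGO_CTXT_DATA, so a field such as QDMA_REGF_DESC_BASE (bits 127:64) spans two of the u32 words. As a standalone illustration (not part of the patch, helper name hypothetical), the sketch below shows the two-word extraction case; qdma_get_field() in qdma.c additionally handles fields that span three words.

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: extract bits [msb:lsb] from an array of u32 context
 * words, for fields spanning at most two adjacent words (e.g. DESC_BASE
 * at 127:64 or IRQ_VEC at 138:128).
 */
static u64 ctxt_get_bits(const u32 *data, unsigned int msb, unsigned int lsb)
{
        unsigned int word = lsb / 32;
        unsigned int shift = lsb % 32;
        u64 raw = data[word];

        if (msb / 32 != word)           /* field crosses a word boundary */
                raw |= (u64)data[word + 1] << 32;

        return (raw >> shift) & GENMASK_ULL(msb - lsb, 0);
}

/* Example: with ctxt[2] = 0x44556677 and ctxt[3] = 0x00112233,
 * ctxt_get_bits(ctxt, 127, 64) returns 0x0011223344556677.
 */
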
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 40052d1bd0b5..baebddc740b0 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -339,7 +339,7 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
* @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
- * @all_chan_mask: all channels availlable in a mask
+ * @all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
* @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
@@ -668,7 +668,7 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* CTRLA is read in turn, next the DSCR is read a second time. If the two
* consecutive read values of the DSCR are the same then we assume both refers
* to the very same LLI as well as the CTRLA value read inbetween does. For
- * cyclic tranfers, the assumption is that a full loop is "not so fast". If the
+ * cyclic transfers, the assumption is that a full loop is "not so fast". If the
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
* reach. This algorithm is very unlikely not to find a stable value for DSCR.
@@ -700,7 +700,7 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
break;
/*
- * DSCR has changed inside the DMA controller, so the previouly
+ * DSCR has changed inside the DMA controller, so the previously
* read value of CTRLA may refer to an already processed
* descriptor hence could be outdated. We need to update ctrla
* to match the current descriptor.
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index fbaacb4c19b2..cfa6e1167a1f 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -15,7 +15,7 @@
* number of hardware rings over one or more SBA hardware devices. By
* design, the internal buffer size of SBA hardware device is limited
* but all offload operations supported by SBA can be broken down into
- * multiple small size requests and executed parallely on multiple SBA
+ * multiple small size requests and executed parallelly on multiple SBA
* hardware devices for achieving high through-put.
*
* The Broadcom SBA RAID driver does not require any register programming
@@ -135,7 +135,7 @@ struct sba_device {
u32 max_xor_srcs;
u32 max_resp_pool_size;
u32 max_cmds_pool_size;
- /* Maibox client and Mailbox channels */
+ /* Mailbox client and Mailbox channels */
struct mbox_client client;
struct mbox_chan *mchan;
struct device *mbox_dev;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 9d74fe97452e..e1b92b4d7b05 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -369,7 +369,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
/* the last frame requires extra flags */
d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
- /* detect a size missmatch */
+ /* detect a size mismatch */
if (buf_len && (d->size != buf_len))
goto error_cb;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c380a4dda77a..c1357d7f3dc6 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1070,7 +1070,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
if (!name)
dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
else
- dev_set_name(&chan->dev->device, name);
+ dev_set_name(&chan->dev->device, "%s", name);
rc = device_register(&chan->dev->device);
if (rc)
goto err_out_ida;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 1f201a542b37..91b2fbc0b864 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -500,7 +500,7 @@ static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
per_sec *= val;
per_sec = INT_TO_FIXPT(per_sec);
- do_div(per_sec, runtime);
+ do_div(per_sec, (u32)runtime);
return per_sec;
}
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
index 10e8f0715114..e3f8db4fe909 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -17,8 +17,8 @@ enum dw_hdma_control {
DW_HDMA_V0_CB = BIT(0),
DW_HDMA_V0_TCB = BIT(1),
DW_HDMA_V0_LLP = BIT(2),
- DW_HDMA_V0_LIE = BIT(3),
- DW_HDMA_V0_RIE = BIT(4),
+ DW_HDMA_V0_LWIE = BIT(3),
+ DW_HDMA_V0_RWIE = BIT(4),
DW_HDMA_V0_CCS = BIT(8),
DW_HDMA_V0_LLE = BIT(9),
};
@@ -195,25 +195,14 @@ static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
- struct dw_edma_chan *chan = chunk->chan;
u32 control = 0, i = 0;
- int j;
if (chunk->cb)
control = DW_HDMA_V0_CB;
- j = chunk->bursts_alloc;
- list_for_each_entry(child, &chunk->burst->list, list) {
- j--;
- if (!j) {
- control |= DW_HDMA_V0_LIE;
- if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
- control |= DW_HDMA_V0_RIE;
- }
-
+ list_for_each_entry(child, &chunk->burst->list, list)
dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
child->sar, child->dar);
- }
control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
if (!chunk->cb)
@@ -247,10 +236,11 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
if (first) {
/* Enable engine */
SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
- /* Interrupt enable&unmask - done, abort */
- tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
- HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ /* Interrupt unmask - stop, abort */
+ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
+ tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
+ /* Interrupt enable - stop, abort */
+ tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5f7d690e3dba..dd75f97a33b3 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -621,12 +622,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dw_desc *prev;
struct dw_desc *first;
u32 ctllo, ctlhi;
- u8 m_master = dwc->dws.m_master;
- u8 lms = DWC_LLP_LMS(m_master);
+ u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
dma_addr_t reg;
unsigned int reg_width;
unsigned int mem_width;
- unsigned int data_width = dw->pdata->data_width[m_master];
unsigned int i;
struct scatterlist *sg;
size_t total_len = 0;
@@ -660,7 +659,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = __ffs(data_width | mem | len);
+ mem_width = __ffs(sconfig->src_addr_width | mem | len);
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -720,7 +719,7 @@ slave_sg_fromdev_fill_desc:
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
lli_write(desc, ctlhi, ctlhi);
- mem_width = __ffs(data_width | mem);
+ mem_width = __ffs(sconfig->dst_addr_width | mem);
lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
desc->len = dlen;
@@ -780,20 +779,108 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
-static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+static int dwc_verify_maxburst(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(chan->device);
- memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+ dwc->dma_sconfig.src_maxburst =
+ clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
+ dwc->dma_sconfig.dst_maxburst =
+ clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
dwc->dma_sconfig.src_maxburst =
- clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
+ rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
dwc->dma_sconfig.dst_maxburst =
- clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
+ rounddown_pow_of_two(dwc->dma_sconfig.dst_maxburst);
- dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
- dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
+ return 0;
+}
+
+static int dwc_verify_p_buswidth(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ u32 reg_width, max_width;
+
+ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ reg_width = dwc->dma_sconfig.dst_addr_width;
+ else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+ reg_width = dwc->dma_sconfig.src_addr_width;
+ else /* DMA_MEM_TO_MEM */
+ return 0;
+
+ max_width = dw->pdata->data_width[dwc->dws.p_master];
+
+ /* Fall-back to 1-byte transfer width if undefined */
+ if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (!is_power_of_2(reg_width) || reg_width > max_width)
+ return -EINVAL;
+ else /* bus width is valid */
+ return 0;
+
+ /* Update undefined addr width value */
+ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ dwc->dma_sconfig.dst_addr_width = reg_width;
+ else /* DMA_DEV_TO_MEM */
+ dwc->dma_sconfig.src_addr_width = reg_width;
+
+ return 0;
+}
+
+static int dwc_verify_m_buswidth(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ u32 reg_width, reg_burst, mem_width;
+
+ mem_width = dw->pdata->data_width[dwc->dws.m_master];
+
+ /*
+ * It's possible to have a data portion locked in the DMA FIFO in case
+ * of the channel suspension. Subsequent channel disabling will cause
+ * that data silent loss. In order to prevent that maintain the src and
+ * dst transfer widths coherency by means of the relation:
+ * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
+ * Look for the details in the commit message that brings this change.
+ *
+ * Note the DMA configs utilized in the calculations below must have
+ * been verified to have correct values by this method call.
+ */
+ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
+ reg_width = dwc->dma_sconfig.dst_addr_width;
+ if (mem_width < reg_width)
+ return -EINVAL;
+
+ dwc->dma_sconfig.src_addr_width = mem_width;
+ } else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
+ reg_width = dwc->dma_sconfig.src_addr_width;
+ reg_burst = dwc->dma_sconfig.src_maxburst;
+
+ dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
+ }
+
+ return 0;
+}
+
+static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ int ret;
+
+ memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
+
+ ret = dwc_verify_maxburst(chan);
+ if (ret)
+ return ret;
+
+ ret = dwc_verify_p_buswidth(chan);
+ if (ret)
+ return ret;
+
+ ret = dwc_verify_m_buswidth(chan);
+ if (ret)
+ return ret;
return 0;
}
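
The refactored dwc_config() path above splits validation into three steps: burst normalisation, peripheral bus-width checks and memory bus-width derivation. As a quick illustration of the first step, the standalone sketch below (plain user-space C, not part of the patch) shows what dwc_verify_maxburst() does to a requested burst length: clamp into the 1..max_burst range, then round down to a power of two. clamp_u() and rounddown_pow_of_two() are simplified stand-ins for the kernel helpers, and max_burst = 16 is an assumed channel capability.

/* Sketch of the maxburst normalisation in dwc_verify_maxburst() above. */
#include <stdio.h>

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int rounddown_pow_of_two(unsigned int v)
{
	while (v & (v - 1))
		v &= v - 1;	/* clear the lowest set bit until one bit remains */
	return v;
}

int main(void)
{
	unsigned int max_burst = 16;	/* assumed channel capability */
	unsigned int requests[] = { 0, 1, 3, 7, 9, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		unsigned int burst = clamp_u(requests[i], 1, max_burst);

		burst = rounddown_pow_of_two(burst);
		printf("requested %2u -> programmed %2u\n", requests[i], burst);
	}
	return 0;
}
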
@@ -1068,7 +1155,7 @@ int do_dma_probe(struct dw_dma_chip *chip)
bool autocfg = false;
unsigned int dw_params;
unsigned int i;
- int err;
+ int ret;
dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
if (!dw->pdata)
@@ -1084,7 +1171,7 @@ int do_dma_probe(struct dw_dma_chip *chip)
autocfg = dw_params >> DW_PARAMS_EN & 1;
if (!autocfg) {
- err = -EINVAL;
+ ret = -EINVAL;
goto err_pdata;
}
@@ -1104,7 +1191,7 @@ int do_dma_probe(struct dw_dma_chip *chip)
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
- err = -EINVAL;
+ ret = -EINVAL;
goto err_pdata;
} else {
memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
@@ -1116,7 +1203,7 @@ int do_dma_probe(struct dw_dma_chip *chip)
dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
GFP_KERNEL);
if (!dw->chan) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_pdata;
}
@@ -1134,15 +1221,15 @@ int do_dma_probe(struct dw_dma_chip *chip)
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
dev_err(chip->dev, "No memory for descriptors dma pool\n");
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_pdata;
}
tasklet_setup(&dw->tasklet, dw_dma_tasklet);
- err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+ ret = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
dw->name, dw);
- if (err)
+ if (ret)
goto err_pdata;
INIT_LIST_HEAD(&dw->dma.channels);
@@ -1254,8 +1341,8 @@ int do_dma_probe(struct dw_dma_chip *chip)
*/
dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);
- err = dma_async_device_register(&dw->dma);
- if (err)
+ ret = dma_async_device_register(&dw->dma);
+ if (ret)
goto err_dma_register;
dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
@@ -1269,7 +1356,7 @@ err_dma_register:
free_irq(chip->irq, dw);
err_pdata:
pm_runtime_put_sync_suspend(chip->dev);
- return err;
+ return ret;
}
int do_dma_remove(struct dw_dma_chip *chip)
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
index a4862263ff14..6766142884b6 100644
--- a/drivers/dma/dw/dw.c
+++ b/drivers/dma/dw/dw.c
@@ -64,30 +64,39 @@ static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
return DWC_CTLH_BLOCK_TS(block) << width;
}
+static inline u8 dw_dma_encode_maxburst(u32 maxburst)
+{
+ /*
+ * Fix burst size according to dw_dmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ */
+ return maxburst > 1 ? fls(maxburst) - 2 : 0;
+}
+
static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
{
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
- u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
- u8 p_master = dwc->dws.p_master;
- u8 m_master = dwc->dws.m_master;
- u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
- u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master;
+ u8 smsize = 0, dmsize = 0;
+ u8 sms, dms;
+
+ if (dwc->direction == DMA_MEM_TO_DEV) {
+ sms = dwc->dws.m_master;
+ dms = dwc->dws.p_master;
+ dmsize = dw_dma_encode_maxburst(sconfig->dst_maxburst);
+ } else if (dwc->direction == DMA_DEV_TO_MEM) {
+ sms = dwc->dws.p_master;
+ dms = dwc->dws.m_master;
+ smsize = dw_dma_encode_maxburst(sconfig->src_maxburst);
+ } else /* DMA_MEM_TO_MEM */ {
+ sms = dwc->dws.m_master;
+ dms = dwc->dws.m_master;
+ }
return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) |
DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms);
}
-static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
-{
- /*
- * Fix burst size according to dw_dmac. We need to convert them as:
- * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
- */
- *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0;
-}
-
static void dw_dma_set_device_name(struct dw_dma *dw, int id)
{
snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id);
@@ -116,7 +125,6 @@ int dw_dma_probe(struct dw_dma_chip *chip)
dw->suspend_chan = dw_dma_suspend_chan;
dw->resume_chan = dw_dma_resume_chan;
dw->prepare_ctllo = dw_dma_prepare_ctllo;
- dw->encode_maxburst = dw_dma_encode_maxburst;
dw->bytes2block = dw_dma_bytes2block;
dw->block2bytes = dw_dma_block2bytes;
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
index 58f4078d83fe..dac617c183e6 100644
--- a/drivers/dma/dw/idma32.c
+++ b/drivers/dma/dw/idma32.c
@@ -199,21 +199,25 @@ static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
return IDMA32C_CTLH_BLOCK_TS(block);
}
+static inline u8 idma32_encode_maxburst(u32 maxburst)
+{
+ return maxburst > 1 ? fls(maxburst) - 1 : 0;
+}
+
static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
{
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
- u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
+ u8 smsize = 0, dmsize = 0;
+
+ if (dwc->direction == DMA_MEM_TO_DEV)
+ dmsize = idma32_encode_maxburst(sconfig->dst_maxburst);
+ else if (dwc->direction == DMA_DEV_TO_MEM)
+ smsize = idma32_encode_maxburst(sconfig->src_maxburst);
return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
}
-static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
-{
- *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
-}
-
static void idma32_set_device_name(struct dw_dma *dw, int id)
{
snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
@@ -270,7 +274,6 @@ int idma32_dma_probe(struct dw_dma_chip *chip)
dw->suspend_chan = idma32_suspend_chan;
dw->resume_chan = idma32_resume_chan;
dw->prepare_ctllo = idma32_prepare_ctllo;
- dw->encode_maxburst = idma32_encode_maxburst;
dw->bytes2block = idma32_bytes2block;
dw->block2bytes = idma32_block2bytes;
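
Both conversions above turn the old encode_maxburst() callbacks into small pure helpers called from prepare_ctllo(). The standalone sketch below (user-space C, not kernel code) prints the MSIZE field values the two helpers produce for a few representative burst lengths, making the off-by-one difference between the DesignWare and iDMA32 encodings visible; fls32() is a simplified stand-in for the kernel's fls().

/* Compare dw_dma_encode_maxburst() and idma32_encode_maxburst() mappings. */
#include <stdio.h>

static int fls32(unsigned int x)	/* stand-in for the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int bursts[] = { 1, 4, 8, 16 };
	unsigned int i;

	for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++) {
		unsigned int b = bursts[i];
		unsigned int dw_msize = b > 1 ? fls32(b) - 2 : 0;
		unsigned int idma32_msize = b > 1 ? fls32(b) - 1 : 0;

		printf("burst %2u: dw MSIZE=%u, idma32 MSIZE=%u\n",
		       b, dw_msize, idma32_msize);
	}
	return 0;
}
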
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 7d9d4c951724..47c58ad468cb 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -29,7 +29,7 @@ static int dw_probe(struct platform_device *pdev)
struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
struct device *dev = &pdev->dev;
- int err;
+ int ret;
match = device_get_match_data(dev);
if (!match)
@@ -51,9 +51,9 @@ static int dw_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- return err;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (!data->pdata)
data->pdata = dev_get_platdata(dev);
@@ -69,14 +69,14 @@ static int dw_probe(struct platform_device *pdev)
chip->clk = devm_clk_get_optional(chip->dev, "hclk");
if (IS_ERR(chip->clk))
return PTR_ERR(chip->clk);
- err = clk_prepare_enable(chip->clk);
- if (err)
- return err;
+ ret = clk_prepare_enable(chip->clk);
+ if (ret)
+ return ret;
pm_runtime_enable(&pdev->dev);
- err = data->probe(chip);
- if (err)
+ ret = data->probe(chip);
+ if (ret)
goto err_dw_dma_probe;
platform_set_drvdata(pdev, data);
@@ -90,7 +90,7 @@ static int dw_probe(struct platform_device *pdev)
err_dw_dma_probe:
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
- return err;
+ return ret;
}
static void dw_remove(struct platform_device *pdev)
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 76654bd13c1a..5969d9cc8d7a 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -327,7 +327,6 @@ struct dw_dma {
void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
void (*resume_chan)(struct dw_dma_chan *dwc, bool drain);
u32 (*prepare_ctllo)(struct dw_dma_chan *dwc);
- void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
unsigned int width, size_t *len);
size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 8514b9ab8517..995427afe077 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -898,7 +898,7 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
desc = container_of(tx, struct ep93xx_dma_desc, txd);
/*
- * If nothing is currently prosessed, we push this descriptor
+ * If nothing is currently processed, we push this descriptor
* directly to the hardware. Otherwise we put the descriptor
* to the pending queue.
*/
@@ -1076,7 +1076,7 @@ fail:
* @chan: channel
* @sgl: list of buffers to transfer
* @sg_len: number of entries in @sgl
- * @dir: direction of tha DMA transfer
+ * @dir: direction of the DMA transfer
* @flags: flags for the descriptor
* @context: operation context (ignored)
*
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
index 2c80077cb7c0..36c284a3d184 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -12,8 +12,8 @@ struct dpaa2_qdma_sd_d {
u32 rsv:32;
union {
struct {
- u32 ssd:12; /* souce stride distance */
- u32 sss:12; /* souce stride size */
+ u32 ssd:12; /* source stride distance */
+ u32 sss:12; /* source stride size */
u32 rsv1:8;
} sdf;
struct {
@@ -48,7 +48,7 @@ struct dpaa2_qdma_sd_d {
#define QDMA_SER_DISABLE (8) /* no notification */
#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
-#define QDMA_SER_BOTH (3 << 8) /* soruce and dest notification */
+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index c66185c5a199..f9f1eda79254 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -100,6 +100,22 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
return fsl_edma_err_handler(irq, dev_id);
}
+static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid)
+{
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+
+ if (fsl_chan->srcid && srcid == fsl_chan->srcid) {
+ dev_err(&fsl_chan->pdev->dev, "The srcid is already in use!\n");
+ return true;
+ }
+ }
+ return false;
+}
+
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@@ -117,6 +133,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
+
+ if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[1]))
+ return NULL;
+
if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
if (chan) {
@@ -153,7 +173,7 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
- mutex_lock(&fsl_edma->fsl_edma_mutex);
+ guard(mutex)(&fsl_edma->fsl_edma_mutex);
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
device_node) {
@@ -161,6 +181,8 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
continue;
fsl_chan = to_fsl_edma_chan(chan);
+ if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[0]))
+ return NULL;
i = fsl_chan - fsl_edma->chans;
fsl_chan->priority = dma_spec->args[1];
@@ -177,18 +199,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
if (!b_chmux && i == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
} else if (b_chmux && !fsl_chan->srcid) {
/* if controller support channel mux, choose a free channel */
chan = dma_get_slave_channel(chan);
chan->device->privatecnt++;
fsl_chan->srcid = dma_spec->args[0];
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
}
- mutex_unlock(&fsl_edma->fsl_edma_mutex);
return NULL;
}
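
The fsl_edma3_xlate() change above swaps the explicit mutex_lock()/mutex_unlock() pairs for the scope-based guard() helper from linux/cleanup.h, which is why the new early returns (including the srcid-in-use bail-out) need no unlock calls. The standalone sketch below illustrates the underlying mechanism with the compiler cleanup attribute the kernel helper builds on; it uses pthreads purely for illustration and is not part of the patch (build with: cc -pthread guard_demo.c).

/* Scope-based unlock: the cleanup handler runs on every return path. */
#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
	printf("mutex released\n");
}

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int xlate(int srcid_in_use)
{
	pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &lock;

	pthread_mutex_lock(guard);

	if (srcid_in_use)
		return -1;	/* early return: the cleanup still unlocks */

	return 0;		/* normal return: the cleanup unlocks too */
}

int main(void)
{
	printf("xlate() = %d\n", xlate(1));
	printf("xlate() = %d\n", xlate(0));
	return 0;
}
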
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index 4c47bff81064..25a4134be36b 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -677,7 +677,7 @@ static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
writel_relaxed(tmp, addr);
/*
- * 0 - dma should process FLR whith CPU.
+ * 0 - dma should process FLR with CPU.
* 1 - dma not process FLR, only cpu process FLR.
*/
addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index e3505e56784b..3c648308a54a 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -290,7 +290,7 @@ static void idma64_desc_fill(struct idma64_chan *idma64c,
desc->length += hw->len;
} while (i);
- /* Trigger an interrupt after the last block is transfered */
+ /* Trigger an interrupt after the last block is transferred */
lli->ctllo |= IDMA64C_CTLL_INT_EN;
/* Disable LLP transfer in the last block */
@@ -364,7 +364,7 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
if (!i)
return bytes;
- /* The current chunk is not fully transfered yet */
+ /* The current chunk is not fully transferred yet */
bytes += desc->hw[--i].len;
return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
@@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev;
- ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
- if (ret)
- return ret;
+ dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
ret = dma_async_device_register(&idma64->dma);
if (ret)
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 868b724a3b75..d84e21daa991 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -124,7 +124,6 @@ struct idxd_pmu {
struct pmu pmu;
char name[IDXD_NAME_SIZE];
- int cpu;
int n_counters;
int counter_width;
@@ -135,8 +134,6 @@ struct idxd_pmu {
unsigned long supported_filters;
int n_filters;
-
- struct hlist_node cpuhp_node;
};
#define IDXD_MAX_PRIORITY 0xf
@@ -803,14 +800,10 @@ void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
-void perfmon_init(void);
-void perfmon_exit(void);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
-static inline void perfmon_init(void) {}
-static inline void perfmon_exit(void) {}
#endif
/* debugfs */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 21f6905b554d..234c1c658ec7 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -69,9 +69,15 @@ static struct idxd_driver_data idxd_driver_data[] = {
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
+ /* DSA on GNR-D platforms */
+ { PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
+ /* DSA on DMR platforms */
+ { PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },
/* IAX ver 1.0 platforms */
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA on DMR platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -878,8 +884,6 @@ static int __init idxd_init_module(void)
else
support_enqcmd = true;
- perfmon_init();
-
err = idxd_driver_register(&idxd_drv);
if (err < 0)
goto err_idxd_driver_register;
@@ -928,7 +932,6 @@ static void __exit idxd_exit_module(void)
idxd_driver_unregister(&idxd_drv);
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
- perfmon_exit();
idxd_remove_debugfs();
}
module_exit(idxd_exit_module);
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index 5e94247e1ea7..4b6af2f15d8a 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -6,29 +6,6 @@
#include "idxd.h"
#include "perfmon.h"
-static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-
-static cpumask_t perfmon_dsa_cpu_mask;
-static bool cpuhp_set_up;
-static enum cpuhp_state cpuhp_slot;
-
-/*
- * perf userspace reads this attribute to determine which cpus to open
- * counters on. It's connected to perfmon_dsa_cpu_mask, which is
- * maintained by the cpu hotplug handlers.
- */
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *perfmon_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static struct attribute_group cpumask_attr_group = {
- .attrs = perfmon_cpumask_attrs,
-};
-
/*
* These attributes specify the bits in the config word that the perf
* syscall uses to pass the event ids and categories to perfmon.
@@ -67,16 +44,9 @@ static struct attribute_group perfmon_format_attr_group = {
static const struct attribute_group *perfmon_attr_groups[] = {
&perfmon_format_attr_group,
- &cpumask_attr_group,
NULL,
};
-static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
-}
-
static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
{
return &idxd_pmu->pmu == event->pmu;
@@ -217,7 +187,6 @@ static int perfmon_pmu_event_init(struct perf_event *event)
return -EINVAL;
event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
- event->cpu = idxd->idxd_pmu->cpu;
event->hw.config = event->attr.config;
if (event->group_leader != event)
@@ -480,14 +449,15 @@ static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
- idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable,
- idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable,
+ idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
+ idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
idxd_pmu->pmu.add = perfmon_pmu_event_add;
idxd_pmu->pmu.del = perfmon_pmu_event_del;
idxd_pmu->pmu.start = perfmon_pmu_event_start;
idxd_pmu->pmu.stop = perfmon_pmu_event_stop;
idxd_pmu->pmu.read = perfmon_pmu_event_update;
idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+ idxd_pmu->pmu.scope = PERF_PMU_SCOPE_SYS_WIDE;
idxd_pmu->pmu.module = THIS_MODULE;
}
@@ -496,47 +466,11 @@ void perfmon_pmu_remove(struct idxd_device *idxd)
if (!idxd->idxd_pmu)
return;
- cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
perf_pmu_unregister(&idxd->idxd_pmu->pmu);
kfree(idxd->idxd_pmu);
idxd->idxd_pmu = NULL;
}
-static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
- struct idxd_pmu *idxd_pmu;
-
- idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
-
- /* select the first online CPU as the designated reader */
- if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
- cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
- idxd_pmu->cpu = cpu;
- }
-
- return 0;
-}
-
-static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
- struct idxd_pmu *idxd_pmu;
- unsigned int target;
-
- idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);
-
- if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
- return 0;
-
- target = cpumask_any_but(cpu_online_mask, cpu);
- /* migrate events if there is a valid target */
- if (target < nr_cpu_ids) {
- cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
- }
-
- return 0;
-}
-
int perfmon_pmu_init(struct idxd_device *idxd)
{
union idxd_perfcap perfcap;
@@ -544,12 +478,6 @@ int perfmon_pmu_init(struct idxd_device *idxd)
int rc = -ENODEV;
/*
- * perfmon module initialization failed, nothing to do
- */
- if (!cpuhp_set_up)
- return -ENODEV;
-
- /*
* If perfmon_offset or num_counters is 0, it means perfmon is
* not supported on this hardware.
*/
@@ -624,11 +552,6 @@ int perfmon_pmu_init(struct idxd_device *idxd)
if (rc)
goto free;
- rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
- if (rc) {
- perf_pmu_unregister(&idxd->idxd_pmu->pmu);
- goto free;
- }
out:
return rc;
free:
@@ -637,22 +560,3 @@ free:
goto out;
}
-
-void __init perfmon_init(void)
-{
- int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
- "driver/dma/idxd/perf:online",
- perf_event_cpu_online,
- perf_event_cpu_offline);
- if (WARN_ON(rc < 0))
- return;
-
- cpuhp_slot = rc;
- cpuhp_set_up = true;
-}
-
-void __exit perfmon_exit(void)
-{
- if (cpuhp_set_up)
- cpuhp_remove_multi_state(cpuhp_slot);
-}
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 817a564413b0..94eca25ae9b9 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -134,7 +134,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
* completing the descriptor will return desc to allocator and
* the desc can be acquired by a different process and the
* desc->list can be modified. Delete desc from list so the
- * list trasversing does not get corrupted by the other process.
+ * list traversing does not get corrupted by the other process.
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ebf7c115d553..e913f0db99da 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -167,7 +167,6 @@ struct imxdma_channel {
enum imx_dma_type {
IMX1_DMA,
- IMX21_DMA,
IMX27_DMA,
};
@@ -195,8 +194,6 @@ static const struct of_device_id imx_dma_of_dev_id[] = {
{
.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
}, {
- .compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
- }, {
.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
}, {
/* sentinel */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 7b502b60b38b..cc9ddd6c325b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -905,7 +905,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR_VAL;
- /* validate the sources with the destintation page */
+ /* validate the sources with the destination page */
for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
xor_val_srcs[i] = xor_srcs[i];
xor_val_srcs[i] = dest;
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 4117c7b67e9c..8173c3f1075a 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -107,7 +107,7 @@
* If header mode is set in DMA descriptor,
* If bit 30 is disabled, HDR_LEN must be configured according to channel
* requirement.
- * If bit 30 is enabled(checksum with heade mode), HDR_LEN has no need to
+ * If bit 30 is enabled(checksum with header mode), HDR_LEN has no need to
* be configured. It will enable check sum for switch
* If header mode is not set in DMA descriptor,
* This register setting doesn't matter
diff --git a/drivers/dma/loongson1-apb-dma.c b/drivers/dma/loongson1-apb-dma.c
new file mode 100644
index 000000000000..255fe7eca212
--- /dev/null
+++ b/drivers/dma/loongson1-apb-dma.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Loongson-1 APB DMA Controller
+ *
+ * Copyright (C) 2015-2024 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Loongson-1 DMA Control Register */
+#define LS1X_DMA_CTRL 0x0
+
+/* DMA Control Register Bits */
+#define LS1X_DMA_STOP BIT(4)
+#define LS1X_DMA_START BIT(3)
+#define LS1X_DMA_ASK_VALID BIT(2)
+
+/* DMA Next Field Bits */
+#define LS1X_DMA_NEXT_VALID BIT(0)
+
+/* DMA Command Field Bits */
+#define LS1X_DMA_RAM2DEV BIT(12)
+#define LS1X_DMA_INT BIT(1)
+#define LS1X_DMA_INT_MASK BIT(0)
+
+#define LS1X_DMA_LLI_ALIGNMENT 64
+#define LS1X_DMA_LLI_ADDR_MASK GENMASK(31, __ffs(LS1X_DMA_LLI_ALIGNMENT))
+#define LS1X_DMA_MAX_CHANNELS 3
+
+enum ls1x_dmadesc_offsets {
+ LS1X_DMADESC_NEXT = 0,
+ LS1X_DMADESC_SADDR,
+ LS1X_DMADESC_DADDR,
+ LS1X_DMADESC_LENGTH,
+ LS1X_DMADESC_STRIDE,
+ LS1X_DMADESC_CYCLES,
+ LS1X_DMADESC_CMD,
+ LS1X_DMADESC_SIZE
+};
+
+struct ls1x_dma_lli {
+ unsigned int hw[LS1X_DMADESC_SIZE];
+ dma_addr_t phys;
+ struct list_head node;
+} __aligned(LS1X_DMA_LLI_ALIGNMENT);
+
+struct ls1x_dma_desc {
+ struct virt_dma_desc vd;
+ struct list_head lli_list;
+};
+
+struct ls1x_dma_chan {
+ struct virt_dma_chan vc;
+ struct dma_pool *lli_pool;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ unsigned int bus_width;
+ void __iomem *reg_base;
+ int irq;
+ bool is_cyclic;
+ struct ls1x_dma_lli *curr_lli;
+};
+
+struct ls1x_dma {
+ struct dma_device ddev;
+ unsigned int nr_chans;
+ struct ls1x_dma_chan chan[];
+};
+
+static irqreturn_t ls1x_dma_irq_handler(int irq, void *data);
+
+#define to_ls1x_dma_chan(dchan) \
+ container_of(dchan, struct ls1x_dma_chan, vc.chan)
+
+#define to_ls1x_dma_desc(d) \
+ container_of(d, struct ls1x_dma_desc, vd)
+
+static inline struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline int ls1x_dma_query(struct ls1x_dma_chan *chan,
+ dma_addr_t *lli_phys)
+{
+ struct dma_chan *dchan = &chan->vc.chan;
+ int val, ret;
+
+ val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
+ val |= LS1X_DMA_ASK_VALID;
+ val |= dchan->chan_id;
+ writel(val, chan->reg_base + LS1X_DMA_CTRL);
+ ret = readl_poll_timeout_atomic(chan->reg_base + LS1X_DMA_CTRL, val,
+ !(val & LS1X_DMA_ASK_VALID), 0, 3000);
+ if (ret)
+ dev_err(chan2dev(dchan), "failed to query DMA\n");
+
+ return ret;
+}
+
+static inline int ls1x_dma_start(struct ls1x_dma_chan *chan,
+ dma_addr_t *lli_phys)
+{
+ struct dma_chan *dchan = &chan->vc.chan;
+ struct device *dev = chan2dev(dchan);
+ int val, ret;
+
+ val = *lli_phys & LS1X_DMA_LLI_ADDR_MASK;
+ val |= LS1X_DMA_START;
+ val |= dchan->chan_id;
+ writel(val, chan->reg_base + LS1X_DMA_CTRL);
+ ret = readl_poll_timeout(chan->reg_base + LS1X_DMA_CTRL, val,
+ !(val & LS1X_DMA_START), 0, 1000);
+ if (!ret)
+ dev_dbg(dev, "start DMA with lli_phys=%pad\n", lli_phys);
+ else
+ dev_err(dev, "failed to start DMA\n");
+
+ return ret;
+}
+
+static inline void ls1x_dma_stop(struct ls1x_dma_chan *chan)
+{
+ int val = readl(chan->reg_base + LS1X_DMA_CTRL);
+
+ writel(val | LS1X_DMA_STOP, chan->reg_base + LS1X_DMA_CTRL);
+}
+
+static void ls1x_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct device *dev = chan2dev(dchan);
+
+ dma_free_coherent(dev, sizeof(struct ls1x_dma_lli),
+ chan->curr_lli, chan->curr_lli->phys);
+ dma_pool_destroy(chan->lli_pool);
+ chan->lli_pool = NULL;
+ devm_free_irq(dev, chan->irq, chan);
+ vchan_free_chan_resources(&chan->vc);
+}
+
+static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct device *dev = chan2dev(dchan);
+ dma_addr_t phys;
+ int ret;
+
+ ret = devm_request_irq(dev, chan->irq, ls1x_dma_irq_handler,
+ IRQF_SHARED, dma_chan_name(dchan), chan);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", chan->irq);
+ return ret;
+ }
+
+ chan->lli_pool = dma_pool_create(dma_chan_name(dchan), dev,
+ sizeof(struct ls1x_dma_lli),
+ __alignof__(struct ls1x_dma_lli), 0);
+ if (!chan->lli_pool)
+ return -ENOMEM;
+
+ /* allocate memory for querying the current lli */
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ chan->curr_lli = dma_alloc_coherent(dev, sizeof(struct ls1x_dma_lli),
+ &phys, GFP_KERNEL);
+ if (!chan->curr_lli) {
+ dma_pool_destroy(chan->lli_pool);
+ return -ENOMEM;
+ }
+ chan->curr_lli->phys = phys;
+
+ return 0;
+}
+
+static void ls1x_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(vd->tx.chan);
+ struct ls1x_dma_lli *lli, *_lli;
+
+ list_for_each_entry_safe(lli, _lli, &desc->lli_list, node) {
+ list_del(&lli->node);
+ dma_pool_free(chan->lli_pool, lli, lli->phys);
+ }
+
+ kfree(desc);
+}
+
+static struct ls1x_dma_desc *ls1x_dma_alloc_desc(void)
+{
+ struct ls1x_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&desc->lli_list);
+
+ return desc;
+}
+
+static int ls1x_dma_prep_lli(struct dma_chan *dchan, struct ls1x_dma_desc *desc,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, bool is_cyclic)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct ls1x_dma_lli *lli, *prev = NULL, *first = NULL;
+ struct device *dev = chan2dev(dchan);
+ struct list_head *pos = NULL;
+ struct scatterlist *sg;
+ unsigned int dev_addr, cmd, i;
+
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ dev_addr = chan->dst_addr;
+ chan->bus_width = chan->dst_addr_width;
+ cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT;
+ break;
+ case DMA_DEV_TO_MEM:
+ dev_addr = chan->src_addr;
+ chan->bus_width = chan->src_addr_width;
+ cmd = LS1X_DMA_INT;
+ break;
+ default:
+ dev_err(dev, "unsupported DMA direction: %s\n",
+ dmaengine_get_direction_text(dir));
+ return -EINVAL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t buf_addr = sg_dma_address(sg);
+ size_t buf_len = sg_dma_len(sg);
+ dma_addr_t phys;
+
+ if (!is_dma_copy_aligned(dchan->device, buf_addr, 0, buf_len)) {
+ dev_err(dev, "buffer is not aligned\n");
+ return -EINVAL;
+ }
+
+ /* allocate HW descriptors */
+ lli = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT, &phys);
+ if (!lli) {
+ dev_err(dev, "failed to alloc lli %u\n", i);
+ return -ENOMEM;
+ }
+
+ /* setup HW descriptors */
+ lli->phys = phys;
+ lli->hw[LS1X_DMADESC_SADDR] = buf_addr;
+ lli->hw[LS1X_DMADESC_DADDR] = dev_addr;
+ lli->hw[LS1X_DMADESC_LENGTH] = buf_len / chan->bus_width;
+ lli->hw[LS1X_DMADESC_STRIDE] = 0;
+ lli->hw[LS1X_DMADESC_CYCLES] = 1;
+ lli->hw[LS1X_DMADESC_CMD] = cmd;
+
+ if (prev)
+ prev->hw[LS1X_DMADESC_NEXT] =
+ lli->phys | LS1X_DMA_NEXT_VALID;
+ prev = lli;
+
+ if (!first)
+ first = lli;
+
+ list_add_tail(&lli->node, &desc->lli_list);
+ }
+
+ if (is_cyclic) {
+ lli->hw[LS1X_DMADESC_NEXT] = first->phys | LS1X_DMA_NEXT_VALID;
+ chan->is_cyclic = is_cyclic;
+ }
+
+ list_for_each(pos, &desc->lli_list) {
+ lli = list_entry(pos, struct ls1x_dma_lli, node);
+ print_hex_dump_debug("LLI: ", DUMP_PREFIX_OFFSET, 16, 4,
+ lli, sizeof(*lli), false);
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct ls1x_dma_desc *desc;
+
+ dev_dbg(chan2dev(dchan), "sg_len=%u flags=0x%lx dir=%s\n",
+ sg_len, flags, dmaengine_get_direction_text(dir));
+
+ desc = ls1x_dma_alloc_desc();
+ if (!desc)
+ return NULL;
+
+ if (ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, false)) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+ls1x_dma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct ls1x_dma_desc *desc;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(chan2dev(dchan),
+ "buf_len=%zu period_len=%zu flags=0x%lx dir=%s\n",
+ buf_len, period_len, flags, dmaengine_get_direction_text(dir));
+
+ desc = ls1x_dma_alloc_desc();
+ if (!desc)
+ return NULL;
+
+ /* allocate the scatterlist */
+ sg_len = buf_len / period_len;
+ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ if (!sgl) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ sg_init_table(sgl, sg_len);
+ for (i = 0; i < sg_len; ++i) {
+ sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(buf_addr)),
+ period_len, offset_in_page(buf_addr));
+ sg_dma_address(&sgl[i]) = buf_addr;
+ sg_dma_len(&sgl[i]) = period_len;
+ buf_addr += period_len;
+ }
+
+ ret = ls1x_dma_prep_lli(dchan, desc, sgl, sg_len, dir, true);
+ kfree(sgl);
+ if (ret) {
+ ls1x_dma_free_desc(&desc->vd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(to_virt_chan(dchan), &desc->vd, flags);
+}
+
+static int ls1x_dma_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ chan->src_addr = config->src_addr;
+ chan->src_addr_width = config->src_addr_width;
+ chan->dst_addr = config->dst_addr;
+ chan->dst_addr_width = config->dst_addr_width;
+
+ return 0;
+}
+
+static int ls1x_dma_pause(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ int ret;
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+ /* save the current lli */
+ ret = ls1x_dma_query(chan, &chan->curr_lli->phys);
+ if (!ret)
+ ls1x_dma_stop(chan);
+
+ return ret;
+}
+
+static int ls1x_dma_resume(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+
+ return ls1x_dma_start(chan, &chan->curr_lli->phys);
+}
+
+static int ls1x_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct virt_dma_desc *vd;
+ LIST_HEAD(head);
+
+ ls1x_dma_stop(chan);
+
+ scoped_guard(spinlock_irqsave, &chan->vc.lock) {
+ vd = vchan_next_desc(&chan->vc);
+ if (vd)
+ vchan_terminate_vdesc(vd);
+
+ vchan_get_all_descriptors(&chan->vc, &head);
+ }
+
+ vchan_dma_desc_free_list(&chan->vc, &head);
+
+ return 0;
+}
+
+static void ls1x_dma_synchronize(struct dma_chan *dchan)
+{
+ vchan_synchronize(to_virt_chan(dchan));
+}
+
+static enum dma_status ls1x_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+ struct virt_dma_desc *vd;
+ enum dma_status status;
+ size_t bytes = 0;
+
+ status = dma_cookie_status(dchan, cookie, state);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ scoped_guard(spinlock_irqsave, &chan->vc.lock) {
+ vd = vchan_find_desc(&chan->vc, cookie);
+ if (vd) {
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_lli *lli;
+ dma_addr_t next_phys;
+
+ /* get the current lli */
+ if (ls1x_dma_query(chan, &chan->curr_lli->phys))
+ return status;
+
+ /* locate the current lli */
+ next_phys = chan->curr_lli->hw[LS1X_DMADESC_NEXT];
+ list_for_each_entry(lli, &desc->lli_list, node)
+ if (lli->hw[LS1X_DMADESC_NEXT] == next_phys)
+ break;
+
+ dev_dbg(chan2dev(dchan), "current lli_phys=%pad",
+ &lli->phys);
+
+ /* count the residues */
+ list_for_each_entry_from(lli, &desc->lli_list, node)
+ bytes += lli->hw[LS1X_DMADESC_LENGTH] *
+ chan->bus_width;
+ }
+ }
+
+ dma_set_residue(state, bytes);
+
+ return status;
+}
+
+static void ls1x_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
+
+ guard(spinlock_irqsave)(&chan->vc.lock);
+
+ if (vchan_issue_pending(&chan->vc)) {
+ struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
+
+ if (vd) {
+ struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vd);
+ struct ls1x_dma_lli *lli;
+
+ lli = list_first_entry(&desc->lli_list,
+ struct ls1x_dma_lli, node);
+ ls1x_dma_start(chan, &lli->phys);
+ }
+ }
+}
+
+static irqreturn_t ls1x_dma_irq_handler(int irq, void *data)
+{
+ struct ls1x_dma_chan *chan = data;
+ struct dma_chan *dchan = &chan->vc.chan;
+ struct device *dev = chan2dev(dchan);
+ struct virt_dma_desc *vd;
+
+ scoped_guard(spinlock, &chan->vc.lock) {
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ dev_warn(dev,
+ "IRQ %d with no active desc on channel %d\n",
+ irq, dchan->chan_id);
+ return IRQ_NONE;
+ }
+
+ if (chan->is_cyclic) {
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ }
+
+ dev_dbg(dev, "DMA IRQ %d on channel %d\n", irq, dchan->chan_id);
+
+ return IRQ_HANDLED;
+}
+
+static int ls1x_dma_chan_probe(struct platform_device *pdev,
+ struct ls1x_dma *dma)
+{
+ void __iomem *reg_base;
+ int id;
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ for (id = 0; id < dma->nr_chans; id++) {
+ struct ls1x_dma_chan *chan = &dma->chan[id];
+ char pdev_irqname[16];
+
+ snprintf(pdev_irqname, sizeof(pdev_irqname), "ch%d", id);
+ chan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (chan->irq < 0)
+ return dev_err_probe(&pdev->dev, chan->irq,
+ "failed to get IRQ for ch%d\n",
+ id);
+
+ chan->reg_base = reg_base;
+ chan->vc.desc_free = ls1x_dma_free_desc;
+ vchan_init(&chan->vc, &dma->ddev);
+ }
+
+ return 0;
+}
+
+static void ls1x_dma_chan_remove(struct ls1x_dma *dma)
+{
+ int id;
+
+ for (id = 0; id < dma->nr_chans; id++) {
+ struct ls1x_dma_chan *chan = &dma->chan[id];
+
+ if (chan->vc.chan.device == &dma->ddev) {
+ list_del(&chan->vc.chan.device_node);
+ tasklet_kill(&chan->vc.task);
+ }
+ }
+}
+
+static int ls1x_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dma_device *ddev;
+ struct ls1x_dma *dma;
+ int ret;
+
+ ret = platform_irq_count(pdev);
+ if (ret <= 0 || ret > LS1X_DMA_MAX_CHANNELS)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid number of IRQ channels: %d\n",
+ ret);
+
+ dma = devm_kzalloc(dev, struct_size(dma, chan, ret), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+ dma->nr_chans = ret;
+
+ /* initialize DMA device */
+ ddev = &dma->ddev;
+ ddev->dev = dev;
+ ddev->copy_align = DMAENGINE_ALIGN_4_BYTES;
+ ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ ddev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ ddev->device_alloc_chan_resources = ls1x_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = ls1x_dma_free_chan_resources;
+ ddev->device_prep_slave_sg = ls1x_dma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = ls1x_dma_prep_dma_cyclic;
+ ddev->device_config = ls1x_dma_slave_config;
+ ddev->device_pause = ls1x_dma_pause;
+ ddev->device_resume = ls1x_dma_resume;
+ ddev->device_terminate_all = ls1x_dma_terminate_all;
+ ddev->device_synchronize = ls1x_dma_synchronize;
+ ddev->device_tx_status = ls1x_dma_tx_status;
+ ddev->device_issue_pending = ls1x_dma_issue_pending;
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ INIT_LIST_HEAD(&ddev->channels);
+
+ /* initialize DMA channels */
+ ret = ls1x_dma_chan_probe(pdev, dma);
+ if (ret)
+ goto err;
+
+ ret = dmaenginem_async_device_register(ddev);
+ if (ret) {
+ dev_err(dev, "failed to register DMA device\n");
+ goto err;
+ }
+
+ ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
+ ddev);
+ if (ret) {
+ dev_err(dev, "failed to register DMA controller\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, dma);
+ dev_info(dev, "Loongson1 DMA driver registered\n");
+
+ return 0;
+
+err:
+ ls1x_dma_chan_remove(dma);
+
+ return ret;
+}
+
+static void ls1x_dma_remove(struct platform_device *pdev)
+{
+ struct ls1x_dma *dma = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ ls1x_dma_chan_remove(dma);
+}
+
+static const struct of_device_id ls1x_dma_match[] = {
+ { .compatible = "loongson,ls1b-apbdma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_dma_match);
+
+static struct platform_driver ls1x_dma_driver = {
+ .probe = ls1x_dma_probe,
+ .remove = ls1x_dma_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ls1x_dma_match,
+ },
+};
+
+module_platform_driver(ls1x_dma_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 APB DMA Controller driver");
+MODULE_LICENSE("GPL");
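
To put the new controller in context, here is a hedged sketch of how a Loongson1 peripheral driver (for instance the NAND or audio clients mentioned in the Kconfig help) would typically drive one of these channels through the generic dmaengine slave API; it needs <linux/dmaengine.h>. The channel name "rx", fifo_phys, buf_dma and the callback are illustrative assumptions, not taken from an actual client.

/* Hedged example of a DEV_TO_MEM slave transfer on a ls1x channel. */
static int ls1x_client_start_rx(struct device *dev, dma_addr_t fifo_phys,
				dma_addr_t buf_dma, size_t len,
				dma_async_tx_callback done, void *arg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* resolved via of_dma_xlate_by_chan_id */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);	/* ends up in ls1x_dma_slave_config() */
	if (ret)
		goto release;

	desc = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto release;
	}

	desc->callback = done;			/* completed from ls1x_dma_irq_handler() */
	desc->callback_param = arg;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* kicks ls1x_dma_issue_pending() */

	return 0;

release:
	dma_release_channel(chan);
	return ret;
}
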
diff --git a/drivers/dma/lpc32xx-dmamux.c b/drivers/dma/lpc32xx-dmamux.c
new file mode 100644
index 000000000000..351d7e23e615
--- /dev/null
+++ b/drivers/dma/lpc32xx-dmamux.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2024 Timesys Corporation <piotr.wojtaszczyk@timesys.com>
+//
+// Based on TI DMA Crossbar driver by:
+// Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+// Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define LPC32XX_SSP_CLK_CTRL 0x78
+#define LPC32XX_I2S_CLK_CTRL 0x7c
+
+struct lpc32xx_dmamux {
+ int signal;
+ char *name_sel0;
+ char *name_sel1;
+ int muxval;
+ int muxreg;
+ int bit;
+ bool busy;
+};
+
+struct lpc32xx_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *reg;
+ spinlock_t lock; /* protects busy status flag */
+};
+
+/* From LPC32x0 User manual "3.2.1 DMA request signals" */
+static struct lpc32xx_dmamux lpc32xx_muxes[] = {
+ {
+ .signal = 3,
+ .name_sel0 = "spi2-rx-tx",
+ .name_sel1 = "ssp1-rx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 5,
+ },
+ {
+ .signal = 10,
+ .name_sel0 = "uart7-rx",
+ .name_sel1 = "i2s1-dma1",
+ .muxreg = LPC32XX_I2S_CLK_CTRL,
+ .bit = 4,
+ },
+ {
+ .signal = 11,
+ .name_sel0 = "spi1-rx-tx",
+ .name_sel1 = "ssp1-tx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 4,
+ },
+ {
+ .signal = 14,
+ .name_sel0 = "none",
+ .name_sel1 = "ssp0-rx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 3,
+ },
+ {
+ .signal = 15,
+ .name_sel0 = "none",
+ .name_sel1 = "ssp0-tx",
+ .muxreg = LPC32XX_SSP_CLK_CTRL,
+ .bit = 2,
+ },
+};
+
+static void lpc32xx_dmamux_release(struct device *dev, void *route_data)
+{
+ struct lpc32xx_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct lpc32xx_dmamux *mux = route_data;
+
+ dev_dbg(dev, "releasing dma request signal %d routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+
+ guard(spinlock)(&dmamux->lock);
+
+ mux->busy = false;
+}
+
+static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct device *dev = &pdev->dev;
+ struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ unsigned long flags;
+ struct lpc32xx_dmamux *mux = NULL;
+ int i;
+
+ if (dma_spec->args_count != 3) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
+ if (lpc32xx_muxes[i].signal == dma_spec->args[0]) {
+ mux = &lpc32xx_muxes[i];
+ break;
+ }
+ }
+ if (!mux) {
+ dev_err(&pdev->dev, "invalid mux request number: %d\n",
+ dma_spec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dma_spec->args[2] > 1) {
+ dev_err(&pdev->dev, "invalid dma mux value: %d\n",
+ dma_spec->args[2]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The of_node_put() will be done in the core for the node */
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+ if (mux->busy) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ dev_err(dev, "dma request signal %d busy, routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+ of_node_put(dma_spec->np);
+ return ERR_PTR(-EBUSY);
+ }
+
+ mux->busy = true;
+ mux->muxval = dma_spec->args[2] ? BIT(mux->bit) : 0;
+
+ regmap_update_bits(dmamux->reg, mux->muxreg, BIT(mux->bit), mux->muxval);
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[2] = 0;
+ dma_spec->args_count = 2;
+
+ dev_dbg(dev, "dma request signal %d routed to %s\n",
+ mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel0);
+
+ return mux;
+}
+
+static int lpc32xx_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct lpc32xx_dmamux_data *dmamux;
+
+ dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+ if (!dmamux)
+ return -ENOMEM;
+
+ dmamux->reg = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(dmamux->reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(dmamux->reg);
+ }
+
+ spin_lock_init(&dmamux->lock);
+ platform_set_drvdata(pdev, dmamux);
+ dmamux->dmarouter.dev = &pdev->dev;
+ dmamux->dmarouter.route_free = lpc32xx_dmamux_release;
+
+ return of_dma_router_register(np, lpc32xx_dmamux_reserve,
+ &dmamux->dmarouter);
+}
+
+static const struct of_device_id lpc32xx_dmamux_match[] = {
+ { .compatible = "nxp,lpc3220-dmamux" },
+ {},
+};
+
+static struct platform_driver lpc32xx_dmamux_driver = {
+ .probe = lpc32xx_dmamux_probe,
+ .driver = {
+ .name = "lpc32xx-dmamux",
+ .of_match_table = lpc32xx_dmamux_match,
+ },
+};
+
+static int __init lpc32xx_dmamux_init(void)
+{
+ return platform_driver_register(&lpc32xx_dmamux_driver);
+}
+arch_initcall(lpc32xx_dmamux_init);
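
As a rough model of the select logic in lpc32xx_dmamux_reserve() above, the standalone sketch below (user-space C, not kernel code) shows how the third DMA cell sets or clears a single bit in the clock-control register that the mux table points at; the regmap is replaced by a plain variable and the mux entry values are taken from the table for request signal 11.

/* Model of routing one PL080 request line between its two sources. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

struct mux_entry {
	int signal;	/* DMA request line on the PL080 */
	int bit;	/* select bit in the syscon register */
};

static uint32_t ssp_clk_ctrl;	/* stands in for LPC32XX_SSP_CLK_CTRL behind the regmap */

static void route(const struct mux_entry *mux, int sel)
{
	uint32_t val = sel ? BIT(mux->bit) : 0;

	/* equivalent of regmap_update_bits(reg, muxreg, BIT(mux->bit), val) */
	ssp_clk_ctrl = (ssp_clk_ctrl & ~BIT(mux->bit)) | val;
	printf("signal %d -> %s, SSP_CLK_CTRL=0x%08x\n",
	       mux->signal, sel ? "sel1" : "sel0", (unsigned int)ssp_clk_ctrl);
}

int main(void)
{
	struct mux_entry spi1 = { .signal = 11, .bit = 4 };	/* spi1-rx-tx / ssp1-tx */

	route(&spi1, 0);	/* third cell = 0: route request 11 to spi1-rx-tx */
	route(&spi1, 1);	/* third cell = 1: route request 11 to ssp1-tx */
	return 0;
}
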
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
index a49913f3ed3f..9652e8666722 100644
--- a/drivers/dma/ls2x-apb-dma.c
+++ b/drivers/dma/ls2x-apb-dma.c
@@ -33,11 +33,11 @@
#define LDMA_STOP BIT(4) /* DMA stop operation */
#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
-/* Bitfields in ndesc_addr field of HW decriptor */
+/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
-/* Bitfields in cmd field of HW decriptor */
+/* Bitfields in cmd field of HW descriptor */
#define LDMA_INT BIT(1) /* Enable DMA interrupts */
#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 529100c5b9f5..b69eabf12a24 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -518,7 +518,7 @@ mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
/* setup dma channel */
cvd[i]->ch = c;
- /* setup sourece, destination, and length */
+ /* setup source, destination, and length */
tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len;
cvd[i]->len = tlen;
cvd[i]->src = src;
@@ -617,7 +617,7 @@ static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c)
u32 i, min_refcnt = U32_MAX, refcnt;
unsigned long flags;
- /* allocate PC with the minimun refcount */
+ /* allocate PC with the minimum refcount */
for (i = 0; i < cqdma->dma_channels; ++i) {
refcnt = refcount_read(&cqdma->pc[i]->refcnt);
if (refcnt < min_refcnt) {
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 36ff11e909ea..58c7961ab9ad 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -226,7 +226,7 @@ struct mtk_hsdma_soc {
* @pc_refcnt: Track how many VCs are using the PC
* @lock: Lock protect agaisting multiple VCs access PC
* @soc: The pointer to area holding differences among
- * vaious platform
+ * various platform
*/
struct mtk_hsdma_device {
struct dma_device ddev;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcd3b623ac6c..43efce77bb57 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -414,7 +414,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_chan_is_busy(mv_chan)) {
u32 current_desc = mv_chan_get_current_desc(mv_chan);
/*
- * and the curren desc is the end of the chain before
+ * and the current desc is the end of the chain before
* the append, then we need to start the channel
*/
if (current_desc == old_chain_tail->async_tx.phys)
@@ -1074,7 +1074,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
- /* discover transaction capabilites from the platform data */
+ /* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index d86086b05b0e..c87cefd38a07 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -99,7 +99,7 @@ struct mv_xor_device {
* @common: common dmaengine channel object members
* @slots_allocated: records the actual size of the descriptor slot pool
* @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
- * @op_in_desc: new mode of driver, each op is writen to descriptor.
+ * @op_in_desc: new mode of driver, each op is written to descriptor.
*/
struct mv_xor_chan {
int pending;
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 97ebc791a30b..c8c67f4d982c 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -175,7 +175,7 @@ struct mv_xor_v2_device {
* struct mv_xor_v2_sw_desc - implements a xor SW descriptor
* @idx: descriptor index
* @async_tx: support for the async_tx api
- * @hw_desc: assosiated HW descriptor
+ * @hw_desc: associated HW descriptor
* @free_list: node of the free SW descriprots list
*/
struct mv_xor_v2_sw_desc {
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index c08916339aa7..3b011a91d48e 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -897,7 +897,7 @@ static int nbpf_config(struct dma_chan *dchan,
/*
* We could check config->slave_id to match chan->terminal here,
* but with DT they would be coming from the same source, so
- * such a check would be superflous
+ * such a check would be superfluous
*/
chan->slave_dst_addr = config->dst_addr;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index e588fff9f21d..423442e55d36 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -26,7 +26,7 @@ static DEFINE_MUTEX(of_dma_lock);
*
* Finds a DMA controller with matching device node and number for dma cells
* in a list of registered DMA controllers. If a match is found a valid pointer
- * to the DMA data stored is retuned. A NULL pointer is returned if no match is
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
* found.
*/
static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
@@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
*
* This function can be used as the of xlate callback for DMA driver which wants
* to match the channel based on the channel id. When using this xlate function
- * the #dma-cells propety of the DMA controller dt node needs to be set to 1.
+ * the #dma-cells property of the DMA controller dt node needs to be set to 1.
* The data parameter of of_dma_controller_register must be a pointer to the
* dma_device struct the function should match upon.
*
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index e001f4f7aa64..aa436f9e3571 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1156,7 +1156,7 @@ static int owl_dma_probe(struct platform_device *pdev)
}
/*
- * Eventhough the DMA controller is capable of generating 4
+ * Even though the DMA controller is capable of generating 4
* IRQ's for DMA priority feature, we only use 1 IRQ for
* simplification.
*/
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 60c4de8dac1d..82a9fe88ad54 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
* This is the limit for transfers with a buswidth of 1, larger
* buswidths will have larger limits.
*/
- ret = dma_set_max_seg_size(&adev->dev, 1900800);
- if (ret)
- dev_err(&adev->dev, "unable to set the seg size\n");
-
+ dma_set_max_seg_size(&adev->dev, 1900800);
init_pl330_debugfs(pl330);
dev_info(&adev->dev,
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index bbb60a970dab..7b78759ac734 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -9,7 +9,7 @@
*/
/*
- * This driver supports the asynchrounous DMA copy and RAID engines available
+ * This driver supports the asynchronous DMA copy and RAID engines available
* on the AMCC PPC440SPe Processors.
* Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
* ADMA driver written by D.Williams.
diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h
index 1ff4be23db0f..b5725481bfa6 100644
--- a/drivers/dma/ppc4xx/dma.h
+++ b/drivers/dma/ppc4xx/dma.h
@@ -14,7 +14,7 @@
/* Number of elements in the array with statical CDBs */
#define MAX_STAT_DMA_CDBS 16
-/* Number of DMA engines available on the contoller */
+/* Number of DMA engines available on the controller */
#define DMA_ENGINES_NUM 2
/* Maximum h/w supported number of destinations */
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index 21b4bf895200..39bc37268235 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -192,7 +192,7 @@ struct pt_cmd_queue {
/* Queue dma pool */
struct dma_pool *dma_pool;
- /* Queue base address (not neccessarily aligned)*/
+ /* Queue base address (not necessarily aligned)*/
struct ptdma_desc *qbase;
/* Aligned queue start address (per requirement) */
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5e7d332731e0..d43a881e43b9 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -440,7 +440,7 @@ static void bam_reset(struct bam_device *bdev)
val |= BAM_EN;
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
- /* set descriptor threshhold, start with 4 bytes */
+ /* set descriptor threshold, start with 4 bytes */
writel_relaxed(DEFAULT_CNT_THRSHLD,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
@@ -667,7 +667,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
for_each_sg(sgl, sg, sg_len, i)
num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
- /* allocate enough room to accomodate the number of entries */
+ /* allocate enough room to accommodate the number of entries */
async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
GFP_NOWAIT);
@@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev)
/* set max dma segment size */
bdev->common.dev = bdev->dev;
- ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
- if (ret) {
- dev_err(bdev->dev, "cannot set maximum segment size\n");
- goto err_bam_channel_exit;
- }
+ dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
platform_set_drvdata(pdev, bdev);
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index e6ebd688d746..52a7c8f2498f 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1856,7 +1856,7 @@ static void gpi_issue_pending(struct dma_chan *chan)
read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
- /* move all submitted discriptors to issued list */
+ /* move all submitted descriptors to issued list */
spin_lock_irqsave(&gchan->vc.lock, flags);
if (vchan_issue_pending(&gchan->vc))
vd = list_last_entry(&gchan->vc.desc_issued,
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index 53f4273b657c..c1db398adc84 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -650,7 +650,7 @@ static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
/*
* residue is either the full length if it is in the issued list, or 0
* if it is in progress. We have no reliable way of determining
- * anything inbetween
+ * anything in between
*/
dma_set_residue(txstate, residue);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 40482cb73d79..1094a2f82164 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
- if (ret)
- return ret;
+ dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
if (ret)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 7cc9eb2217e8..8ead0a1fd237 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -318,7 +318,7 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
}
/*
- * Find a slave channel configuration from the contoller list by either a slave
+ * Find a slave channel configuration from the controller list by either a slave
* ID in the non-DT case, or by a MID/RID value in the DT case
*/
static const struct sh_dmae_slave_config *dmae_find_slave(
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2c489299148e..d52e1685aed5 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (ret)
goto destroy_cache;
- ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
- if (ret) {
- d40_err(dev, "Failed to set dma max seg size\n");
- goto destroy_cache;
- }
+ dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
d40_hw_init(base);
diff --git a/drivers/dma/ste_dma40.h b/drivers/dma/ste_dma40.h
index c697bfe16a01..a90c786acc1f 100644
--- a/drivers/dma/ste_dma40.h
+++ b/drivers/dma/ste_dma40.h
@@ -4,7 +4,7 @@
#define STE_DMA40_H
/*
- * Maxium size for a single dma descriptor
+ * Maximum size for a single dma descriptor
* Size is limited to 16 bits.
* Size is in the units of addr-widths (1,2,4,8 bytes)
* Larger transfers will be split up to multiple linked desc
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index c504e855eb02..2e30e9a94a1e 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -369,7 +369,7 @@ struct d40_phy_lli_bidir {
* @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
* @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
*
- * This struct must be 8 bytes aligned since it will be accessed directy by
+ * This struct must be 8 bytes aligned since it will be accessed directly by
* the DMA. Never add any none hw mapped registers to this struct.
*/
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 4087e0263a48..0be6e944df6f 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -403,6 +403,7 @@ static struct stm32_dma3_swdesc *stm32_dma3_chan_desc_alloc(struct stm32_dma3_ch
swdesc = kzalloc(struct_size(swdesc, lli, count), GFP_NOWAIT);
if (!swdesc)
return NULL;
+ swdesc->lli_size = count;
for (i = 0; i < count; i++) {
swdesc->lli[i].hwdesc = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT,
@@ -410,7 +411,6 @@ static struct stm32_dma3_swdesc *stm32_dma3_chan_desc_alloc(struct stm32_dma3_ch
if (!swdesc->lli[i].hwdesc)
goto err_pool_free;
}
- swdesc->lli_size = count;
swdesc->ccr = 0;
/* Set LL base address */
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index ac69778827f2..7d1acda2d72b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -463,7 +463,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
/*
* If interrupt is pending then do nothing as the ISR will handle
- * the programing for new request.
+ * the programming for new request.
*/
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
dev_err(tdc2dev(tdc),
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index d349c6d482ae..9062a237cd16 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -131,7 +131,6 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
struct device *xudma_get_device(struct udma_dev *ud);
struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
-void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 7e6c04afbe89..6ab9bfbdc480 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1186,10 +1186,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
d->dev_addr = dev_addr;
d->fi = burst;
d->es = es;
+ d->sglen = 1;
d->sg[0].addr = buf_addr;
d->sg[0].en = period_len / es_bytes[es];
d->sg[0].fn = buf_len / period_len;
- d->sglen = 1;
d->ccr = c->ccr;
if (dir == DMA_DEV_TO_MEM)
@@ -1258,10 +1258,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->dev_addr = src;
d->fi = 0;
d->es = data_type;
+ d->sglen = 1;
d->sg[0].en = len / BIT(data_type);
d->sg[0].fn = 1;
d->sg[0].addr = dest;
- d->sglen = 1;
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
@@ -1309,6 +1309,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (data_type > CSDP_DATA_TYPE_32)
data_type = CSDP_DATA_TYPE_32;
+ d->sglen = 1;
sg = &d->sg[0];
d->dir = DMA_MEM_TO_MEM;
d->dev_addr = xt->src_start;
@@ -1316,7 +1317,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
sg->en = xt->sgl[0].size / BIT(data_type);
sg->fn = xt->numf;
sg->addr = xt->dst_start;
- d->sglen = 1;
d->ccr = c->ccr;
src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
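
The reordering in these omap-dma hunks (and in the stm32-dma3 hunk further up) moves the element-count assignment ahead of the first access to the flexible array it describes. That is the usual pattern when the array carries a __counted_by() annotation: the count must be valid before sg[0] (or lli[0]) is indexed, so that runtime bounds checking does not trip. A minimal sketch under that assumption, with illustrative names rather than the drivers' own:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_desc {
	unsigned int sglen;
	struct {
		dma_addr_t addr;
		unsigned int en;
		unsigned int fn;
	} sg[] __counted_by(sglen);
};

static struct demo_desc *demo_desc_alloc(void)
{
	struct demo_desc *d = kzalloc(struct_size(d, sg, 1), GFP_NOWAIT);

	if (!d)
		return NULL;

	d->sglen = 1;		/* set the count first ... */
	d->sg[0].addr = 0;	/* ... then the array may be indexed */
	return d;
}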
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index fd4397adeb79..275848a9c450 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1742,7 +1742,7 @@ static int xgene_dma_probe(struct platform_device *pdev)
/* Initialize DMA channels software state */
xgene_dma_init_channels(pdma);
- /* Configue DMA rings */
+ /* Configure DMA rings */
ret = xgene_dma_init_rings(pdma);
if (ret)
goto err_clk_enable;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 36bd4825d389..be87764af9e8 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -149,7 +149,7 @@ struct xilinx_dpdma_chan;
* @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
* @next_desc: next descriptor 32 bit address
* @src_addr: payload source address (1st page, 32 LSB)
- * @addr_ext_23: payload source address (3nd and 3rd pages, 16 LSBs)
+ * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
* @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
* @src_addr2: payload source address (2nd page, 32 LSB)
* @src_addr3: payload source address (3rd page, 32 LSB)
@@ -210,7 +210,7 @@ struct xilinx_dpdma_tx_desc {
* @vchan: virtual DMA channel
* @reg: register base address
* @id: channel ID
- * @wait_to_stop: queue to wait for outstanding transacitons before stopping
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
@@ -671,6 +671,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
}
/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of each period in bytes
+ * @flags: tx flags argument passed in to prepare function
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, unsigned long flags)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+
+ if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
+ &buf_addr, 1);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = period_len;
+ hw_desc->hsize_stride =
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
+ period_len) |
+ FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
+ period_len);
+ hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
+ XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
+ XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+ buf_addr += period_len;
+ last = sw_desc;
+ }
+
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
+ if (chan->xdev->ext_addr)
+ last->hw.addr_ext |=
+ FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
+ upper_32_bits(sw_desc->dma_addr));
+
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
+
+ return NULL;
+}
+
+/**
* xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
* descriptor
* @chan: DPDMA channel
@@ -1189,6 +1267,23 @@ out_unlock:
/* -----------------------------------------------------------------------------
* DMA Engine Operations
*/
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ if (direction != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+ period_len, flags);
+}
static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
@@ -1672,6 +1767,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, ddev->cap_mask);
dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
dma_cap_set(DMA_REPEAT, ddev->cap_mask);
dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
@@ -1679,6 +1775,7 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+ ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
/* TODO: Can we achieve better granularity ? */
ddev->device_tx_status = dma_cookie_status;
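
With DMA_CYCLIC now advertised and device_prep_dma_cyclic wired up, a dmaengine client can drive a DPDMA channel with a standard cyclic transfer. A rough client-side sketch, assuming a channel already obtained via dma_request_chan() and a DMA-mapped buffer; buf, buf_len and period_len are placeholders, and buf_len must be a whole multiple of period_len or the prep call returns NULL:

#include <linux/dmaengine.h>

/* Hypothetical helper: start a cyclic MEM_TO_DEV transfer on a DPDMA channel. */
static int demo_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			     size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}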
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index f31631bef961..9ae46f1198fe 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -22,10 +22,10 @@
#include "../dmaengine.h"
/* Register Offsets */
-#define ZYNQMP_DMA_ISR 0x100
-#define ZYNQMP_DMA_IMR 0x104
-#define ZYNQMP_DMA_IER 0x108
-#define ZYNQMP_DMA_IDS 0x10C
+#define ZYNQMP_DMA_ISR (chan->irq_offset + 0x100)
+#define ZYNQMP_DMA_IMR (chan->irq_offset + 0x104)
+#define ZYNQMP_DMA_IER (chan->irq_offset + 0x108)
+#define ZYNQMP_DMA_IDS (chan->irq_offset + 0x10c)
#define ZYNQMP_DMA_CTRL0 0x110
#define ZYNQMP_DMA_CTRL1 0x114
#define ZYNQMP_DMA_DATA_ATTR 0x120
@@ -145,6 +145,9 @@
#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \
async_tx)
+/* IRQ Register offset for Versal Gen 2 */
+#define IRQ_REG_OFFSET 0x308
+
/**
* struct zynqmp_dma_desc_ll - Hw linked list descriptor
* @addr: Buffer address
@@ -211,6 +214,7 @@ struct zynqmp_dma_desc_sw {
* @bus_width: Bus width
* @src_burst_len: Source burst length
* @dst_burst_len: Dest burst length
+ * @irq_offset: Irq register offset
*/
struct zynqmp_dma_chan {
struct zynqmp_dma_device *zdev;
@@ -235,6 +239,7 @@ struct zynqmp_dma_chan {
u32 bus_width;
u32 src_burst_len;
u32 dst_burst_len;
+ u32 irq_offset;
};
/**
@@ -253,6 +258,14 @@ struct zynqmp_dma_device {
struct clk *clk_apb;
};
+struct zynqmp_dma_config {
+ u32 offset;
+};
+
+static const struct zynqmp_dma_config versal2_dma_config = {
+ .offset = IRQ_REG_OFFSET,
+};
+
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
u64 value)
{
@@ -892,6 +905,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
{
struct zynqmp_dma_chan *chan;
struct device_node *node = pdev->dev.of_node;
+ const struct zynqmp_dma_config *match_data;
int err;
chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
@@ -919,6 +933,10 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
return -EINVAL;
}
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (match_data)
+ chan->irq_offset = match_data->offset;
+
chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
zdev->chan = chan;
tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
@@ -1161,6 +1179,7 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
}
static const struct of_device_id zynqmp_dma_of_match[] = {
+ { .compatible = "amd,versal2-dma-1.0", .data = &versal2_dma_config },
{ .compatible = "xlnx,zynqmp-dma-1.0", },
{}
};
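
The Versal Gen 2 support works by folding a per-variant offset into the interrupt register macros: when the "amd,versal2-dma-1.0" compatible matches, chan->irq_offset becomes 0x308, so ZYNQMP_DMA_ISR expands to chan->irq_offset + 0x100 and lands at 0x408 on that SoC while remaining 0x100 elsewhere. A minimal sketch of the resulting access pattern, assuming the driver's chan->regs MMIO base and the usual write-1-to-clear status semantics:

/* Illustrative only: the ISR macro folds in chan->irq_offset, so the same
 * accessor serves both the legacy ZynqMP layout and the Versal Gen 2 one. */
static u32 demo_read_and_clear_irq(struct zynqmp_dma_chan *chan)
{
	u32 status = readl(chan->regs + ZYNQMP_DMA_ISR);

	writel(status, chan->regs + ZYNQMP_DMA_ISR);	/* ack (W1C assumed) */

	return status;
}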