| author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-03-15 20:25:13 +0100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-03-15 20:25:13 +0100 |
| commit | 2b3a4192dd01154bbb9f7c887c4b0fe35c9dc712 (patch) | |
| tree | 1d207bf593c5c2112dd20b6345a0eadd43e23c7d /drivers/dma | |
| parent | Merge tag 'soundwire-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff) | |
| parent | dmaengine: of: constify of_phandle_args in of_dma_find_controller() (diff) | |
Merge tag 'dmaengine-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
"New hardware support:
- Allwinner H616 dma support
- Renesas r8a779h0 dma controller support
- TI CSI2RX dma support
Updates:
- Freescale edma driver updates for TCD64 support for i.MX95
- constify of pointers and args
- Yaml conversion for MediaTek High-Speed controller binding
- TI k3 udma support for TX/RX DMA channels for thread IDs"
* tag 'dmaengine-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (25 commits)
dmaengine: of: constify of_phandle_args in of_dma_find_controller()
dmaengine: pl08x: constify pointer to char in filter function
MAINTAINERS: change in AMD ptdma maintainer
MAINTAINERS: adjust file entry in MEDIATEK DMA DRIVER
dmaengine: idxd: constify the struct device_type usage
dt-bindings: renesas,rcar-dmac: Add r8a779h0 support
dt-bindings: dma: convert MediaTek High-Speed controller to the json-schema
dmaengine: idxd: make dsa_bus_type const
dmaengine: fsl-edma: integrate TCD64 support for i.MX95
dt-bindings: fsl-dma: fsl-edma: add fsl,imx95-edma5 compatible string
dmaengine: mcf-edma: utilize edma_write_tcdreg() macro for TCD Access
dmaengine: fsl-edma: add address for channel mux register in fsl_edma_chan
dmaengine: fsl-edma: fix spare build warning
dmaengine: fsl-edma: involve help macro fsl_edma_set(get)_tcd()
dt-bindings: mmp-dma: convert to YAML
dmaengine: ti: k3-psil-j721s2: Add entry for CSI2RX
dmaengine: ti: k3-udma-glue: Add function to request RX chan for thread ID
dmaengine: ti: k3-udma-glue: Add function to request TX chan for thread ID
dmaengine: ti: k3-udma-glue: Update name for remote RX channel device
dmaengine: ti: k3-udma-glue: Add function to parse channel by ID
...
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/Kconfig | 14
-rw-r--r-- | drivers/dma/amba-pl08x.c | 2
-rw-r--r-- | drivers/dma/bestcomm/sram.c | 5
-rw-r--r-- | drivers/dma/fsl-edma-common.c | 101
-rw-r--r-- | drivers/dma/fsl-edma-common.h | 161
-rw-r--r-- | drivers/dma/fsl-edma-main.c | 19
-rw-r--r-- | drivers/dma/idxd/bus.c | 2
-rw-r--r-- | drivers/dma/idxd/cdev.c | 4
-rw-r--r-- | drivers/dma/idxd/idxd.h | 14
-rw-r--r-- | drivers/dma/idxd/sysfs.c | 10
-rw-r--r-- | drivers/dma/mcf-edma-main.c | 2
-rw-r--r-- | drivers/dma/of-dma.c | 2
-rw-r--r-- | drivers/dma/pl330.c | 1
-rw-r--r-- | drivers/dma/ti/k3-psil-j721s2.c | 73
-rw-r--r-- | drivers/dma/ti/k3-udma-glue.c | 298
-rw-r--r-- | drivers/dma/xilinx/xilinx_dma.c | 6
16 files changed, 540 insertions, 174 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e928f2ca0f1e..002a5ec80620 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -643,16 +643,16 @@ config TEGRA20_APB_DMA
 config TEGRA210_ADMA
 	tristate "NVIDIA Tegra210 ADMA support"
-	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
+	depends on (ARCH_TEGRA || COMPILE_TEST)
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-	  Support for the NVIDIA Tegra210 ADMA controller driver. The
-	  DMA controller has multiple DMA channels and is used to service
-	  various audio clients in the Tegra210 audio processing engine
-	  (APE). This DMA controller transfers data from memory to
-	  peripheral and vice versa. It does not support memory to
-	  memory data transfer.
+	  Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
+	  controller driver. The DMA controller has multiple DMA channels
+	  and is used to service various audio clients in the Tegra210
+	  audio processing engine (APE). This DMA controller transfers
+	  data from memory to peripheral and vice versa. It does not
+	  support memory to memory data transfer.
 
 config TIMB_DMA
 	tristate "Timberdale FPGA DMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index eea8bd33b4b7..fbf048f432bf 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2239,7 +2239,7 @@ static int pl08x_resume(struct dma_chan *chan)
 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 {
 	struct pl08x_dma_chan *plchan;
-	char *name = chan_id;
+	const char *name = chan_id;
 
 	/* Reject channels for devices not bound to this driver */
 	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
index 0553956f7456..ad74d57cc3ab 100644
--- a/drivers/dma/bestcomm/sram.c
+++ b/drivers/dma/bestcomm/sram.c
@@ -90,13 +90,8 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
 	bcom_sram->rh = rh_create(4);
 
 	/* Attach the free zones */
-#if 0
-	/* Currently disabled ... for future use only */
-	reg_addr_p = of_get_property(sram_node, "available", &psize);
-#else
 	regaddr_p = NULL;
 	psize = 0;
-#endif
 
 	if (!regaddr_p || !psize) {
 		/* Attach the whole zone */
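The amba-pl08x change above only touches the type of the opaque cookie that dma_request_channel() hands to a filter function. For context, a minimal filter callback in that style — a sketch, not the driver's actual code; my_chan_name() is a hypothetical helper:

    #include <linux/dmaengine.h>
    #include <linux/string.h>

    /*
     * Sketch of a dmaengine filter in the pl08x style: the chan_id
     * cookie carries a channel name, and after this series it can be
     * treated as read-only. my_chan_name() is hypothetical.
     */
    static bool my_filter(struct dma_chan *chan, void *chan_id)
    {
    	const char *name = chan_id;

    	return name && !strcmp(my_chan_name(chan), name);
    }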
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 793f1a7ad5e3..b18faa7cfedb 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -97,8 +97,8 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
 		 * ch_mux: With the exception of 0, attempts to write a value
 		 * already in use will be forced to 0.
 		 */
-		if (!edma_readl_chreg(fsl_chan, ch_mux))
-			edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+		if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
+			edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
 	}
 
 	val = edma_readl_chreg(fsl_chan, ch_csr);
@@ -134,7 +134,7 @@ static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
 
 	flags = fsl_edma_drvflags(fsl_chan);
 	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
-		edma_writel_chreg(fsl_chan, 0, ch_mux);
+		edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
 
 	val &= ~EDMA_V3_CH_CSR_ERQ;
 	edma_writel_chreg(fsl_chan, val, ch_csr);
@@ -351,39 +351,45 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
 {
 	struct fsl_edma_desc *edesc = fsl_chan->edesc;
 	enum dma_transfer_direction dir = edesc->dirn;
-	dma_addr_t cur_addr, dma_addr;
+	dma_addr_t cur_addr, dma_addr, old_addr;
 	size_t len, size;
 	u32 nbytes = 0;
 	int i;
 
 	/* calculate the total size in this desc */
 	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
-		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
 		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
 			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
-		len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+		len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
 	}
 
 	if (!in_progress)
 		return len;
 
-	if (dir == DMA_MEM_TO_DEV)
-		cur_addr = edma_read_tcdreg(fsl_chan, saddr);
-	else
-		cur_addr = edma_read_tcdreg(fsl_chan, daddr);
+	/* 64bit read is not atomic, need read retry when high 32bit changed */
+	do {
+		if (dir == DMA_MEM_TO_DEV) {
+			old_addr = edma_read_tcdreg(fsl_chan, saddr);
+			cur_addr = edma_read_tcdreg(fsl_chan, saddr);
+		} else {
+			old_addr = edma_read_tcdreg(fsl_chan, daddr);
+			cur_addr = edma_read_tcdreg(fsl_chan, daddr);
+		}
+	} while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));
 
 	/* figure out the finished and calculate the residue */
 	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
-		nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
 		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
 			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
-		size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+		size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
 
 		if (dir == DMA_MEM_TO_DEV)
-			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
+			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
 		else
-			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
+			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);
 
 		len -= size;
 		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
@@ -426,8 +432,7 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
 	return fsl_chan->status;
 }
 
-static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
-				  struct fsl_edma_hw_tcd *tcd)
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
 {
 	u16 csr = 0;
 
@@ -439,26 +444,26 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
 	 */
 	edma_write_tcdreg(fsl_chan, 0, csr);
 
-	edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
-	edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);
 
-	edma_write_tcdreg(fsl_chan, tcd->attr, attr);
-	edma_write_tcdreg(fsl_chan, tcd->soff, soff);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, soff);
 
-	edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
-	edma_write_tcdreg(fsl_chan, tcd->slast, slast);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, slast);
 
-	edma_write_tcdreg(fsl_chan, tcd->citer, citer);
-	edma_write_tcdreg(fsl_chan, tcd->biter, biter);
-	edma_write_tcdreg(fsl_chan, tcd->doff, doff);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, doff);
 
-	edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);
 
-	csr = le16_to_cpu(tcd->csr);
+	csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);
 
 	if (fsl_chan->is_sw) {
 		csr |= EDMA_TCD_CSR_START;
-		tcd->csr = cpu_to_le16(csr);
+		fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
 	}
 
 	/*
@@ -473,14 +478,14 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
 		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
 
-	edma_write_tcdreg(fsl_chan, tcd->csr, csr);
+	edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
 }
 
 static inline
 void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
-		       struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
-		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
-		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+		       struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
+		       u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
+		       u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
 		       bool disable_req, bool enable_sg)
 {
 	struct dma_slave_config *cfg = &fsl_chan->cfg;
@@ -493,12 +498,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
 	 * So we put the value in little endian in memory, waiting
 	 * for fsl_edma_set_tcd_regs doing the swap.
 	 */
-	tcd->saddr = cpu_to_le32(src);
-	tcd->daddr = cpu_to_le32(dst);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);
 
-	tcd->attr = cpu_to_le16(attr);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);
 
-	tcd->soff = cpu_to_le16(soff);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
 
 	if (fsl_chan->is_multi_fifo) {
 		/* set mloff to support multiple fifo */
@@ -515,15 +520,16 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
 		}
 	}
 
-	tcd->nbytes = cpu_to_le32(nbytes);
-	tcd->slast = cpu_to_le32(slast);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);
+
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);
 
-	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
-	tcd->doff = cpu_to_le16(doff);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);
 
-	tcd->dlast_sga = cpu_to_le32(dlast_sga);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);
 
-	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
 	if (major_int)
 		csr |= EDMA_TCD_CSR_INT_MAJOR;
@@ -539,7 +545,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
 	if (fsl_chan->is_sw)
 		csr |= EDMA_TCD_CSR_START;
 
-	tcd->csr = cpu_to_le16(csr);
+	fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
 }
 
 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -580,8 +586,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 	dma_addr_t dma_buf_next;
 	bool major_int = true;
 	int sg_len, i;
-	u32 src_addr, dst_addr, last_sg, nbytes;
+	dma_addr_t src_addr, dst_addr, last_sg;
 	u16 soff, doff, iter;
+	u32 nbytes;
 
 	if (!is_slave_direction(direction))
 		return NULL;
@@ -653,8 +660,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
 	struct fsl_edma_desc *fsl_desc;
 	struct scatterlist *sg;
-	u32 src_addr, dst_addr, last_sg, nbytes;
+	dma_addr_t src_addr, dst_addr, last_sg;
 	u16 soff, doff, iter;
+	u32 nbytes;
 	int i;
 
 	if (!is_slave_direction(direction))
@@ -803,7 +811,8 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
 
 	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
-				sizeof(struct fsl_edma_hw_tcd),
+				fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
+				sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
 				32, 0);
 	return 0;
 }
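The residue rework above reads the TCD address register twice and retries while the upper 32 bits differ: on TCD64 hardware a 64-bit address is fetched as two 32-bit bus reads, so a transfer crossing a 4 GiB boundary between the two halves would yield a torn value. A standalone sketch of the idiom, with read_addr64() standing in for edma_read_tcdreg() (a hypothetical helper, not the driver's):

    #include <linux/io.h>
    #include <linux/kernel.h>

    /*
     * Torn-read guard as used in fsl_edma_desc_residue(): retry until
     * the high word is stable across two reads; read_addr64() is a
     * hypothetical 64-bit MMIO reader built from two 32-bit accesses.
     */
    static u64 read_stable_addr64(void __iomem *reg)
    {
    	u64 old, cur;

    	do {
    		old = read_addr64(reg);
    		cur = read_addr64(reg);
    	} while (upper_32_bits(cur) != upper_32_bits(old));

    	return cur;
    }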
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index f5e216b157c7..7bf0aba471a8 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -88,6 +88,20 @@ struct fsl_edma_hw_tcd {
 	__le16	biter;
 };
 
+struct fsl_edma_hw_tcd64 {
+	__le64	saddr;
+	__le16	soff;
+	__le16	attr;
+	__le32	nbytes;
+	__le64	slast;
+	__le64	daddr;
+	__le64	dlast_sga;
+	__le16	doff;
+	__le16	citer;
+	__le16	csr;
+	__le16	biter;
+} __packed;
+
 struct fsl_edma3_ch_reg {
 	__le32	ch_csr;
 	__le32	ch_es;
@@ -97,7 +111,10 @@ struct fsl_edma3_ch_reg {
 	__le32	ch_mux;
 	__le32	ch_mattr; /* edma4, reserved for edma3 */
 	__le32	ch_reserved;
-	struct fsl_edma_hw_tcd tcd;
+	union {
+		struct fsl_edma_hw_tcd tcd;
+		struct fsl_edma_hw_tcd64 tcd64;
+	};
 } __packed;
 
 /*
@@ -126,7 +143,7 @@ struct edma_regs {
 
 struct fsl_edma_sw_tcd {
 	dma_addr_t	ptcd;
-	struct fsl_edma_hw_tcd	*vtcd;
+	void	*vtcd;
 };
 
 struct fsl_edma_chan {
@@ -145,7 +162,8 @@ struct fsl_edma_chan {
 	u32				dma_dev_size;
 	enum dma_data_direction		dma_dir;
 	char				chan_name[32];
-	struct fsl_edma_hw_tcd __iomem *tcd;
+	void __iomem			*tcd;
+	void __iomem			*mux_addr;
 	u32				real_count;
 	struct work_struct		issue_worker;
 	struct platform_device		*pdev;
@@ -188,6 +206,7 @@ struct fsl_edma_desc {
 #define FSL_EDMA_DRV_CLEAR_DONE_E_SG	BIT(13)
 /* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
 #define FSL_EDMA_DRV_CLEAR_DONE_E_LINK	BIT(14)
+#define FSL_EDMA_DRV_TCD64		BIT(15)
 
 #define FSL_EDMA_DRV_EDMA3	(FSL_EDMA_DRV_SPLIT_REG |	\
 				 FSL_EDMA_DRV_BUS_8BYTE |	\
@@ -207,6 +226,8 @@ struct fsl_edma_drvdata {
 	u32			chreg_off;
 	u32			chreg_space_sz;
 	u32			flags;
+	u32			mux_off;	/* channel mux register offset */
+	u32			mux_skip;	/* how much skip for each channel */
 	int			(*setup_irq)(struct platform_device *pdev,
 					     struct fsl_edma_engine *fsl_edma);
 };
@@ -229,23 +250,108 @@ struct fsl_edma_engine {
 	struct fsl_edma_chan	chans[] __counted_by(n_chans);
 };
 
-#define edma_read_tcdreg(chan, __name)				\
-(sizeof(chan->tcd->__name) == sizeof(u32) ?			\
-	edma_readl(chan->edma, &chan->tcd->__name) :		\
-	edma_readw(chan->edma, &chan->tcd->__name))
-
-#define edma_write_tcdreg(chan, val, __name)			\
-(sizeof(chan->tcd->__name) == sizeof(u32) ?			\
-	edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) :	\
-	edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
+#define edma_read_tcdreg_c(chan, _tcd, __name)				\
+(sizeof((_tcd)->__name) == sizeof(u64) ?				\
+	edma_readq(chan->edma, &(_tcd)->__name) :			\
+	((sizeof((_tcd)->__name) == sizeof(u32)) ?			\
+		edma_readl(chan->edma, &(_tcd)->__name) :		\
+		edma_readw(chan->edma, &(_tcd)->__name)			\
+	))
+
+#define edma_read_tcdreg(chan, __name)					\
+((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ?			\
+	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) : \
+	edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \
+)
+
+#define edma_write_tcdreg_c(chan, _tcd, _val, __name)			\
+do {									\
+	switch (sizeof(_tcd->__name)) {					\
+	case sizeof(u64):						\
+		edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \
+		break;							\
+	case sizeof(u32):						\
+		edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \
+		break;							\
+	case sizeof(u16):						\
+		edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \
+		break;							\
+	case sizeof(u8):						\
+		edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \
+		break;							\
+	}								\
+} while (0)
+
+#define edma_write_tcdreg(chan, val, __name)				\
+do {									\
+	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
+	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
+									\
+	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)		\
+		edma_write_tcdreg_c(chan, tcd64_r, val, __name);	\
+	else								\
+		edma_write_tcdreg_c(chan, tcd_r, val, __name);		\
+} while (0)
+
+#define edma_cp_tcd_to_reg(chan, __tcd, __name)				\
+do {									\
+	struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \
+	struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \
+	struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd; \
+	struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd; \
+									\
+	if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64)		\
+		edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name); \
+	else								\
+		edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name); \
+} while (0)
 
 #define edma_readl_chreg(chan, __name)				\
 	edma_readl(chan->edma,					\
-		   (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+		   (void __iomem *)&(container_of(((__force void *)chan->tcd),\
+						  struct fsl_edma3_ch_reg, tcd)->__name))
 
 #define edma_writel_chreg(chan, val, __name)			\
 	edma_writel(chan->edma, val,				\
-		    (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+		    (void __iomem *)&(container_of(((__force void *)chan->tcd),\
+						   struct fsl_edma3_ch_reg, tcd)->__name))
+
+#define fsl_edma_get_tcd(_chan, _tcd, _field)				\
+(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
+	(((struct fsl_edma_hw_tcd *)_tcd)->_field))
+
+#define fsl_edma_le_to_cpu(x)						\
+(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) :		\
+	(sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) :	\
+		le16_to_cpu((__force __le16)(x))))
+
+#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field)			\
+(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ?			\
+	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \
+	fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
+
+#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field)			\
+do {									\
+	switch (sizeof((_tcd)->_field)) {				\
+	case sizeof(u64):						\
+		*(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \
+		break;							\
+	case sizeof(u32):						\
+		*(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \
+		break;							\
+	case sizeof(u16):						\
+		*(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \
+		break;							\
+	}								\
+} while (0)
+
+#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field)		\
+do {									\
+	if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64)		\
+		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field); \
+	else								\
+		fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
+} while (0)
 
 /*
  * R/W functions for big- or little-endian registers:
@@ -253,6 +359,21 @@ struct fsl_edma_engine {
  * For the big-endian IP module, the offset for 8-bit or 16-bit registers
  * should also be swapped opposite to that in little-endian IP.
  */
+static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+	u64 l, h;
+
+	if (edma->big_endian) {
+		l = ioread32be(addr);
+		h = ioread32be(addr + 4);
+	} else {
+		l = ioread32(addr);
+		h = ioread32(addr + 4);
+	}
+
+	return (h << 32) | l;
+}
+
 static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
 {
 	if (edma->big_endian)
@@ -298,6 +419,18 @@ static inline void edma_writel(struct fsl_edma_engine *edma,
 		iowrite32(val, addr);
 }
 
+static inline void edma_writeq(struct fsl_edma_engine *edma,
+			       u64 val, void __iomem *addr)
+{
+	if (edma->big_endian) {
+		iowrite32be(val & 0xFFFFFFFF, addr);
+		iowrite32be(val >> 32, addr + 4);
+	} else {
+		iowrite32(val & 0xFFFFFFFF, addr);
+		iowrite32(val >> 32, addr + 4);
+	}
+}
+
 static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
 {
 	return container_of(chan, struct fsl_edma_chan, vchan.chan);
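The header's new accessors all hinge on one idiom: sizeof() on the field selects the right-width accessor at compile time, so one macro serves both the 32-bit and 64-bit TCD layouts. A freestanding user-space illustration of that dispatch (not the kernel macros themselves):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * sizeof()-based dispatch as in edma_write_tcdreg_c(): the switch
     * condition is a compile-time constant, so the compiler keeps only
     * the one store that matches the field's width.
     */
    #define store_field(ptr, val, field)			\
    do {							\
    	switch (sizeof((ptr)->field)) {			\
    	case sizeof(uint64_t):				\
    		*(uint64_t *)&(ptr)->field = (val);	\
    		break;					\
    	case sizeof(uint32_t):				\
    		*(uint32_t *)&(ptr)->field = (val);	\
    		break;					\
    	case sizeof(uint16_t):				\
    		*(uint16_t *)&(ptr)->field = (val);	\
    		break;					\
    	}						\
    } while (0)

    struct tcd { uint64_t saddr; uint16_t soff; };

    int main(void)
    {
    	struct tcd t = { 0 };

    	store_field(&t, 0x1234, soff);	/* hits the u16 case */
    	printf("soff=%x\n", t.soff);
    	return 0;
    }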
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index d36e28b9c767..402f0058a180 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -360,6 +360,18 @@ static struct fsl_edma_drvdata imx93_data4 = {
 	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
 	.chreg_space_sz = 0x8000,
 	.chreg_off = 0x10000,
+	.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
+	.mux_skip = 0x8000,
+	.setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx95_data5 = {
+	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
+		 FSL_EDMA_DRV_TCD64,
+	.chreg_space_sz = 0x8000,
+	.chreg_off = 0x10000,
+	.mux_off = 0x200,
+	.mux_skip = sizeof(u32),
 	.setup_irq = fsl_edma3_irq_init,
 };
 
@@ -371,6 +383,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
 	{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
 	{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
 	{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
+	{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -511,6 +524,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
 			return ret;
 	}
 
+	if (drvdata->flags & FSL_EDMA_DRV_TCD64)
+		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
 	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
 	for (i = 0; i < fsl_edma->n_chans; i++) {
 		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
@@ -533,11 +549,12 @@ static int fsl_edma_probe(struct platform_device *pdev)
 				offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
 		fsl_chan->tcd = fsl_edma->membase
 				+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
+		fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
 
 		fsl_chan->pdev = pdev;
 		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
 
-		edma_write_tcdreg(fsl_chan, 0, csr);
+		edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
 		fsl_edma_chan_mux(fsl_chan, 0, false);
 	}
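The two drvdata entries above encode where each SoC keeps its channel-mux registers: i.MX93 (eDMA4) embeds the mux inside every channel's register block (stride 0x8000), while i.MX95 (eDMA5) packs the muxes as a u32 array at offset 0x200. Probe then derives one address per channel; restated as a sketch grounded in the probe line above:

    /*
     * Per-channel mux address as computed in fsl_edma_probe():
     * membase + mux_off + i * mux_skip covers both layouts.
     */
    static void __iomem *chan_mux_addr(void __iomem *membase,
    				   const struct fsl_edma_drvdata *d,
    				   unsigned int chan)
    {
    	return membase + d->mux_off + chan * d->mux_skip;
    }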
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
index 0c9e689a2e77..b83b27e04f2a 100644
--- a/drivers/dma/idxd/bus.c
+++ b/drivers/dma/idxd/bus.c
@@ -72,7 +72,7 @@ static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
 	return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0);
 }
 
-struct bus_type dsa_bus_type = {
+const struct bus_type dsa_bus_type = {
 	.name = "dsa",
 	.match = idxd_config_bus_match,
 	.probe = idxd_config_bus_probe,
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e5a94a93a3cc..8078ab9acfbc 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -152,7 +152,7 @@ static void idxd_file_dev_release(struct device *dev)
 	mutex_unlock(&wq->wq_lock);
 }
 
-static struct device_type idxd_cdev_file_type = {
+static const struct device_type idxd_cdev_file_type = {
 	.name = "idxd_file",
 	.release = idxd_file_dev_release,
 	.groups = cdev_file_attribute_groups,
@@ -169,7 +169,7 @@ static void idxd_cdev_dev_release(struct device *dev)
 	kfree(idxd_cdev);
 }
 
-static struct device_type idxd_cdev_device_type = {
+static const struct device_type idxd_cdev_device_type = {
 	.name = "idxd_cdev",
 	.release = idxd_cdev_dev_release,
 };
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d0f5db6cf1ed..a4099a1e2340 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -282,7 +282,7 @@ typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
 struct idxd_driver_data {
 	const char *name_prefix;
 	enum idxd_type type;
-	struct device_type *dev_type;
+	const struct device_type *dev_type;
 	int compl_size;
 	int align;
 	int evl_cr_off;
@@ -515,15 +515,15 @@ static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
 	iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 }
 
-extern struct bus_type dsa_bus_type;
+extern const struct bus_type dsa_bus_type;
 
 extern bool support_enqcmd;
 extern struct ida idxd_ida;
-extern struct device_type dsa_device_type;
-extern struct device_type iax_device_type;
-extern struct device_type idxd_wq_device_type;
-extern struct device_type idxd_engine_device_type;
-extern struct device_type idxd_group_device_type;
+extern const struct device_type dsa_device_type;
+extern const struct device_type iax_device_type;
+extern const struct device_type idxd_wq_device_type;
+extern const struct device_type idxd_engine_device_type;
+extern const struct device_type idxd_group_device_type;
 
 static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
 {
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 523ae0dff7d4..7f28f01be672 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -91,7 +91,7 @@ static void idxd_conf_engine_release(struct device *dev)
 	kfree(engine);
 }
 
-struct device_type idxd_engine_device_type = {
+const struct device_type idxd_engine_device_type = {
 	.name = "engine",
 	.release = idxd_conf_engine_release,
 	.groups = idxd_engine_attribute_groups,
@@ -577,7 +577,7 @@ static void idxd_conf_group_release(struct device *dev)
 	kfree(group);
 }
 
-struct device_type idxd_group_device_type = {
+const struct device_type idxd_group_device_type = {
 	.name = "group",
 	.release = idxd_conf_group_release,
 	.groups = idxd_group_attribute_groups,
@@ -1369,7 +1369,7 @@ static void idxd_conf_wq_release(struct device *dev)
 	kfree(wq);
 }
 
-struct device_type idxd_wq_device_type = {
+const struct device_type idxd_wq_device_type = {
 	.name = "wq",
 	.release = idxd_conf_wq_release,
 	.groups = idxd_wq_attribute_groups,
@@ -1798,13 +1798,13 @@ static void idxd_conf_device_release(struct device *dev)
 	kfree(idxd);
 }
 
-struct device_type dsa_device_type = {
+const struct device_type dsa_device_type = {
 	.name = "dsa",
 	.release = idxd_conf_device_release,
 	.groups = idxd_attribute_groups,
 };
 
-struct device_type iax_device_type = {
+const struct device_type iax_device_type = {
 	.name = "iax",
 	.release = idxd_conf_device_release,
 	.groups = idxd_attribute_groups,
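The idxd hunks are mechanical constification: the driver core now takes const struct bus_type / struct device_type pointers, so these descriptors can live in rodata. The pattern, as a minimal sketch with a placeholder release hook:

    #include <linux/device.h>
    #include <linux/slab.h>

    /*
     * Constified driver-model descriptor, as in the idxd hunks above:
     * the structure is immutable after definition, so const moves it
     * to rodata and lets the compiler reject accidental writes.
     * my_release() is a placeholder; real drivers free the container.
     */
    static void my_release(struct device *dev)
    {
    	kfree(dev);
    }

    static const struct device_type my_device_type = {
    	.name		= "my_dev",
    	.release	= my_release,
    };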
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index ab21455d9c3a..dba631783876 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -202,7 +202,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
 		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
 		mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
 				+ i * sizeof(struct fsl_edma_hw_tcd);
-		iowrite32(0x0, &mcf_chan->tcd->csr);
+		edma_write_tcdreg(mcf_chan, cpu_to_le32(0), csr);
 	}
 
 	iowrite32(~0, regs->inth);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 775a7f408b9a..e588fff9f21d 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -29,7 +29,7 @@ static DEFINE_MUTEX(of_dma_lock);
  * to the DMA data stored is retuned. A NULL pointer is returned if no match is
  * found.
  */
-static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
+static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec)
 {
 	struct of_dma *ofdma;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index c29744bfdf2c..5f6d7f1e095f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2588,6 +2588,7 @@ static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
 
 		desc->status = PREP;
 		desc->txd.callback = NULL;
+		desc->txd.callback_result = NULL;
 	}
 
 	spin_unlock_irqrestore(lock, flags);
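The one-line pl330 fix matters because pluck_desc() recycles descriptors from a pool: clearing only txd.callback left a stale txd.callback_result behind, and dmaengine's completion path invokes whichever callback variant is set. A sketch of the full reset a recycled descriptor needs — the hunk above adds only the callback_result line; clearing callback_param here is extra caution, not part of the patch:

    #include <linux/dmaengine.h>

    /*
     * Reset callback state on a descriptor taken back from a free
     * pool, so the previous owner's completion hooks cannot fire for
     * the new transfer.
     */
    static void reset_recycled_txd(struct dma_async_tx_descriptor *txd)
    {
    	txd->callback = NULL;
    	txd->callback_result = NULL;
    	txd->callback_param = NULL;	/* extra caution; not in the patch */
    }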
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 1d5430fc5724..ba08bdcdcd2b 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -57,6 +57,14 @@
 		},					\
 	}
 
+#define PSIL_CSI2RX(x)					\
+	{						\
+		.thread_id = x,				\
+		.ep_config = {				\
+			.ep_type = PSIL_EP_NATIVE,	\
+		},					\
+	}
+
 /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
 static struct psil_ep j721s2_src_ep_map[] = {
 	/* PDMA_MCASP - McASP0-4 */
@@ -114,6 +122,71 @@ static struct psil_ep j721s2_src_ep_map[] = {
 	PSIL_PDMA_XY_PKT(0x4707),
 	PSIL_PDMA_XY_PKT(0x4708),
 	PSIL_PDMA_XY_PKT(0x4709),
+	/* CSI2RX */
+	PSIL_CSI2RX(0x4940),
+	PSIL_CSI2RX(0x4941),
+	PSIL_CSI2RX(0x4942),
+	PSIL_CSI2RX(0x4943),
+	PSIL_CSI2RX(0x4944),
+	PSIL_CSI2RX(0x4945),
+	PSIL_CSI2RX(0x4946),
+	PSIL_CSI2RX(0x4947),
+	PSIL_CSI2RX(0x4948),
+	PSIL_CSI2RX(0x4949),
+	PSIL_CSI2RX(0x494a),
+	PSIL_CSI2RX(0x494b),
+	PSIL_CSI2RX(0x494c),
+	PSIL_CSI2RX(0x494d),
+	PSIL_CSI2RX(0x494e),
+	PSIL_CSI2RX(0x494f),
+	PSIL_CSI2RX(0x4950),
+	PSIL_CSI2RX(0x4951),
+	PSIL_CSI2RX(0x4952),
+	PSIL_CSI2RX(0x4953),
+	PSIL_CSI2RX(0x4954),
+	PSIL_CSI2RX(0x4955),
+	PSIL_CSI2RX(0x4956),
+	PSIL_CSI2RX(0x4957),
+	PSIL_CSI2RX(0x4958),
+	PSIL_CSI2RX(0x4959),
+	PSIL_CSI2RX(0x495a),
+	PSIL_CSI2RX(0x495b),
+	PSIL_CSI2RX(0x495c),
+	PSIL_CSI2RX(0x495d),
+	PSIL_CSI2RX(0x495e),
+	PSIL_CSI2RX(0x495f),
+	PSIL_CSI2RX(0x4960),
+	PSIL_CSI2RX(0x4961),
+	PSIL_CSI2RX(0x4962),
+	PSIL_CSI2RX(0x4963),
+	PSIL_CSI2RX(0x4964),
+	PSIL_CSI2RX(0x4965),
+	PSIL_CSI2RX(0x4966),
+	PSIL_CSI2RX(0x4967),
+	PSIL_CSI2RX(0x4968),
+	PSIL_CSI2RX(0x4969),
+	PSIL_CSI2RX(0x496a),
+	PSIL_CSI2RX(0x496b),
+	PSIL_CSI2RX(0x496c),
+	PSIL_CSI2RX(0x496d),
+	PSIL_CSI2RX(0x496e),
+	PSIL_CSI2RX(0x496f),
+	PSIL_CSI2RX(0x4970),
+	PSIL_CSI2RX(0x4971),
+	PSIL_CSI2RX(0x4972),
+	PSIL_CSI2RX(0x4973),
+	PSIL_CSI2RX(0x4974),
+	PSIL_CSI2RX(0x4975),
+	PSIL_CSI2RX(0x4976),
+	PSIL_CSI2RX(0x4977),
+	PSIL_CSI2RX(0x4978),
+	PSIL_CSI2RX(0x4979),
+	PSIL_CSI2RX(0x497a),
+	PSIL_CSI2RX(0x497b),
+	PSIL_CSI2RX(0x497c),
+	PSIL_CSI2RX(0x497d),
+	PSIL_CSI2RX(0x497e),
+	PSIL_CSI2RX(0x497f),
 	/* MAIN SA2UL */
 	PSIL_SA2UL(0x4a40, 0),
 	PSIL_SA2UL(0x4a41, 0),
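Each PSIL_CSI2RX() line above expands to one endpoint record; the 64 entries cover PSI-L source threads 0x4940-0x497f for the CSI2RX instances. One expanded entry, assuming the psil_ep layout used by the TI k3-psil code:

    /* What PSIL_CSI2RX(0x4940) expands to: a native PSI-L endpoint. */
    static struct psil_ep csi2rx_first_ep = {
    	.thread_id = 0x4940,
    	.ep_config = {
    		.ep_type = PSIL_EP_NATIVE,
    	},
    };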
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c278d5facf7d..c9b93055dc9d 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -111,6 +111,35 @@ static int of_k3_udma_glue_parse(struct device_node *udmax_np,
 	return 0;
 }
 
+static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
+					    bool tx_chn)
+{
+	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+		return -EINVAL;
+
+	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+		return -EINVAL;
+
+	/* get psil endpoint config */
+	common->ep_config = psil_get_ep_config(thread_id);
+	if (IS_ERR(common->ep_config)) {
+		dev_err(common->dev,
+			"No configuration for psi-l thread 0x%04x\n",
+			thread_id);
+		return PTR_ERR(common->ep_config);
+	}
+
+	common->epib = common->ep_config->needs_epib;
+	common->psdata_size = common->ep_config->psd_size;
+
+	if (tx_chn)
+		common->dst_thread = thread_id;
+	else
+		common->src_thread = thread_id;
+
+	return 0;
+}
+
 static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
 		const char *name, struct k3_udma_glue_common *common,
 		bool tx_chn)
@@ -153,38 +182,32 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
 		common->atype_asel = dma_spec.args[1];
 	}
 
-	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
-		ret = -EINVAL;
-		goto out_put_spec;
-	}
+	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
 
-	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
-		ret = -EINVAL;
-		goto out_put_spec;
-	}
+out_put_spec:
+	of_node_put(dma_spec.np);
+	return ret;
+}
 
-	/* get psil endpoint config */
-	common->ep_config = psil_get_ep_config(thread_id);
-	if (IS_ERR(common->ep_config)) {
-		dev_err(common->dev,
-			"No configuration for psi-l thread 0x%04x\n",
-			thread_id);
-		ret = PTR_ERR(common->ep_config);
-		goto out_put_spec;
-	}
+static int
+of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
+				bool tx_chn, u32 thread_id)
+{
+	int ret = 0;
 
-	common->epib = common->ep_config->needs_epib;
-	common->psdata_size = common->ep_config->psd_size;
+	if (unlikely(!udmax_np))
+		return -EINVAL;
 
-	if (tx_chn)
-		common->dst_thread = thread_id;
-	else
-		common->src_thread = thread_id;
+	ret = of_k3_udma_glue_parse(udmax_np, common);
+	if (ret)
+		goto out_put_spec;
+
+	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
 
 out_put_spec:
-	of_node_put(dma_spec.np);
+	of_node_put(udmax_np);
 	return ret;
-};
+}
 
 static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 {
@@ -251,29 +274,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
 }
 
-struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
-		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+static int
+k3_udma_glue_request_tx_chn_common(struct device *dev,
+				   struct k3_udma_glue_tx_channel *tx_chn,
+				   struct k3_udma_glue_tx_channel_cfg *cfg)
 {
-	struct k3_udma_glue_tx_channel *tx_chn;
 	int ret;
 
-	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
-	if (!tx_chn)
-		return ERR_PTR(-ENOMEM);
-
-	tx_chn->common.dev = dev;
-	tx_chn->common.swdata_size = cfg->swdata_size;
-	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
-	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
-	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
-	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
-
-	/* parse of udmap channel */
-	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
-					&tx_chn->common, true);
-	if (ret)
-		goto err;
-
 	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
 						tx_chn->common.psdata_size,
 						tx_chn->common.swdata_size);
@@ -289,7 +296,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 	if (IS_ERR(tx_chn->udma_tchanx)) {
 		ret = PTR_ERR(tx_chn->udma_tchanx);
 		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
-		goto err;
+		return ret;
 	}
 	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
@@ -302,7 +309,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 		dev_err(dev, "Channel Device registration failed %d\n", ret);
 		put_device(&tx_chn->common.chan_dev);
 		tx_chn->common.chan_dev.parent = NULL;
-		goto err;
+		return ret;
 	}
 
 	if (xudma_is_pktdma(tx_chn->common.udmax)) {
@@ -326,7 +333,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 					     &tx_chn->ringtxcq);
 	if (ret) {
 		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
-		goto err;
+		return ret;
 	}
 
 	/* Set the dma_dev for the rings to be configured */
@@ -342,13 +349,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
 	if (ret) {
 		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
-		goto err;
+		return ret;
 	}
 
 	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
 	if (ret) {
 		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
-		goto err;
+		return ret;
 	}
 
 	/* request and cfg psi-l */
@@ -359,11 +366,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
 	if (ret) {
 		dev_err(dev, "Failed to cfg tchan %d\n", ret);
-		goto err;
+		return ret;
 	}
 
 	k3_udma_glue_dump_tx_chn(tx_chn);
 
+	return 0;
+}
+
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
+			    struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+	struct k3_udma_glue_tx_channel *tx_chn;
+	int ret;
+
+	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+	if (!tx_chn)
+		return ERR_PTR(-ENOMEM);
+
+	tx_chn->common.dev = dev;
+	tx_chn->common.swdata_size = cfg->swdata_size;
+	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+	/* parse of udmap channel */
+	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+					&tx_chn->common, true);
+	if (ret)
+		goto err;
+
+	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+	if (ret)
+		goto err;
+
 	return tx_chn;
 
 err:
@@ -372,6 +410,41 @@ err:
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
 
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+					  struct k3_udma_glue_tx_channel_cfg *cfg,
+					  struct device_node *udmax_np, u32 thread_id)
+{
+	struct k3_udma_glue_tx_channel *tx_chn;
+	int ret;
+
+	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+	if (!tx_chn)
+		return ERR_PTR(-ENOMEM);
+
+	tx_chn->common.dev = dev;
+	tx_chn->common.swdata_size = cfg->swdata_size;
+	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
+	if (ret)
+		goto err;
+
+	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+	if (ret)
+		goto err;
+
+	return tx_chn;
+
+err:
+	k3_udma_glue_release_tx_chn(tx_chn);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
+
 void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 {
 	if (tx_chn->psil_paired) {
@@ -1000,12 +1073,59 @@ err:
 	return ERR_PTR(ret);
 }
 
+static int
+k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
+					  struct k3_udma_glue_rx_channel_cfg *cfg,
+					  struct device *dev)
+{
+	int ret, i;
+
+	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+							  rx_chn->common.psdata_size,
+							  rx_chn->common.swdata_size);
+
+	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+				     sizeof(*rx_chn->flows), GFP_KERNEL);
+	if (!rx_chn->flows)
+		return -ENOMEM;
+
+	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
+		     rx_chn->common.src_thread, rx_chn->flow_id_base);
+	ret = device_register(&rx_chn->common.chan_dev);
+	if (ret) {
+		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		put_device(&rx_chn->common.chan_dev);
+		rx_chn->common.chan_dev.parent = NULL;
+		return ret;
+	}
+
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		/* prepare the channel device as coherent */
+		rx_chn->common.chan_dev.dma_coherent = true;
+		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+					     DMA_BIT_MASK(48));
+	}
+
+	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < rx_chn->flow_num; i++)
+		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+	k3_udma_glue_dump_rx_chn(rx_chn);
+
+	return 0;
+}
+
 static struct k3_udma_glue_rx_channel *
 k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
 				   struct k3_udma_glue_rx_channel_cfg *cfg)
 {
 	struct k3_udma_glue_rx_channel *rx_chn;
-	int ret, i;
+	int ret;
 
 	if (cfg->flow_id_num <= 0 ||
 	    cfg->flow_id_use_rxchan_id ||
@@ -1036,44 +1156,55 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
 	if (ret)
 		goto err;
 
-	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
-							  rx_chn->common.psdata_size,
-							  rx_chn->common.swdata_size);
-
-	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
-				     sizeof(*rx_chn->flows), GFP_KERNEL);
-	if (!rx_chn->flows) {
-		ret = -ENOMEM;
+	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+	if (ret)
 		goto err;
-	}
 
-	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
-	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
-	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
-		     rx_chn->common.src_thread);
-	ret = device_register(&rx_chn->common.chan_dev);
-	if (ret) {
-		dev_err(dev, "Channel Device registration failed %d\n", ret);
-		put_device(&rx_chn->common.chan_dev);
-		rx_chn->common.chan_dev.parent = NULL;
-		goto err;
-	}
+	return rx_chn;
 
-	if (xudma_is_pktdma(rx_chn->common.udmax)) {
-		/* prepare the channel device as coherent */
-		rx_chn->common.chan_dev.dma_coherent = true;
-		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
-					     DMA_BIT_MASK(48));
-	}
+err:
+	k3_udma_glue_release_rx_chn(rx_chn);
+	return ERR_PTR(ret);
+}
 
-	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+						 struct k3_udma_glue_rx_channel_cfg *cfg,
+						 struct device_node *udmax_np, u32 thread_id)
+{
+	struct k3_udma_glue_rx_channel *rx_chn;
+	int ret;
+
+	if (cfg->flow_id_num <= 0 ||
+	    cfg->flow_id_use_rxchan_id ||
+	    cfg->def_flow_cfg ||
+	    cfg->flow_id_base < 0)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Remote RX channel is under control of Remote CPU core, so
+	 * Linux can only request and manipulate by dedicated RX flows
+	 */
+
+	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+	if (!rx_chn)
+		return ERR_PTR(-ENOMEM);
+
+	rx_chn->common.dev = dev;
+	rx_chn->common.swdata_size = cfg->swdata_size;
+	rx_chn->remote = true;
+	rx_chn->udma_rchan_id = -1;
+	rx_chn->flow_num = cfg->flow_id_num;
+	rx_chn->flow_id_base = cfg->flow_id_base;
+	rx_chn->psil_paired = false;
+
+	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
 	if (ret)
 		goto err;
 
-	for (i = 0; i < rx_chn->flow_num; i++)
-		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
-
-	k3_udma_glue_dump_rx_chn(rx_chn);
+	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+	if (ret)
+		goto err;
 
 	return rx_chn;
 
@@ -1081,6 +1212,7 @@ err:
 	k3_udma_glue_release_rx_chn(rx_chn);
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
 
 struct k3_udma_glue_rx_channel *
 k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
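The new *_for_thread_id entry points let a client that already knows its PSI-L thread ID (e.g. the TI CSI2RX capture driver) request a channel directly against a UDMA controller node, without a named dmas entry. A hedged usage sketch — the node, the config values, and thread ID 0x4940 (the first CSI2RX source thread from the j721s2 map above) are illustrative, and a real caller would fill in the remaining ring/flow configuration:

    #include <linux/err.h>
    #include <linux/of.h>

    static int my_request_csi2rx_chan(struct device *dev,
    				  struct device_node *udma_np)
    {
    	struct k3_udma_glue_rx_channel_cfg cfg = {
    		.swdata_size = 64,	/* illustrative */
    		.flow_id_num = 1,	/* remote RX uses dedicated flows */
    		.flow_id_base = 0,	/* illustrative */
    	};
    	struct k3_udma_glue_rx_channel *rx;

    	rx = k3_udma_glue_request_remote_rx_chn_for_thread_id(dev, &cfg,
    							      udma_np, 0x4940);
    	return IS_ERR(rx) ? PTR_ERR(rx) : 0;
    }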
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index e40696f6f864..5eb51ae93e89 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -112,7 +112,9 @@
 
 /* Register Direct Mode Registers */
 #define XILINX_DMA_REG_VSIZE			0x0000
+#define XILINX_DMA_VSIZE_MASK			GENMASK(12, 0)
 #define XILINX_DMA_REG_HSIZE			0x0004
+#define XILINX_DMA_HSIZE_MASK			GENMASK(15, 0)
 
 #define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
@@ -2050,6 +2052,10 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	if (!xt->numf || !xt->sgl[0].size)
 		return NULL;
 
+	if (xt->numf & ~XILINX_DMA_VSIZE_MASK ||
+	    xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK)
+		return NULL;
+
 	if (xt->frame_size != 1)
 		return NULL;
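The xilinx_dma check encodes the hardware field widths: VSIZE is a 13-bit register field and HSIZE a 16-bit one, so a frame count or line size with any bit outside those masks cannot be programmed and must be rejected up front rather than silently truncated. Restated as a sketch:

    #include <linux/bits.h>

    /* Geometry fits iff no bit falls outside the register fields. */
    static bool vdma_geometry_fits(size_t numf, size_t linesize)
    {
    	return !(numf & ~GENMASK(12, 0)) &&
    	       !(linesize & ~GENMASK(15, 0));
    }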