author		Linus Torvalds <torvalds@linux-foundation.org>	2018-06-08 20:02:21 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-08 20:02:21 +0200
commit		2996148a9d4169f19a57827003c75605ce3b152b (patch)
tree		77230462b98573fb87237ceafb086c1e7fe4c906 /drivers/dma/sprd-dma.c
parent		Merge tag 'iommu-updates-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent		Merge branch 'topic/txx' into for-linus (diff)
Merge tag 'dmaengine-4.18-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
- updates to sprd, bam_dma, stm drivers
- remove VLAs in dmatest
- move TI drivers to their own subdir
- switch to SPDX tags for imx/mxs dma drivers
- simplify getting .drvdata on a bunch of drivers, by Wolfram Sang
* tag 'dmaengine-4.18-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (32 commits)
dmaengine: sprd: Add Spreadtrum DMA configuration
dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy()
dmaengine: imx-dma: Switch to SPDX identifier
dmaengine: mxs-dma: Switch to SPDX identifier
dmaengine: imx-sdma: Switch to SPDX identifier
dmaengine: usb-dmac: Document R8A7799{0,5} bindings
dmaengine: qcom: bam_dma: fix some doc warnings.
dmaengine: qcom: bam_dma: fix invalid assignment warning
dmaengine: sprd: fix an NULL vs IS_ERR() bug
dmaengine: sprd: Use devm_ioremap_resource() to map memory
dmaengine: sprd: Fix potential NULL dereference in sprd_dma_probe()
dmaengine: pl330: flush before wait, and add dev burst support.
dmaengine: axi-dmac: Request IRQ with IRQF_SHARED
dmaengine: stm32-mdma: fix spelling mistake: "avalaible" -> "available"
dmaengine: rcar-dmac: Document R-Car D3 bindings
dmaengine: sprd: Move DMA request mode and interrupt type into head file
dmaengine: sprd: Define the DMA data width type
dmaengine: sprd: Define the DMA transfer step type
dmaengine: ti: New directory for Texas Instruments DMA drivers
dmaengine: shdmac: Change platform check to CONFIG_ARCH_RENESAS
...
Diffstat (limited to 'drivers/dma/sprd-dma.c')
-rw-r--r--	drivers/dma/sprd-dma.c	349
1 file changed, 203 insertions(+), 146 deletions(-)
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 52ebccb483be..55df0d41355b 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -6,6 +6,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma/sprd-dma.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -116,57 +117,21 @@
 #define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
 #define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)
 
+/* define the DMA transfer step type */
+#define SPRD_DMA_NONE_STEP		0
+#define SPRD_DMA_BYTE_STEP		1
+#define SPRD_DMA_SHORT_STEP		2
+#define SPRD_DMA_WORD_STEP		4
+#define SPRD_DMA_DWORD_STEP		8
+
 #define SPRD_DMA_SOFTWARE_UID		0
 
-/*
- * enum sprd_dma_req_mode: define the DMA request mode
- * @SPRD_DMA_FRAG_REQ: fragment request mode
- * @SPRD_DMA_BLK_REQ: block request mode
- * @SPRD_DMA_TRANS_REQ: transaction request mode
- * @SPRD_DMA_LIST_REQ: link-list request mode
- *
- * We have 4 types request mode: fragment mode, block mode, transaction mode
- * and linklist mode. One transaction can contain several blocks, one block can
- * contain several fragments. Link-list mode means we can save several DMA
- * configuration into one reserved memory, then DMA can fetch each DMA
- * configuration automatically to start transfer.
- */
-enum sprd_dma_req_mode {
-	SPRD_DMA_FRAG_REQ,
-	SPRD_DMA_BLK_REQ,
-	SPRD_DMA_TRANS_REQ,
-	SPRD_DMA_LIST_REQ,
-};
-
-/*
- * enum sprd_dma_int_type: define the DMA interrupt type
- * @SPRD_DMA_NO_INT: do not need generate DMA interrupts.
- * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
- * is done.
- * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
- * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
- * or one block request is done.
- * @SPRD_DMA_TRANS_INT: tansaction done interrupt when one transaction
- * request is done.
- * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
- * transaction request or fragment request is done.
- * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
- * transaction request or block request is done.
- * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
- * is done.
- * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
- * incorrect.
- */
-enum sprd_dma_int_type {
-	SPRD_DMA_NO_INT,
-	SPRD_DMA_FRAG_INT,
-	SPRD_DMA_BLK_INT,
-	SPRD_DMA_BLK_FRAG_INT,
-	SPRD_DMA_TRANS_INT,
-	SPRD_DMA_TRANS_FRAG_INT,
-	SPRD_DMA_TRANS_BLK_INT,
-	SPRD_DMA_LIST_INT,
-	SPRD_DMA_CFGERR_INT,
+/* dma data width values */
+enum sprd_dma_datawidth {
+	SPRD_DMA_DATAWIDTH_1_BYTE,
+	SPRD_DMA_DATAWIDTH_2_BYTES,
+	SPRD_DMA_DATAWIDTH_4_BYTES,
+	SPRD_DMA_DATAWIDTH_8_BYTES,
 };
 
 /* dma channel hardware configuration */
@@ -199,6 +164,7 @@ struct sprd_dma_desc {
 struct sprd_dma_chn {
 	struct virt_dma_chan	vc;
 	void __iomem		*chn_base;
+	struct dma_slave_config	slave_cfg;
 	u32			chn_num;
 	u32			dev_id;
 	struct sprd_dma_desc	*cur_desc;
@@ -587,52 +553,97 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&schan->vc.lock, flags);
 }
 
-static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
-			   dma_addr_t dest, dma_addr_t src, size_t len)
+static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
+{
+	switch (buswidth) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		return ffs(buswidth) - 1;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
+{
+	switch (buswidth) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		return buswidth;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sprd_dma_fill_desc(struct dma_chan *chan,
+			      struct sprd_dma_desc *sdesc,
+			      dma_addr_t src, dma_addr_t dst, u32 len,
+			      enum dma_transfer_direction dir,
+			      unsigned long flags,
+			      struct dma_slave_config *slave_cfg)
 {
 	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
 	struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
-	u32 datawidth, src_step, des_step, fragment_len;
-	u32 block_len, req_mode, irq_mode, transcation_len;
-	u32 fix_mode = 0, fix_en = 0;
-
-	if (IS_ALIGNED(len, 4)) {
-		datawidth = 2;
-		src_step = 4;
-		des_step = 4;
-	} else if (IS_ALIGNED(len, 2)) {
-		datawidth = 1;
-		src_step = 2;
-		des_step = 2;
+	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
+	u32 int_mode = flags & SPRD_DMA_INT_MASK;
+	int src_datawidth, dst_datawidth, src_step, dst_step;
+	u32 temp, fix_mode = 0, fix_en = 0;
+
+	if (dir == DMA_MEM_TO_DEV) {
+		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
+		if (src_step < 0) {
+			dev_err(sdev->dma_dev.dev, "invalid source step\n");
+			return src_step;
+		}
+		dst_step = SPRD_DMA_NONE_STEP;
 	} else {
-		datawidth = 0;
-		src_step = 1;
-		des_step = 1;
+		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
+		if (dst_step < 0) {
+			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
+			return dst_step;
+		}
+		src_step = SPRD_DMA_NONE_STEP;
 	}
 
-	fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-	if (len <= SPRD_DMA_BLK_LEN_MASK) {
-		block_len = len;
-		transcation_len = 0;
-		req_mode = SPRD_DMA_BLK_REQ;
-		irq_mode = SPRD_DMA_BLK_INT;
-	} else {
-		block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-		transcation_len = len;
-		req_mode = SPRD_DMA_TRANS_REQ;
-		irq_mode = SPRD_DMA_TRANS_INT;
+	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
+	if (src_datawidth < 0) {
+		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
+		return src_datawidth;
 	}
 
+	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
+	if (dst_datawidth < 0) {
+		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
+		return dst_datawidth;
+	}
+
+	if (slave_cfg->slave_id)
+		schan->dev_id = slave_cfg->slave_id;
+
 	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
-	hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
-			     SPRD_DMA_HIGH_ADDR_MASK);
-	hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
-			    SPRD_DMA_HIGH_ADDR_MASK);
-
-	hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
-	hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
+	/*
+	 * wrap_ptr and wrap_to will save the high 4 bits source address and
+	 * destination address.
	 */
+	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
+	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
+	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
+	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;
 
-	if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
+	/*
+	 * If the src step and dst step both are 0 or both are not 0, that means
+	 * we can not enable the fix mode. If one is 0 and another one is not,
+	 * we can enable the fix mode.
+	 */
+	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
 		fix_en = 0;
 	} else {
 		fix_en = 1;
@@ -642,87 +653,119 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
 			fix_mode = 0;
 	}
 
-	hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
-		datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
-		req_mode << SPRD_DMA_REQ_MODE_OFFSET |
-		fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
-		fix_en << SPRD_DMA_FIX_EN_OFFSET |
-		(fragment_len & SPRD_DMA_FRG_LEN_MASK);
-	hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
-
-	hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
-
-	switch (irq_mode) {
-	case SPRD_DMA_NO_INT:
-		break;
-
-	case SPRD_DMA_FRAG_INT:
-		hw->intc |= SPRD_DMA_FRAG_INT_EN;
-		break;
+	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;
 
-	case SPRD_DMA_BLK_INT:
-		hw->intc |= SPRD_DMA_BLK_INT_EN;
-		break;
+	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
+	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
+	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
+	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
+	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
+	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
+	hw->frg_len = temp;
 
-	case SPRD_DMA_BLK_FRAG_INT:
-		hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
-		break;
+	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
-	case SPRD_DMA_TRANS_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN;
-		break;
+	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
+	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+	hw->trsf_step = temp;
 
-	case SPRD_DMA_TRANS_FRAG_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
-		break;
+	hw->frg_step = 0;
+	hw->src_blk_step = 0;
+	hw->des_blk_step = 0;
+	return 0;
+}
 
-	case SPRD_DMA_TRANS_BLK_INT:
-		hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
-		break;
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+			 size_t len, unsigned long flags)
+{
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct sprd_dma_desc *sdesc;
+	struct sprd_dma_chn_hw *hw;
+	enum sprd_dma_datawidth datawidth;
+	u32 step, temp;
 
-	case SPRD_DMA_LIST_INT:
-		hw->intc |= SPRD_DMA_LIST_INT_EN;
-		break;
+	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+	if (!sdesc)
+		return NULL;
 
-	case SPRD_DMA_CFGERR_INT:
-		hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
-		break;
+	hw = &sdesc->chn_hw;
 
-	default:
-		dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
-		return -EINVAL;
+	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
+	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
+	hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
+	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+		SPRD_DMA_HIGH_ADDR_MASK;
+	hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+		SPRD_DMA_HIGH_ADDR_MASK;
+
+	if (IS_ALIGNED(len, 8)) {
+		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
+		step = SPRD_DMA_DWORD_STEP;
+	} else if (IS_ALIGNED(len, 4)) {
+		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
+		step = SPRD_DMA_WORD_STEP;
+	} else if (IS_ALIGNED(len, 2)) {
+		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
+		step = SPRD_DMA_SHORT_STEP;
+	} else {
+		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
+		step = SPRD_DMA_BYTE_STEP;
 	}
 
-	if (transcation_len == 0)
-		hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
-	else
-		hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;
+	temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
+	temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
+	temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
+	temp |= len & SPRD_DMA_FRG_LEN_MASK;
+	hw->frg_len = temp;
 
-	hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
-			SPRD_DMA_DEST_TRSF_STEP_OFFSET |
-			(src_step & SPRD_DMA_TRSF_STEP_MASK) <<
-			SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
-	hw->frg_step = 0;
-	hw->src_blk_step = 0;
-	hw->des_blk_step = 0;
-	hw->src_blk_step = 0;
-	return 0;
+	temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
+	temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+	hw->trsf_step = temp;
+
+	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 
 static struct dma_async_tx_descriptor *
-sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-			 size_t len, unsigned long flags)
+sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		       unsigned int sglen, enum dma_transfer_direction dir,
+		       unsigned long flags, void *context)
 {
 	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
+	dma_addr_t src = 0, dst = 0;
 	struct sprd_dma_desc *sdesc;
-	int ret;
+	struct scatterlist *sg;
+	u32 len = 0;
+	int ret, i;
+
+	/* TODO: now we only support one sg for each DMA configuration. */
+	if (!is_slave_direction(dir) || sglen > 1)
+		return NULL;
 
 	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
 	if (!sdesc)
 		return NULL;
 
-	ret = sprd_dma_config(chan, sdesc, dest, src, len);
+	for_each_sg(sgl, sg, sglen, i) {
+		len = sg_dma_len(sg);
+
+		if (dir == DMA_MEM_TO_DEV) {
+			src = sg_dma_address(sg);
+			dst = slave_cfg->dst_addr;
+		} else {
+			src = slave_cfg->src_addr;
+			dst = sg_dma_address(sg);
+		}
+	}
+
+	ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
+				 slave_cfg);
 	if (ret) {
 		kfree(sdesc);
 		return NULL;
@@ -731,6 +774,19 @@ sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 
+static int sprd_dma_slave_config(struct dma_chan *chan,
+				 struct dma_slave_config *config)
+{
+	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
+
+	if (!is_slave_direction(config->direction))
+		return -EINVAL;
+
+	memcpy(slave_cfg, config, sizeof(*config));
+	return 0;
+}
+
 static int sprd_dma_pause(struct dma_chan *chan)
 {
 	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
@@ -842,10 +898,9 @@ static int sprd_dma_probe(struct platform_device *pdev)
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start,
-					      resource_size(res));
-	if (!sdev->glb_base)
-		return -ENOMEM;
+	sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(sdev->glb_base))
+		return PTR_ERR(sdev->glb_base);
 
 	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
 	sdev->total_chns = chn_count;
@@ -858,6 +913,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
 	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
 	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
 	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
+	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
+	sdev->dma_dev.device_config = sprd_dma_slave_config;
 	sdev->dma_dev.device_pause = sprd_dma_pause;
 	sdev->dma_dev.device_resume = sprd_dma_resume;
 	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
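For context on how a client is expected to drive the slave support added above, here is a minimal consumer-side sketch, not part of this merge. It uses only the generic dmaengine client API plus the request-mode/interrupt-type flags that sprd_dma_fill_desc() decodes from the prep flags. The "rx" channel name, the FIFO address, and the surrounding device are hypothetical assumptions; note that both src_addr_width and dst_addr_width must be valid, since fill_desc derives a datawidth from each, and that src_maxburst becomes the fragment length in frg_len.

#include <linux/dmaengine.h>
#include <linux/dma/sprd-dma.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical client: start a device-to-memory transfer on channel "rx". */
static int sprd_client_start_rx(struct device *dev, dma_addr_t buf,
				size_t len, phys_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_addr,		/* device FIFO (assumption) */
		/* fill_desc validates both widths, so set both */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 4,		/* ends up in hw->frg_len */
		/* .slave_id would route the hardware request line */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* channel name is an assumption */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg); /* lands in sprd_dma_slave_config() */
	if (ret)
		goto err;

	/* the new prep_slave_sg accepts at most one scatterlist entry */
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;		/* buf assumed already DMA-mapped */
	sg_dma_len(&sg) = len;

	/* request mode rides above SPRD_DMA_REQ_SHIFT, interrupt type in the low bits */
	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				       (SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_SHIFT) |
				       SPRD_DMA_TRANS_INT);
	if (!desc) {
		ret = -ENOMEM;
		goto err;
	}

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto err;
	}

	dma_async_issue_pending(chan);
	return 0;

err:
	dma_release_channel(chan);
	return ret;
}

The flag encoding mirrors the decoding at the top of sprd_dma_fill_desc() in the diff (req_mode from flags >> SPRD_DMA_REQ_SHIFT, int_mode from flags & SPRD_DMA_INT_MASK), so a transaction-mode transfer with a transaction-done interrupt is requested here; other request/interrupt combinations from <linux/dma/sprd-dma.h> compose the same way.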