author      Zhenfang Wang <zhenfang.wang@unisoc.com>    2020-03-12 14:26:04 +0100
committer   Vinod Koul <vkoul@kernel.org>               2020-03-23 07:08:24 +0100
commit      d0f19a48a185dab592afe1e18bf31a9d6790620d (patch)
tree        8643c4904d337e0f063409a3415171d973340482 /drivers/dma
parent      dmaengine: ppc4xx: Use scnprintf() for avoiding potential buffer overflow (diff)
dmaengine: sprd: Set request pending flag when DMA controller is active
On new Spreadtrum platforms, when the CPU enters idle it will gate the
DMA controller's clock to save power if the controller is not busy.
The controller's busy signal depends on both the DMA enable flag and
the request pending flag.

When the DMA controller starts a transfer, the DMA enable flag is
already set, but the request pending flag must be set as well;
otherwise the CPU may fail to see the controller's busy signal and
gate the DMA clock while a transfer is still in flight.
Signed-off-by: Zhenfang Wang <zhenfang.wang@unisoc.com>
Signed-off-by: Baolin Wang <baolin.wang7@gmail.com>
Link: https://lore.kernel.org/r/02adbe4364ec436ec2c5bc8fd2386bab98edd884.1584019223.git.baolin.wang7@gmail.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Diffstat (limited to 'drivers/dma')
-rw-r--r--   drivers/dma/sprd-dma.c   24
1 file changed, 24 insertions, 0 deletions
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 954eff32cc05..0ef5ca81ba4d 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -486,6 +486,28 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
 	return 0;
 }
 
+static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
+{
+	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+	u32 reg, val, req_id;
+
+	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+		return;
+
+	/* The DMA request id always starts from 0. */
+	req_id = schan->dev_id - 1;
+
+	if (req_id < 32) {
+		reg = SPRD_DMA_GLB_REQ_PEND0_EN;
+		val = BIT(req_id);
+	} else {
+		reg = SPRD_DMA_GLB_REQ_PEND1_EN;
+		val = BIT(req_id - 32);
+	}
+
+	sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
+}
+
 static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
 				    struct sprd_dma_desc *sdesc)
 {
@@ -532,6 +554,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
 	 */
 	sprd_dma_set_chn_config(schan, schan->cur_desc);
 	sprd_dma_set_uid(schan);
+	sprd_dma_set_pending(schan, true);
 	sprd_dma_enable_chn(schan);
 
 	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
@@ -543,6 +566,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
 static void sprd_dma_stop(struct sprd_dma_chn *schan)
 {
 	sprd_dma_stop_and_disable(schan);
+	sprd_dma_set_pending(schan, false);
 	sprd_dma_unset_uid(schan);
 	sprd_dma_clear_int(schan);
 	schan->cur_desc = NULL;
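As an illustration of the new helper's logic: sprd_dma_set_pending() maps a
channel's slave device id onto one bit in one of two global pending-enable
registers, request ids 0-31 into SPRD_DMA_GLB_REQ_PEND0_EN and ids 32-63 into
SPRD_DMA_GLB_REQ_PEND1_EN, while software (memcpy) channels are skipped
because they have no hardware request line. The standalone sketch below is a
minimal, hedged mirror of only that bit-selection step; the register offsets
REQ_PEND0_EN/REQ_PEND1_EN, the pending_bit() helper and the demo in main()
are illustrative placeholders, not the driver's real definitions or API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Software/memcpy channels have no hardware request line to mark pending. */
#define SPRD_DMA_SOFTWARE_UID   0

/* Placeholder offsets standing in for SPRD_DMA_GLB_REQ_PEND{0,1}_EN. */
#define REQ_PEND0_EN            0x50
#define REQ_PEND1_EN            0x54

/*
 * Translate a slave device id into a pending-enable register and bit mask,
 * mirroring the selection made by sprd_dma_set_pending(): request ids start
 * at 0, ids 0..31 use the first register, ids 32..63 use the second one.
 */
static bool pending_bit(uint32_t dev_id, uint32_t *reg, uint32_t *mask)
{
        uint32_t req_id;

        if (dev_id == SPRD_DMA_SOFTWARE_UID)
                return false;

        req_id = dev_id - 1;
        if (req_id < 32) {
                *reg = REQ_PEND0_EN;
                *mask = 1u << req_id;
        } else {
                *reg = REQ_PEND1_EN;
                *mask = 1u << (req_id - 32);
        }
        return true;
}

int main(void)
{
        uint32_t reg, mask;

        /* dev_id 35 -> req_id 34 -> bit 2 of the second pending register */
        if (pending_bit(35, &reg, &mask))
                printf("reg 0x%02x, mask 0x%08x\n", reg, mask);
        return 0;
}

Splitting the enable bits across two 32-bit registers gives the controller one
pending-enable bit per hardware request line for up to 64 slave request ids.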