author    | Kedareswara rao Appana <appana.durga.rao@xilinx.com> | 2017-12-07 06:21:03 +0100
committer | Vinod Koul <vinod.koul@intel.com> | 2017-12-18 06:14:09 +0100
commit    | fe0503e19310bddc892ddbfd6dfc8746abbe7261 (patch)
tree      | 3e3da8707bd46923cc8936062c25be10f5bc191a /drivers/dma
parent    | dmaengine: xilinx_dma: Check for channel idle state before submitting dma des... (diff)
download  | linux-fe0503e19310bddc892ddbfd6dfc8746abbe7261.tar.xz linux-fe0503e19310bddc892ddbfd6dfc8746abbe7261.zip
dmaengine: xilinx_dma: Fix bug in multiple frame stores scenario in vdma
The VDMA engine's default frame buffer configuration is circular mode.
In this mode the dmaengine continuously cycles through the h/w
configured fstore frame buffers.
Consider the case where the VDMA h/w is configured for more than one
frame: for example, the h/w is configured for n frames, but the user
submits fewer than n frames and triggers the dmaengine using the
issue_pending API.

Since the h/w (or driver) default configuration is circular mode, the
h/w tries to write/read from an invalid frame buffer, resulting in
errors from the VDMA engine.
This patch fixes the issue by enabling park mode as the default frame
buffer mode configuration in s/w, so that the driver can handle all
cases of "k" submitted frames where n%k==0 (i.e. n is a multiple of k)
by simply replicating the frame pointers.
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
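To make the failure mode concrete, here is a minimal, self-contained C sketch (illustration only, not driver code; HW_FRAME_STORES, USER_FRAMES and the buffer addresses are made up for this example) of what circular mode does when fewer frames are submitted than the hardware has frame stores:

/*
 * Illustration only: hardware in circular mode keeps cycling through
 * every configured frame store, so once it moves past the last
 * user-submitted frame it lands on an unconfigured buffer.
 */
#include <stdio.h>

#define HW_FRAME_STORES	4	/* n: frame stores the h/w is configured for */
#define USER_FRAMES	3	/* fewer frames actually submitted by the user */

int main(void)
{
	/* Only the first USER_FRAMES entries point at real buffers. */
	unsigned long frame_buf[HW_FRAME_STORES] = { 0xa000, 0xb000, 0xc000, 0x0 };
	int i;

	for (i = 0; i < 2 * HW_FRAME_STORES; i++) {
		int fstore = i % HW_FRAME_STORES;	/* circular mode */

		if (frame_buf[fstore])
			printf("frame store %d -> buffer 0x%lx\n",
			       fstore, frame_buf[fstore]);
		else
			printf("frame store %d -> unconfigured buffer, h/w reports an error\n",
			       fstore);
	}
	return 0;
}

Parking on the most recently submitted frame instead of circling avoids ever reaching the unconfigured frame stores.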
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/xilinx/xilinx_dma.c | 41 |
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index c2465632f3f0..9063ca04e8d6 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -99,7 +99,9 @@
 #define XILINX_DMA_REG_FRMPTR_STS		0x0024
 #define XILINX_DMA_REG_PARK_PTR			0x0028
 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
 #define XILINX_DMA_REG_VDMA_VERSION		0x002c
 
 /* Register Direct Mode Registers */
@@ -998,7 +1000,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
-	u32 reg;
+	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;
 
 	/* This function was invoked with lock held */
@@ -1035,10 +1037,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/* Configure channel to allow number frame buffers */
-	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
-			chan->desc_pendingcount);
-
 	/*
 	 * With SG, start with circular mode, so that BDs can be fetched.
 	 * In direct register mode, if not parking, enable circular mode
@@ -1051,17 +1049,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
-	if (config->park && (config->park_frm >= 0) &&
-			(config->park_frm < chan->num_frms)) {
-		if (chan->direction == DMA_MEM_TO_DEV)
-			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-				config->park_frm <<
-					XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
-		else
-			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-				config->park_frm <<
-					XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+	j = chan->desc_submitcount;
+	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+	} else {
+		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
 	}
+	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
 
 	/* Start the hardware */
 	xilinx_dma_start(chan);
@@ -1073,6 +1070,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->has_sg) {
 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
 				tail_segment->phys);
+		list_splice_tail_init(&chan->pending_list, &chan->active_list);
+		chan->desc_pendingcount = 0;
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
@@ -1102,18 +1101,13 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
 				last->hw.stride);
 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-	}
 
-	if (!chan->has_sg) {
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
 		chan->desc_submitcount++;
 		chan->desc_pendingcount--;
+		list_del(&desc->node);
+		list_add_tail(&desc->node, &chan->active_list);
 		if (chan->desc_submitcount == chan->num_frms)
 			chan->desc_submitcount = 0;
-	} else {
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
 	}
 
 	chan->idle = false;
@@ -1364,6 +1358,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
 
 	chan->err = false;
 	chan->idle = true;
+	chan->desc_submitcount = 0;
 
 	return err;
 }
@@ -2363,6 +2358,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+			chan->config.park = 1;
 
 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
@@ -2379,6 +2375,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+			chan->config.park = 1;
 
 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
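With this change the driver defaults chan->config.park to 1 at probe time. A DMA client that still wants circular mode can override the default through the driver's existing xilinx_vdma_channel_set_config() interface (declared in include/linux/dma/xilinx_dma.h). A minimal client-side sketch, assuming the channel has already been requested elsewhere (the function name example_use_circular_mode is made up for illustration):

/*
 * Client-side sketch (not part of this patch): override the new
 * park-by-default behaviour and request circular mode again via
 * xilinx_vdma_channel_set_config().
 */
#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dma.h>

static int example_use_circular_mode(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = { 0 };

	cfg.park = 0;	/* 0 = circular mode, 1 = park on a single frame */

	return xilinx_vdma_channel_set_config(chan, &cfg);
}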