author     Alex Smith <alex.smith@imgtec.com>     2015-07-24 18:24:22 +0200
committer  Vinod Koul <vinod.koul@intel.com>      2015-08-18 18:58:49 +0200
commit     839896ef3fdacf3a27460b6f6dabc6ac1475a00c (patch)
tree       a8e2c9dbdf9187c39253b9b1822415f47095c8a9   /drivers/dma/dma-jz4780.c
parent     dmaengine: jz4780: Fall back on smaller transfer sizes where necessary (diff)
dmaengine: jz4780: Fix error handling/signedness issues
There are some signedness bugs, such as testing for < 0 on unsigned return
values. Additionally, there are some cases where functions that should return
NULL on error actually return a PTR_ERR value, which can result in oopses on
error. Fix these issues.

Signed-off-by: Alex Smith <alex.smith@imgtec.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
Cc: dmaengine@vger.kernel.org
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
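For context, a minimal, self-contained sketch of the two bug classes the
commit message describes (hypothetical identifiers, not code from this
driver): a negative error code returned through an unsigned type can never
satisfy a "< 0" check, and a prep callback that returns an ERR_PTR-style
value instead of NULL defeats callers that only test for NULL.

/* Hypothetical sketch; none of these names come from dma-jz4780.c. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 1) Signedness: -EINVAL wraps to a huge positive uint32_t, so the
 *    caller's "if (err < 0)" can never detect the failure.
 */
static uint32_t setup_buggy(size_t len)
{
	return len ? 0 : (uint32_t)-EINVAL;
}

static int setup_fixed(size_t len)
{
	return len ? 0 : -EINVAL;	/* signed: "< 0" now works */
}

/* 2) Error pointer vs NULL: dmaengine prep callbacks are expected to
 *    return NULL on failure; returning an encoded error value instead
 *    hands the caller a non-NULL "descriptor" that oopses when used.
 */
struct fake_desc { int id; };

static struct fake_desc *prep_fixed(int err, struct fake_desc *d)
{
	if (err < 0)
		return NULL;	/* caller's NULL check catches this */
	return d;
}

int main(void)
{
	printf("buggy: error detected? %s\n",
	       setup_buggy(0) < 0 ? "yes" : "no");	/* always "no" */
	printf("fixed: error detected? %s\n",
	       setup_fixed(0) < 0 ? "yes" : "no");
	printf("prep on error returns %p\n", (void *)prep_fixed(-EINVAL, NULL));
	return 0;
}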
Diffstat (limited to 'drivers/dma/dma-jz4780.c')
-rw-r--r--   drivers/dma/dma-jz4780.c   33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7af886fc47af..e4a291c67e2a 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -250,7 +250,7 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
}
}
-static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
+static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
enum dma_transfer_direction direction)
{
@@ -301,6 +301,7 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
desc->dtc = len >> jzchan->transfer_shift;
+ return 0;
}
static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
@@ -319,12 +320,11 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
for (i = 0; i < sg_len; i++) {
err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
- sg_dma_address(&sgl[i]),
- sg_dma_len(&sgl[i]),
- direction);
+ sg_dma_address(&sgl[i]),
+ sg_dma_len(&sgl[i]),
+ direction);
if (err < 0)
- return ERR_PTR(err);
-
+ return NULL;
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
@@ -366,9 +366,9 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
for (i = 0; i < periods; i++) {
err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
- period_len, direction);
+ period_len, direction);
if (err < 0)
- return ERR_PTR(err);
+ return NULL;
buf_addr += period_len;
@@ -417,7 +417,7 @@ struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
tsz << JZ_DMA_DCM_TSZ_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
- desc->desc[0].dtc = len >> ord;
+ desc->desc[0].dtc = len >> jzchan->transfer_shift;
return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
@@ -580,8 +580,8 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
txstate->residue = 0;
if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
- && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
- status = DMA_ERROR;
+ && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+ status = DMA_ERROR;
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
return status;
@@ -756,17 +756,19 @@ static int jz4780_dma_probe(struct platform_device *pdev)
if (IS_ERR(jzdma->base))
return PTR_ERR(jzdma->base);
- jzdma->irq = platform_get_irq(pdev, 0);
- if (jzdma->irq < 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
dev_err(dev, "failed to get IRQ: %d\n", ret);
- return jzdma->irq;
+ return ret;
}
+ jzdma->irq = ret;
+
ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
dev_name(dev), jzdma);
if (ret) {
dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
- return -EINVAL;
+ return ret;
}
jzdma->clk = devm_clk_get(dev, NULL);
@@ -803,7 +805,6 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
/*
* Enable DMA controller, mark all channels as not programmable.
* Also set the FMSC bit - it increases MSC performance, so it makes