From 36387a2b1f62b5c087c5fe6f0f7b23b94f722ad7 Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Mon, 29 Aug 2016 10:30:51 -0700
Subject: k3dma: Fix memory handling in preparation for cyclic mode

With cyclic mode, the shared virt-dma logic doesn't actually manage
the descriptor state, nor the calling of the descriptor free callback.
This results in leaking a desc structure every time we start an audio
transfer, so we must manage it ourselves.

The k3dma driver already keeps track of the active and finished
descriptors via its ds_run and ds_done pointers, so clean up how we
handle those two values, and when we tear everything down in
terminate_all, call free_desc on the ds_run and ds_done pointers if
they are not NULL.

NOTE: HiKey doesn't use the non-cyclic dma modes, so I've not been
able to test those modes. But with this patch we no longer leak the
desc structures.

Cc: Zhangfei Gao
Cc: Krzysztof Kozlowski
Cc: Maxime Ripard
Cc: Vinod Koul
Cc: Dan Williams
Cc: Mark Brown
Cc: Andy Green
Acked-by: Zhangfei Gao
Signed-off-by: John Stultz
Signed-off-by: Vinod Koul
---
 drivers/dma/k3dma.c | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)

diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 9d96c956c25f..8108fa119d39 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -212,7 +212,9 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
 
 				spin_lock_irqsave(&c->vc.lock, flags);
 				vchan_cookie_complete(&p->ds_run->vd);
+				WARN_ON_ONCE(p->ds_done);
 				p->ds_done = p->ds_run;
+				p->ds_run = NULL;
 				spin_unlock_irqrestore(&c->vc.lock, flags);
 			}
 			irq_chan |= BIT(i);
@@ -253,14 +255,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
 		 * so vc->desc_issued only contains desc pending
 		 */
 		list_del(&ds->vd.node);
+
+		WARN_ON_ONCE(c->phy->ds_run);
+		WARN_ON_ONCE(c->phy->ds_done);
 		c->phy->ds_run = ds;
-		c->phy->ds_done = NULL;
 		/* start dma */
 		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
 		return 0;
 	}
-	c->phy->ds_done = NULL;
-	c->phy->ds_run = NULL;
 	return -EAGAIN;
 }
 
@@ -594,6 +596,16 @@ static int k3_dma_config(struct dma_chan *chan,
 	return 0;
 }
 
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct k3_dma_desc_sw *ds =
+		container_of(vd, struct k3_dma_desc_sw, vd);
+	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
+
+	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+	kfree(ds);
+}
+
 static int k3_dma_terminate_all(struct dma_chan *chan)
 {
 	struct k3_dma_chan *c = to_k3_chan(chan);
@@ -617,7 +629,15 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
 		k3_dma_terminate_chan(p, d);
 		c->phy = NULL;
 		p->vchan = NULL;
-		p->ds_run = p->ds_done = NULL;
+		if (p->ds_run) {
+			k3_dma_free_desc(&p->ds_run->vd);
+			p->ds_run = NULL;
+		}
+		if (p->ds_done) {
+			k3_dma_free_desc(&p->ds_done->vd);
+			p->ds_done = NULL;
+		}
+
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 	vchan_dma_desc_free_list(&c->vc, &head);
@@ -670,16 +690,6 @@ static int k3_dma_transfer_resume(struct dma_chan *chan)
 	return 0;
 }
 
-static void k3_dma_free_desc(struct virt_dma_desc *vd)
-{
-	struct k3_dma_desc_sw *ds =
-		container_of(vd, struct k3_dma_desc_sw, vd);
-	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
-
-	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
-	kfree(ds);
-}
-
 static const struct of_device_id k3_pdma_dt_ids[] = {
 	{ .compatible = "hisilicon,k3-dma-1.0", },
 	{}
--
cgit v1.2.3
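
As a side note for readers following the descriptor lifecycle above: the
two-slot ownership scheme the patch enforces (ds_run holds the in-flight
descriptor, ds_done holds a completed descriptor awaiting free, and
terminate_all reaps whichever slot is still occupied) can be modeled as a
small self-contained C program. This is only an illustrative sketch:
struct desc, struct phy_slot and the function names here are hypothetical
stand-ins for the driver's k3_dma_desc_sw and k3_dma_phy, and malloc/free
stand in for the dma_pool allocations.

/*
 * Userspace model of the ds_run/ds_done ownership rules added by
 * this patch.  Hypothetical stand-ins only; not driver source.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct desc { int id; };		/* stands in for k3_dma_desc_sw */

struct phy_slot {			/* stands in for k3_dma_phy */
	struct desc *ds_run;		/* in-flight descriptor */
	struct desc *ds_done;		/* completed, awaiting free */
};

static void start_txd(struct phy_slot *p, struct desc *ds)
{
	/* mirrors the WARN_ON_ONCE()s added to k3_dma_start_txd():
	 * accepting a new descriptor while either slot is occupied
	 * would leak the occupant */
	assert(!p->ds_run && !p->ds_done);
	p->ds_run = ds;
}

static void complete_irq(struct phy_slot *p)
{
	/* mirrors the int handler change: hand the descriptor from
	 * the run slot to the done slot, vacating ds_run so the next
	 * transfer can be started */
	assert(!p->ds_done);
	p->ds_done = p->ds_run;
	p->ds_run = NULL;
}

static void terminate_all(struct phy_slot *p)
{
	/* mirrors the patched terminate_all: whichever slot still
	 * holds a descriptor owns it and must free it; merely
	 * NULLing the pointers (the old behavior) leaks it */
	free(p->ds_run);
	p->ds_run = NULL;
	free(p->ds_done);
	p->ds_done = NULL;
}

int main(void)
{
	struct phy_slot p = { NULL, NULL };
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->id = 1;

	start_txd(&p, d);
	complete_irq(&p);
	terminate_all(&p);	/* frees the done slot: no leak */
	printf("lifecycle complete, no descriptor leaked\n");
	return 0;
}

In the driver itself the same frees are performed under c->vc.lock in
k3_dma_terminate_all(), and the WARN_ON_ONCE() calls added above guard
the equivalent invariants at start and completion time.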