author    Dave Jiang <dave.jiang@intel.com>	2016-07-25 19:34:08 +0200
committer Vinod Koul <vinod.koul@intel.com>	2016-08-08 04:41:43 +0200
commit    8058e25809f53cadc0438ebb8f920415a0d2ec17 (patch)
tree      d4a90a12d29b88b18d5a9b9db073ca442a86876c
parent    dmaengine: fsl_raid: move unmap to before callback (diff)
dmaengine: mv_xor: move unmap to before callback
The completion callback should happen after dma_descriptor_unmap() has run. This allows the cache invalidation to take place and ensures that the data the upper layer accesses is the memory written by DMA rather than stale data. On some architectures this is handled by the hardware, but the code should be made consistent to avoid confusion.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/mv_xor.c | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f8b5e7424b3a..d550efbc7054 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -206,11 +206,11 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
 
+		dma_descriptor_unmap(&desc->async_tx);
 		/* call the callback (must not sleep or submit new
 		 * operations to this channel)
 		 */
 		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
-		dma_descriptor_unmap(&desc->async_tx);
 	}
 
 	/* run dependent operations */
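
For context, here is a minimal sketch (not code from this commit) of the completion-path ordering the patch establishes. The struct and function names my_desc and my_complete_descriptor() are hypothetical and only illustrative; dma_descriptor_unmap() and dmaengine_desc_get_callback_invoke() are the existing dmaengine helpers used in the patch. The point is simply that the unmap, and with it any CPU cache maintenance it triggers, runs before the client's callback can touch the transferred data.

/* Sketch only; assumes <linux/dmaengine.h> plus the driver-internal
 * drivers/dma/dmaengine.h header that provides
 * dmaengine_desc_get_callback_invoke().
 */
struct my_desc {				/* hypothetical driver descriptor */
	struct dma_async_tx_descriptor async_tx;
	/* ... driver-private fields ... */
};

static dma_cookie_t my_complete_descriptor(struct my_desc *desc,
					    dma_cookie_t cookie)
{
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/*
		 * Unmap first: unmapping the descriptor's buffers is what
		 * performs the cache invalidate/sync on architectures that
		 * need it, so it must happen before the client callback
		 * reads the DMA'd data.
		 */
		dma_descriptor_unmap(&desc->async_tx);

		/* Callback must not sleep or submit new work to this channel. */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	return cookie;
}

Doing the unmap after the callback (the old order in this driver) risks the upper layer reading stale cache lines on architectures without hardware cache coherence for DMA; on coherent hardware both orders behave the same, which is why the commit message frames the change partly as a consistency cleanup.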