author      Anup Patel <anup.patel@broadcom.com>    2017-05-15 07:04:53 +0200
committer   Vinod Koul <vinod.koul@intel.com>       2017-05-16 06:31:57 +0200
commit      baae03a0e2497f49704628fd0aaf993cf98e1b99 (patch)
tree        103de8b8d9fd6b712a7cb4729660225541ad4269 /crypto/async_tx/async_pq.c
parent      lib/raid6: Add log-of-2 table for RAID6 HW requiring disk position (diff)
async_tx: Fix DMA_PREP_FENCE usage in do_async_gen_syndrome()
DMA_PREP_FENCE is to be set when preparing a Tx descriptor whose output will be consumed by the next/dependent Tx descriptor. DMA_PREP_FENCE was not being set correctly in do_async_gen_syndrome() when calling dma->device_prep_dma_pq() under the following conditions:

1. ASYNC_TX_FENCE is not set in submit->flags
2. DMA_PREP_FENCE is not set in dma_flags
3. src_cnt (= disks - 2) is greater than dma_maxpq(dma, dma_flags)

This patch fixes the DMA_PREP_FENCE usage in do_async_gen_syndrome(), taking inspiration from the do_async_xor() implementation.

Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Reviewed-by: Ray Jui <ray.jui@broadcom.com>
Reviewed-by: Scott Branden <scott.branden@broadcom.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
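For context, the sketch below is a minimal standalone model of the flag handling this patch changes (mock flag values and a printf stub instead of the real dmaengine API, so it builds as ordinary userspace C; it is illustrative only, not the kernel code). When the P/Q operation is split across several descriptors, the loop itself marks the intermediate submissions with ASYNC_TX_FENCE, so the ASYNC_TX_FENCE-to-DMA_PREP_FENCE translation has to happen inside the loop, after submit->flags may have been updated, rather than once before it.

#include <stdio.h>

#define ASYNC_TX_FENCE  (1u << 0)  /* mock of the async_tx submit flag */
#define DMA_PREP_FENCE  (1u << 1)  /* mock of the dmaengine prep flag */
#define MAX_PQ_SRCS     8          /* stand-in for dma_maxpq() */

int main(void)
{
	unsigned int flags_orig = 0;  /* condition 1: caller did not request ASYNC_TX_FENCE */
	unsigned int dma_flags  = 0;  /* condition 2: DMA_PREP_FENCE not set on entry */
	int src_cnt = 20;             /* condition 3: src_cnt exceeds the per-descriptor limit */
	int seg = 0;

	/* Old placement: a single check of flags_orig here would never set
	 * DMA_PREP_FENCE under the three conditions above. */

	while (src_cnt > 0) {
		unsigned int submit_flags = flags_orig;
		int pq_src_cnt = src_cnt < MAX_PQ_SRCS ? src_cnt : MAX_PQ_SRCS;

		/* When the operation is split, the loop itself requests
		 * fencing for the intermediate segments. */
		if (src_cnt > pq_src_cnt)
			submit_flags |= ASYNC_TX_FENCE;

		/* New placement: the check sees the just-updated submit flags. */
		if (submit_flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		printf("descriptor %d: DMA_PREP_FENCE %s\n", seg++,
		       (dma_flags & DMA_PREP_FENCE) ? "set" : "missing");

		src_cnt -= pq_src_cnt;
	}
	return 0;
}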
Diffstat (limited to 'crypto/async_tx/async_pq.c')
-rw-r--r--   crypto/async_tx/async_pq.c   5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f83de99d7d71..56bd612927ab 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
 	dma_addr_t dma_dest[2];
 	int src_off = 0;
 
-	if (submit->flags & ASYNC_TX_FENCE)
-		dma_flags |= DMA_PREP_FENCE;
-
 	while (src_cnt > 0) {
 		submit->flags = flags_orig;
 		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
 			if (cb_fn_orig)
 				dma_flags |= DMA_PREP_INTERRUPT;
 		}
+		if (submit->flags & ASYNC_TX_FENCE)
+			dma_flags |= DMA_PREP_FENCE;
 
 		/* Drivers force forward progress in case they can not provide
 		 * a descriptor