author		Dan Williams <dan.j.williams@intel.com>	2009-09-21 19:47:40 +0200
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-21 19:47:40 +0200
commit		1f6672d44c1ae7408b43c06170ec34eb0a0e9b9f (patch)
tree		b8c2be522cf71512a5f9587448967d78ae31bfbf
parent		ioat3: fix uninitialized var warnings (diff)
async_tx/raid6: add missing dma_unmap calls to the async fail case
If we are unable to offload async_mult() or async_sum_product(), then
unmap the buffers before falling through to the synchronous path.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--	crypto/async_tx/async_raid6_recov.c	13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 822a42d10061..6d73dde4786d 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -55,6 +55,13 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 			async_tx_submit(chan, tx, submit);
 			return tx;
 		}
+
+		/* could not get a descriptor, unmap and fall through to
+		 * the synchronous path
+		 */
+		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+		dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
 	}
 
 	/* run the operation synchronously */
@@ -101,6 +108,12 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 			async_tx_submit(chan, tx, submit);
 			return tx;
 		}
+
+		/* could not get a descriptor, unmap and fall through to
+		 * the synchronous path
+		 */
+		dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
 	}
 
 	/* no channel available, or failed to allocate a descriptor, so
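
The shape of the fix is the same in both hunks: the buffers are DMA-mapped before asking the driver for a descriptor, so if descriptor allocation fails the mappings must be released before the code falls back to the synchronous (CPU) implementation. Below is a minimal sketch of that pattern; try_mult_offload() is a hypothetical helper written only for illustration and is not the kernel's async_mult(), though the dmaengine and DMA-mapping calls it uses are the same APIs the patch touches.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/async_tx.h>

/*
 * Illustrative sketch only (hypothetical helper, not the kernel's
 * async_mult()): map the pages, try to get a descriptor from the
 * dmaengine driver, and -- as this patch adds -- unmap on failure so
 * the caller can fall back to the synchronous path without leaking
 * DMA mappings.
 */
static struct dma_async_tx_descriptor *
try_mult_offload(struct dma_chan *chan, struct page *dest, struct page *src,
		 u8 coef, size_t len, struct async_submit_ctl *submit)
{
	struct dma_device *dma = chan->device;
	struct device *dev = dma->dev;
	dma_addr_t dma_dest[2];	/* [0] = P (disabled here), [1] = Q */
	dma_addr_t dma_src[1];
	struct dma_async_tx_descriptor *tx;
	enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

	/* map the buffers the engine will read and write */
	dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
	dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);

	/* descriptor allocation can fail, e.g. when the ring is full */
	tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
				     len, dma_flags);
	if (tx) {
		async_tx_submit(chan, tx, submit);
		return tx;
	}

	/*
	 * No descriptor: release the mappings before the caller falls
	 * through to the synchronous implementation.  These unmap calls
	 * are what the commit adds to the real functions.
	 */
	dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
	dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
	return NULL;	/* caller runs the operation synchronously */
}
```

Returning NULL signals that the offload attempt failed, so the caller can run the software P/Q recovery path without any stale DMA mappings left behind.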