author		Romain Perier <romain.perier@free-electrons.com>	2016-12-14 15:15:07 +0100
committer	Herbert Xu <herbert@gondor.apana.org.au>	2016-12-16 12:59:39 +0100
commit		8759fec4af222f338d08f8f1a7ad6a77ca6cb301
tree		095109417f57fdbc6b2558e827ca8a5bb3f73128	/drivers/crypto/marvell/hash.c
parent		crypto: skcipher - fix crash in virtual walk
crypto: marvell - Copy IVDIG before launching partial DMA ahash requests
Currently, inner IV/DIGEST data are only copied once into the hash
engines and not set explicitly before launching a request that is not
a first frag. This is an issue especially when multiple ahash requests
are computed in parallel or chained with a cipher request, as the
state of the request being computed is not updated in the hash engine.
It leads to non-deterministic corrupted digest results.

Fixes: commit 2786cee8e50b ("crypto: marvell - Move SRAM I/O operations to step functions")
Signed-off-by: Romain Perier <romain.perier@free-electrons.com>
Acked-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
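For illustration, here is a minimal standalone sketch of the bug and the
remedy. The types below (struct engine, struct hash_req, the ivdig[]
array) are hypothetical stand-ins, not the driver's real layout; in the
actual code the per-request state lives in struct mv_cesa_ahash_req and
the shared registers are accessed through CESA_IVDIG().

	#include <stdint.h>
	#include <string.h>

	#define DIGEST_WORDS 8

	struct engine {
		uint32_t ivdig[DIGEST_WORDS];	/* shared IV/DIGEST registers */
	};

	struct hash_req {
		uint32_t state[DIGEST_WORDS];	/* this request's partial digest */
		int first_frag;			/* first fragment of the request? */
	};

	/* Launch one fragment of a hash request on the (shared) engine. */
	static void step(struct engine *e, struct hash_req *r)
	{
		/*
		 * Another request may have used the engine since this one's
		 * previous fragment, so the IVDIG registers can hold foreign
		 * state. Every non-first fragment must restore its own copy.
		 */
		if (!r->first_frag)
			memcpy(e->ivdig, r->state, sizeof(r->state));

		/* ...kick off the DMA chain for this fragment... */
	}

Interleave two requests (A frag 1, B frag 1, A frag 2) and, without the
restore, A's second fragment resumes from B's digest state, which is
exactly the non-deterministic corruption described above.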
Diffstat (limited to 'drivers/crypto/marvell/hash.c')
-rw-r--r--	drivers/crypto/marvell/hash.c	34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 2a9260559654..585c90f9f606 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -281,13 +281,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
sreq->offset = 0;
}
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_req *base = &creq->base;
+
+ /* We must explicitly set the digest state. */
+ if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+ struct mv_cesa_engine *engine = base->engine;
+ int i;
+
+ /* Set the hash state in the IVDIG regs. */
+ for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+ writel_relaxed(creq->state[i], engine->regs +
+ CESA_IVDIG(i));
+ }
+
+ mv_cesa_dma_step(base);
+}
+
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_dma_step(&creq->base);
+ mv_cesa_ahash_dma_step(ahashreq);
else
mv_cesa_ahash_std_step(ahashreq);
}
@@ -585,12 +604,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_ahash_dma_iter iter;
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
+ bool set_state = false;
int ret;
u32 type;
basereq->chain.first = NULL;
basereq->chain.last = NULL;
+ if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+ set_state = true;
+
if (creq->src_nents) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE);
@@ -684,6 +707,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (type != CESA_TDMA_RESULT)
basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
+ if (set_state) {
+ /*
+ * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+ * let the step logic know that the IVDIG registers should be
+ * explicitly set before launching a TDMA chain.
+ */
+ basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+ }
+
return 0;
err_free_tdma:
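Taken together, the two hunks form a build-time/launch-time handshake:
mv_cesa_ahash_dma_req_init records on the chain's first TDMA descriptor
that this request continues an earlier fragment, and
mv_cesa_ahash_dma_step honors that record before starting the hardware.
A condensed sketch of the pattern follows, again with hypothetical
stand-in types and values; only the CESA_TDMA_SET_STATE flag itself
comes from the patch (it is defined elsewhere in the driver, not in
this file's diff).

	#define SET_STATE	0x1	/* stand-in for CESA_TDMA_SET_STATE */
	#define STATE_WORDS	8

	struct tdma_desc {
		unsigned int flags;
	};

	/* Build time: tag the chain head when the request is not a first frag. */
	static void tag_chain(struct tdma_desc *first, int is_first_frag)
	{
		if (!is_first_frag)
			first->flags |= SET_STATE;
	}

	/* Launch time: reload the saved digest before starting the DMA chain. */
	static void launch_chain(const struct tdma_desc *first,
				 const unsigned int *state,
				 volatile unsigned int *ivdig_regs)
	{
		unsigned int i;

		if (first->flags & SET_STATE)
			for (i = 0; i < STATE_WORDS; i++)
				ivdig_regs[i] = state[i];

		/* ...start the engine on the chain here... */
	}

Putting the flag on the descriptor rather than on the request means the
decision travels with the chain itself, so whichever code path
eventually launches the chain still knows to reload the state first.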