author	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-09 05:57:08 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-09 05:57:08 +0200
commit	4d2fa8b44b891f0da5ceda3e5a1402ccf0ab6f26 (patch)
tree	cbb763ec5e74cfbaac6ce53df277883cb78a8a1a /drivers/crypto/inside-secure
parent	Merge branch 'next-integrity' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
parent	crypto: stm32/hash - remove interruptible condition for dma (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 5.3:

  API:
   - Test shash interface directly in testmgr
   - cra_driver_name is now mandatory

  Algorithms:
   - Replace arc4 crypto_cipher with library helper
   - Implement 5 way interleave for ECB, CBC and CTR on arm64
   - Add xxhash
   - Add continuous self-test on noise source to drbg
   - Update jitter RNG

  Drivers:
   - Add support for SHA204A random number generator
   - Add support for 7211 in iproc-rng200
   - Fix fuzz test failures in inside-secure
   - Fix fuzz test failures in talitos
   - Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
  crypto: stm32/hash - remove interruptible condition for dma
  crypto: stm32/hash - Fix hmac issue more than 256 bytes
  crypto: stm32/crc32 - rename driver file
  crypto: amcc - remove memset after dma_alloc_coherent
  crypto: ccp - Switch to SPDX license identifiers
  crypto: ccp - Validate the the error value used to index error messages
  crypto: doc - Fix formatting of new crypto engine content
  crypto: doc - Add parameter documentation
  crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
  crypto: arm64/aes-ce - add 5 way interleave routines
  crypto: talitos - drop icv_ool
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: asymmetric_keys - select CRYPTO_HASH where needed
  crypto: serpent - mark __serpent_setkey_sbox noinline
  crypto: testmgr - dynamically allocate crypto_shash
  crypto: testmgr - dynamically allocate testvec_config
  crypto: talitos - eliminate unneeded 'done' functions at build time
  ...
Diffstat (limited to 'drivers/crypto/inside-secure')
-rw-r--r--	drivers/crypto/inside-secure/safexcel.c	13
-rw-r--r--	drivers/crypto/inside-secure/safexcel.h	17
-rw-r--r--	drivers/crypto/inside-secure/safexcel_cipher.c	116
-rw-r--r--	drivers/crypto/inside-secure/safexcel_hash.c	92
-rw-r--r--	drivers/crypto/inside-secure/safexcel_ring.c	3
5 files changed, 157 insertions, 84 deletions
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 86c699c14f84..df43a2c6933b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -398,6 +398,12 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Processing Engine configuration */
+ /* Token & context configuration */
+ val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
+ EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
+ EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
+
/* H/W capabilities selection */
val = EIP197_FUNCTION_RSVD;
val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
@@ -589,9 +595,9 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
if (rdesc->result_data.error_code & 0x407f) {
/* Fatal error (bits 0-7, 14) */
dev_err(priv->dev,
- "cipher: result: result descriptor error (%d)\n",
+ "cipher: result: result descriptor error (0x%x)\n",
rdesc->result_data.error_code);
- return -EIO;
+ return -EINVAL;
} else if (rdesc->result_data.error_code == BIT(9)) {
/* Authentication failed */
return -EBADMSG;
@@ -720,11 +726,10 @@ handle_results:
}
acknowledge:
- if (i) {
+ if (i)
writel(EIP197_xDR_PROC_xD_PKT(i) |
EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
- }
/* If the number of requests overflowed the counter, try to proceed more
* requests.
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 65624a81f0fd..e0c202f33674 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -118,6 +118,7 @@
#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
@@ -249,6 +250,11 @@
#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0)
#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1)
+/* EIP197_PE_EIP96_TOKEN_CTRL */
+#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16)
+#define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19)
+#define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20)
+
/* EIP197_PE_EIP96_FUNCTION_EN */
#define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23))
#define EIP197_PROTOCOL_HASH_ONLY BIT(0)
@@ -333,6 +339,7 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_IV3 BIT(8)
#define CONTEXT_CONTROL_DIGEST_CNT BIT(9)
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
+#define CONTEXT_CONTROL_CRYPTO_STORE BIT(12)
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
/* The hash counter given to the engine in the context has a granularity of
@@ -425,6 +432,10 @@ struct safexcel_token {
#define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16)
+#define EIP197_TOKEN_CTX_OFFSET(x) (x)
+#define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11)
+#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12)
+
#define EIP197_TOKEN_STAT_LAST_HASH BIT(0)
#define EIP197_TOKEN_STAT_LAST_PACKET BIT(1)
#define EIP197_TOKEN_OPCODE_DIRECTION 0x0
@@ -432,6 +443,7 @@ struct safexcel_token {
#define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT
#define EIP197_TOKEN_OPCODE_RETRIEVE 0x4
#define EIP197_TOKEN_OPCODE_VERIFY 0xd
+#define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe
#define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0)
static inline void eip197_noop_token(struct safexcel_token *token)
@@ -442,6 +454,8 @@ static inline void eip197_noop_token(struct safexcel_token *token)
/* Instructions */
#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c
+#define EIP197_TOKEN_INS_ORIGIN_IV0 0x14
+#define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5)
#define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5)
#define EIP197_TOKEN_INS_TYPE_HASH BIT(6)
#define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7)
@@ -468,6 +482,7 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
#define EIP197_OPTION_64BIT_CTX BIT(1)
+#define EIP197_OPTION_RC_AUTO (0x2 << 3)
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
@@ -629,7 +644,7 @@ struct safexcel_ahash_export_state {
u32 digest;
u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA512_BLOCK_SIZE];
+ u8 cache[SHA512_BLOCK_SIZE << 1];
};
/*
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index de4be10b172f..8cdbdbe35681 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
struct safexcel_cipher_req {
enum safexcel_cipher_direction direction;
+ /* Number of result descriptors associated to the request */
+ unsigned int rdescs;
bool needs_inv;
};
@@ -59,27 +61,26 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
u32 length)
{
struct safexcel_token *token;
- unsigned offset = 0;
+ u32 offset = 0, block_sz = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
switch (ctx->alg) {
case SAFEXCEL_DES:
- offset = DES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+ block_sz = DES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
case SAFEXCEL_3DES:
- offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+ block_sz = DES3_EDE_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
break;
-
case SAFEXCEL_AES:
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ block_sz = AES_BLOCK_SIZE;
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
break;
}
+
+ offset = block_sz / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, block_sz);
}
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -91,6 +92,25 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
token[0].instructions = EIP197_TOKEN_INS_LAST |
EIP197_TOKEN_INS_TYPE_CRYTO |
EIP197_TOKEN_INS_TYPE_OUTPUT;
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+ u32 last = (EIP197_MAX_TOKENS - 1) - offset;
+
+ token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
+ token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
+ EIP197_TOKEN_EXEC_IF_SUCCESSFUL |
+ EIP197_TOKEN_CTX_OFFSET(0x2);
+ token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+ token[last].instructions =
+ EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
+ EIP197_TOKEN_INS_ORIGIN_IV0;
+
+ /* Store the updated IV values back in the internal context
+ * registers.
+ */
+ cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
+ }
}
static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -333,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -346,21 +369,15 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
- } while (!rdesc->last_seg);
+ }
safexcel_complete(priv, ring);
if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, cryptlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, cryptlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
}
*should_complete = true;
@@ -385,26 +402,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
int i, ret = 0;
if (src == dst) {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_BIDIRECTIONAL);
nr_dst = nr_src;
if (!nr_src)
return -EINVAL;
} else {
- nr_src = dma_map_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
+ nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
DMA_TO_DEVICE);
if (!nr_src)
return -EINVAL;
- nr_dst = dma_map_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
+ nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
DMA_FROM_DEVICE);
if (!nr_dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
return -EINVAL;
}
}
@@ -454,7 +466,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
/* result descriptors */
for_each_sg(dst, sg, nr_dst, i) {
- bool first = !i, last = (i == nr_dst - 1);
+ bool first = !i, last = sg_is_last(sg);
u32 len = sg_dma_len(sg);
rdesc = safexcel_add_rdesc(priv, ring, first, last,
@@ -483,16 +495,10 @@ cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
if (src == dst) {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src,
- sg_nents_for_len(src, totlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst,
- sg_nents_for_len(dst, totlen),
- DMA_FROM_DEVICE);
+ dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
}
return ret;
@@ -501,6 +507,7 @@ cdesc_rollback:
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
int ring,
struct crypto_async_request *base,
+ struct safexcel_cipher_req *sreq,
bool *should_complete, int *ret)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
@@ -509,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- do {
+ if (unlikely(!sreq->rdescs))
+ return 0;
+
+ while (sreq->rdescs--) {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -522,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
- } while (!rdesc->last_seg);
+ }
safexcel_complete(priv, ring);
@@ -560,16 +570,35 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
int err;
if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
req->dst, req->cryptlen, sreq,
should_complete, ret);
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+ u32 block_sz = 0;
+
+ switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ block_sz = DES_BLOCK_SIZE;
+ break;
+ case SAFEXCEL_3DES:
+ block_sz = DES3_EDE_BLOCK_SIZE;
+ break;
+ case SAFEXCEL_AES:
+ block_sz = AES_BLOCK_SIZE;
+ break;
+ }
+
+ memcpy(req->iv, ctx->base.ctxr->data, block_sz);
+ }
}
return err;
@@ -587,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
if (sreq->needs_inv) {
sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
+ err = safexcel_handle_inv_result(priv, ring, async, sreq,
should_complete, ret);
} else {
err = safexcel_handle_req_result(priv, ring, async, req->src,
@@ -633,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
+
+ sreq->rdescs = *results;
return ret;
}
@@ -655,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
+ sreq->rdescs = *results;
return ret;
}
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index ac9282c1a5ec..a80a5e757b1f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -41,19 +41,21 @@ struct safexcel_ahash_req {
u64 len[2];
u64 processed[2];
- u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
};
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
- if (req->len[1] > req->processed[1])
- return 0xffffffff - (req->len[0] - req->processed[0]);
+ u64 len, processed;
- return req->len[0] - req->processed[0];
+ len = (0xffffffff * req->len[1]) + req->len[0];
+ processed = (0xffffffff * req->processed[1]) + req->processed[0];
+
+ return len - processed;
}
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
@@ -87,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= ctx->alg;
cdesc->control_data.control0 |= req->digest;
+ if (!req->finish)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
if (req->processed[0] || req->processed[1]) {
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
@@ -105,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
}
- if (!req->finish)
- cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
/*
* Copy the input digest if needed, and setup the context
* fields. Do this now as we need it to setup the first command
@@ -183,6 +185,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
DMA_TO_DEVICE);
sreq->cache_dma = 0;
+ sreq->cache_sz = 0;
}
if (sreq->finish)
@@ -209,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, extra, n_cdesc = 0, ret = 0;
- u64 queued, len, cache_len;
+ int i, extra = 0, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len, cache_max;
+
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
queued = len = safexcel_queued_len(req);
- if (queued <= crypto_ahash_blocksize(ahash))
+ if (queued <= cache_max)
cache_len = queued;
else
cache_len = queued - areq->nbytes;
@@ -223,26 +230,23 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
* fit into full blocks, cache it for the next send() call.
*/
extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
+ extra < crypto_ahash_blocksize(ahash))
+ extra += crypto_ahash_blocksize(ahash);
+
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
if (!extra)
- /* If this is not the last request and the queued data
- * is a multiple of a block, cache the last one for now.
- */
extra = crypto_ahash_blocksize(ahash);
- if (extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra,
- areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
- if (!queued) {
- *commands = 0;
- *results = 0;
- return 0;
- }
- }
+ queued -= extra;
+ len -= extra;
}
/* Add a command descriptor for the cached data, if any */
@@ -269,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- req->nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
+ req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
if (!req->nents) {
ret = -ENOMEM;
@@ -345,6 +348,7 @@ unmap_cache:
if (req->cache_dma) {
dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
DMA_TO_DEVICE);
+ req->cache_dma = 0;
req->cache_sz = 0;
}
@@ -486,7 +490,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(req, 0, sizeof(struct ahash_request));
+ memset(req, 0, EIP197_AHASH_REQ_SIZE);
/* create invalidation request */
init_completion(&result.completion);
@@ -519,10 +523,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
/* safexcel_ahash_cache: cache data until at least one request can be sent to
* the engine, aka. when there is at least 1 block size in the pipe.
*/
-static int safexcel_ahash_cache(struct ahash_request *areq)
+static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
u64 queued, cache_len;
/* queued: everything accepted by the driver which will be handled by
@@ -539,7 +542,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
* In case there isn't enough bytes to proceed (less than a
* block size), cache the data until we have enough.
*/
- if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+ if (cache_len + areq->nbytes <= cache_max) {
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
req->cache + cache_len,
areq->nbytes, 0);
@@ -599,6 +602,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ u32 cache_max;
/* If the request is 0 length, do nothing */
if (!areq->nbytes)
@@ -608,7 +612,11 @@ static int safexcel_ahash_update(struct ahash_request *areq)
if (req->len[0] < areq->nbytes)
req->len[1]++;
- safexcel_ahash_cache(areq);
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
+
+ safexcel_ahash_cache(areq, cache_max);
/*
* We're not doing partial updates when performing an hmac request.
@@ -621,7 +629,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > cache_max)
return safexcel_ahash_enqueue(areq);
return 0;
@@ -678,6 +686,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
+ u32 cache_sz;
+
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
export->len[0] = req->len[0];
export->len[1] = req->len[1];
@@ -687,7 +700,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->digest = req->digest;
memcpy(export->state, req->state, req->state_sz);
- memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+ memcpy(export->cache, req->cache, cache_sz);
return 0;
}
@@ -697,12 +710,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
const struct safexcel_ahash_export_state *export = in;
+ u32 cache_sz;
int ret;
ret = crypto_ahash_init(areq);
if (ret)
return ret;
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
+
req->len[0] = export->len[0];
req->len[1] = export->len[1];
req->processed[0] = export->processed[0];
@@ -710,7 +728,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
req->digest = export->digest;
- memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+ memcpy(req->cache, export->cache, cache_sz);
memcpy(req->state, export->state, req->state_sz);
return 0;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index eb75fa684876..142bc3f5c45c 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -145,6 +145,9 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr
(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
cdesc->control_data.context_hi = upper_32_bits(context);
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
+
/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
cdesc->control_data.refresh = 2;