Diffstat (limited to 'drivers/crypto/caam')
 drivers/crypto/caam/caamalg.c     | 1832
 drivers/crypto/caam/compat.h      |    1
 drivers/crypto/caam/ctrl.c        |    4
 drivers/crypto/caam/desc_constr.h |   58
 4 files changed, 1466 insertions(+), 429 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 676d957c22b0..4159265b453b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -62,10 +62,22 @@
#define CAAM_MAX_IV_LENGTH 16
/* length of descriptors text */
-#define DESC_AEAD_SHARED_TEXT_LEN 4
-#define DESC_AEAD_ENCRYPT_TEXT_LEN 21
-#define DESC_AEAD_DECRYPT_TEXT_LEN 24
-#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
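+/*
+ * Job descriptor i/o overhead: header (with shared descriptor pointer)
+ * plus SEQ IN PTR and SEQ OUT PTR, i.e. three commands and three pointers
+ */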
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
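+/*
+ * The givencrypt descriptor with both keys inlined is the largest shared
+ * descriptor built here, so size the per-session buffers for that case
+ */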
+#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
#ifdef DEBUG
/* for print_hex_dumps with line references */
@@ -76,30 +88,366 @@
#define debug(format, arg...)
#endif
+/* Set DK (Decrypt Key) bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
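+	/* If the descriptor is shared, jump ahead to the DK variant */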
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+ u32 *jump_cmd;
+
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read the payload from req->src and write the
+ * payload to req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, load the IV into the class 1 context
+ * and feed a copy to class 2 so it is covered by the MAC
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ivsize);
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * Set when all data are contiguous: for src, assoc + iv + payload;
+ * for dst, iv + payload
+ */
+#define GIV_SRC_CONTIG 1
+#define GIV_DST_CONTIG (1 << 1)
+
/*
* per-session context
*/
struct caam_ctx {
struct device *jrdev;
- u32 *sh_desc;
- dma_addr_t shared_desc_phys;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ dma_addr_t sh_desc_enc_dma;
+ dma_addr_t sh_desc_dec_dma;
+ dma_addr_t sh_desc_givenc_dma;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
- u8 *key;
- dma_addr_t key_phys;
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
unsigned int enckeylen;
unsigned int split_key_len;
unsigned int split_key_pad_len;
unsigned int authsize;
};
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
+{
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+ ctx->split_key_pad_len, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_aead(desc, ctx, keys_fit_inline);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 geniv, moveiv;
+ u32 *desc;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+	keys_fit_inline = false;
+	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Only propagate error immediately if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+	keys_fit_inline = false;
+	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
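+	/* the load above is the info FIFO entry; keep MOVE from adding one */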
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy IV from class 1 context to the output FIFO */
+ append_move(desc, MOVE_SRC_CLASS1CTX |
+ MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Start class 2 (authentication) operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+	/* Copy IV from class 1 ctx to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
return 0;
}
@@ -117,6 +465,7 @@ static void split_key_done(struct device *dev, u32 *desc, u32 err,
#ifdef DEBUG
dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
+
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -220,73 +569,7 @@ static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
return ret;
}
-static int build_sh_desc_ipsec(struct caam_ctx *ctx)
-{
- struct device *jrdev = ctx->jrdev;
- u32 *sh_desc;
- u32 *jump_cmd;
- bool keys_fit_inline = 0;
-
- /*
- * largest Job Descriptor and its Shared Descriptor
- * must both fit into the 64-word Descriptor h/w Buffer
- */
- if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
- DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
- ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = 1;
-
- /* build shared descriptor for this session */
- sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
- (keys_fit_inline ?
- ctx->split_key_pad_len + ctx->enckeylen :
- CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
- if (!sh_desc) {
- dev_err(jrdev, "could not allocate shared descriptor\n");
- return -ENOMEM;
- }
-
- init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
-
- jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
-
- /*
- * process keys, starting with class 2/authentication.
- */
- if (keys_fit_inline) {
- append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len,
- CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-
- append_key_as_imm(sh_desc, (void *)ctx->key +
- ctx->split_key_pad_len, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* update jump cmd now that we are at the jump target */
- set_jump_tgt_here(sh_desc, jump_cmd);
-
- ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
- desc_bytes(sh_desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- kfree(sh_desc);
- return -ENOMEM;
- }
-
- ctx->sh_desc = sh_desc;
-
- return 0;
-}
-
-static int aead_authenc_setkey(struct crypto_aead *aead,
+static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
@@ -326,27 +609,19 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
- GFP_KERNEL | GFP_DMA);
- if (!ctx->key) {
- dev_err(jrdev, "could not allocate key output memory\n");
- return -ENOMEM;
- }
ret = gen_split_key(ctx, key, authkeylen);
if (ret) {
- kfree(ctx->key);
goto badkey;
}
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
- ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_phys)) {
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
- kfree(ctx->key);
return -ENOMEM;
}
#ifdef DEBUG
@@ -357,11 +632,10 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
ctx->enckeylen = enckeylen;
- ret = build_sh_desc_ipsec(ctx);
+ ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- kfree(ctx->key);
}
return ret;
@@ -370,6 +644,119 @@ badkey:
return -EINVAL;
}
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 *desc;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+
+ /* ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+
+ /* Load iv */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ /* ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+	/* Only propagate error immediately if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* load IV */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Choose operation */
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+	/* Wait for key to load before allowing error propagation */
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return ret;
+}
+
struct link_tbl_entry {
u64 ptr;
u32 len;
@@ -379,64 +766,109 @@ struct link_tbl_entry {
};
/*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @link_tbl_bytes: length of dma mapped link_tbl space
* @link_tbl_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
-struct ipsec_esp_edesc {
+struct aead_edesc {
int assoc_nents;
int src_nents;
int dst_nents;
+ dma_addr_t iv_dma;
int link_tbl_bytes;
dma_addr_t link_tbl_dma;
struct link_tbl_entry *link_tbl;
u32 hw_desc[0];
};
-static void ipsec_esp_unmap(struct device *dev,
- struct ipsec_esp_edesc *edesc,
- struct aead_request *areq)
-{
- dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int link_tbl_bytes;
+ dma_addr_t link_tbl_dma;
+ struct link_tbl_entry *link_tbl;
+ u32 hw_desc[0];
+};
- if (unlikely(areq->dst != areq->src)) {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
- DMA_FROM_DEVICE);
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents, int dst_nents,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
+ int link_tbl_bytes)
+{
+ if (unlikely(dst != src)) {
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
- if (edesc->link_tbl_bytes)
- dma_unmap_single(dev, edesc->link_tbl_dma,
- edesc->link_tbl_bytes,
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ if (link_tbl_bytes)
+ dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
DMA_TO_DEVICE);
}
-/*
- * ipsec_esp descriptor callbacks
- */
-static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+ edesc->link_tbl_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+ edesc->link_tbl_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- int ivsize = crypto_aead_ivsize(aead);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -444,39 +876,50 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen +
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen +
ctx->authsize + 4, 1);
#endif
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
}
-static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+ req->cryptlen, 1);
+#endif
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -484,7 +927,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
/*
* verify hw auth check passed else return -EBADMSG
@@ -495,255 +938,413 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
- ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
- sizeof(struct iphdr) + areq->assoclen +
- ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
+ ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+ sizeof(struct iphdr) + req->assoclen +
+ ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
ctx->authsize + 36, 1);
if (!err && edesc->link_tbl_bytes) {
- struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
+ struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
sg->length + ctx->authsize + 16, 1);
}
#endif
+
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
+ dma_addr_t dma, u32 len, u32 offset)
+{
+ link_tbl_ptr->ptr = dma;
+ link_tbl_ptr->len = len;
+ link_tbl_ptr->reserved = 0;
+ link_tbl_ptr->buf_pool_id = 0;
+ link_tbl_ptr->offset = offset;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
+ sizeof(struct link_tbl_entry), 1);
+#endif
}
/*
* convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
+ * but does not set the final bit; instead, returns the last entry
*/
-static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- struct link_tbl_entry *link_tbl_ptr, u32 offset)
+static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
+ int sg_count, struct link_tbl_entry
+ *link_tbl_ptr, u32 offset)
{
while (sg_count) {
- link_tbl_ptr->ptr = sg_dma_address(sg);
- link_tbl_ptr->len = sg_dma_len(sg);
- link_tbl_ptr->reserved = 0;
- link_tbl_ptr->buf_pool_id = 0;
- link_tbl_ptr->offset = offset;
+ sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
link_tbl_ptr++;
sg = sg_next(sg);
sg_count--;
}
+ return link_tbl_ptr - 1;
+}
- /* set Final bit (marks end of link table) */
- link_tbl_ptr--;
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
+ struct link_tbl_entry *link_tbl_ptr, u32 offset)
+{
+ link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
link_tbl_ptr->len |= 0x40000000;
}
/*
- * fill in and submit ipsec_esp job descriptor
+ * Fill in aead job descriptor
*/
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
- u32 encrypt,
- void (*callback) (struct device *dev, u32 *desc,
- u32 err, void *context))
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ bool all_contig, bool encrypt)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- u32 *desc = edesc->hw_desc, options;
- int ret, sg_count, assoc_sg_count;
int ivsize = crypto_aead_ivsize(aead);
int authsize = ctx->authsize;
- dma_addr_t ptr, dst_dma, src_dma;
-#ifdef DEBUG
- u32 *sh_desc = ctx->sh_desc;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
+#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
- areq->assoclen, areq->cryptlen, authsize);
+ req->assoclen, req->cryptlen, authsize);
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen, 1);
print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
desc_bytes(sh_desc), 1);
#endif
- assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
- DMA_TO_DEVICE);
- if (areq->src == areq->dst)
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- else
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
- /* start auth operation */
- append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
- (encrypt ? : OP_ALG_ICV_ON));
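+	/*
+	 * HDR_REVERSE runs the job descriptor before the shared descriptor,
+	 * so the SEQ IN/OUT pointers set up below are in place when the
+	 * shared descriptor executes
+	 */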
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- /* Load FIFO with data for Class 2 CHA */
- options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
- if (!edesc->assoc_nents) {
- ptr = sg_dma_address(areq->assoc);
+ if (all_contig) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
} else {
- sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
- edesc->link_tbl, 0);
- ptr = edesc->link_tbl_dma;
- options |= LDST_SGF;
+ src_dma = edesc->link_tbl_dma;
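+		/* dst entries follow the assoc + iv + src entries in the table */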
+ link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
+ (edesc->src_nents ? : 1);
+ in_options = LDST_SGF;
}
- append_fifo_load(desc, ptr, areq->assoclen, options);
-
- /* copy iv from cipher/class1 input context to class2 infifo */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
-
- if (!encrypt) {
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* JUMP if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (encrypt)
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
+ else
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen, in_options);
- /* start class 1 (cipher) operation, non-shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL);
+ if (likely(req->src == req->dst)) {
+ if (all_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ ((edesc->assoc_nents ? : 1) + 1);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index *
+ sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ if (encrypt)
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ else
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+ out_options);
+}
- uncond_jump_cmd = append_jump(desc, 0);
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ int contig)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
- set_jump_tgt_here(desc, jump_cmd);
+#ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+ print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+ print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+#endif
- /* start class 1 (cipher) operation, shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
- } else
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | encrypt);
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- /* load payload & instruct to class2 to snoop class 1 if encrypting */
- options = 0;
- if (!edesc->src_nents) {
- src_dma = sg_dma_address(areq->src);
+ if (contig & GIV_SRC_CONTIG) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
} else {
- sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
- edesc->assoc_nents, 0);
- src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
- sizeof(struct link_tbl_entry);
- options |= LDST_SGF;
+ src_dma = edesc->link_tbl_dma;
+ link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+ in_options = LDST_SGF;
}
- append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
- FIFOLD_TYPE_LASTBOTH |
- (encrypt ? FIFOLD_TYPE_MSG1OUT2
- : FIFOLD_TYPE_MSG));
-
- /* specify destination */
- if (areq->src == areq->dst) {
- dst_dma = src_dma;
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
+
+ if (contig & GIV_DST_CONTIG) {
+ dst_dma = edesc->iv_dma;
} else {
- sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
- if (!edesc->dst_nents) {
- dst_dma = sg_dma_address(areq->dst);
- options = 0;
+ if (likely(req->src == req->dst)) {
+ dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ edesc->assoc_nents;
+ out_options = LDST_SGF;
} else {
- sg_to_link_tbl(areq->dst, edesc->dst_nents,
- edesc->link_tbl + edesc->assoc_nents +
- edesc->src_nents, 0);
- dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
- edesc->src_nents) *
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index *
sizeof(struct link_tbl_entry);
- options = LDST_SGF;
+ out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
- /* ICV */
- if (encrypt)
- append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
- else
- append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
#ifdef DEBUG
- debug("job_desc_len %d\n", desc_len(desc));
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
- print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
- edesc->link_tbl_bytes, 1);
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, callback, areq);
- if (!ret)
- ret = -EINPROGRESS;
- else {
- ipsec_esp_unmap(jrdev, edesc, areq);
- kfree(edesc);
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (iv_contig) {
+ src_dma = edesc->iv_dma;
+ in_options = 0;
+ } else {
+ src_dma = edesc->link_tbl_dma;
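+		/* iv entry comes first in the table, followed by src entries */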
+ link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ in_options = LDST_SGF;
}
+ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
- return ret;
+ if (likely(req->src == req->dst)) {
+ if (!edesc->src_nents && iv_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index * sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
* derive number of elements in scatterlist
*/
-static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+static int sg_count(struct scatterlist *sg_list, int nbytes)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
- *chained = 0;
while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
- *chained = 1;
+			BUG(); /* chained scatterlists not supported */
sg = scatterwalk_sg_next(sg);
}
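+	/* a single segment needs no link table: signal that with 0 */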
+ if (likely(sg_nents == 1))
+ return 0;
+
return sg_nents;
}
/*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the aead extended descriptor
*/
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
- int desc_bytes)
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ int desc_bytes, bool *all_contig_ptr)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
- struct ipsec_esp_edesc *edesc;
-
- assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
- BUG_ON(chained);
- if (likely(assoc_nents == 1))
- assoc_nents = 0;
-
- src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (src_nents == 1)
- src_nents = 0;
-
- if (unlikely(areq->dst != areq->src)) {
- dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (dst_nents == 1)
- dst_nents = 0;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ bool all_contig = true;
+ int ivsize = crypto_aead_ivsize(aead);
+ int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen);
+ src_nents = sg_count(req->src, req->cryptlen);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen);
+
+ sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
}
- link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
- sizeof(struct link_tbl_entry);
- debug("link_tbl_bytes %d\n", link_tbl_bytes);
+ /* Check if data are contiguous */
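+	/* i.e. whether assoc, iv and src payload sit back to back in DMA space */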
+ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize !=
+ sg_dma_address(req->src)) {
+ all_contig = false;
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ link_tbl_len = assoc_nents + 1 + src_nents;
+ }
+ link_tbl_len += dst_nents;
+
+ link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
link_tbl_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -753,142 +1354,450 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
- edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
+ edesc->iv_dma = iv_dma;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
link_tbl_bytes, DMA_TO_DEVICE);
- edesc->link_tbl_bytes = link_tbl_bytes;
+ *all_contig_ptr = all_contig;
+
+ link_tbl_index = 0;
+ if (!all_contig) {
+ sg_to_link_tbl(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += assoc_nents ? : 1;
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->src,
+ (src_nents ? : 1),
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += src_nents ? : 1;
+ }
+ if (dst_nents) {
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
return edesc;
}
-static int aead_authenc_encrypt(struct aead_request *areq)
+static int aead_encrypt(struct aead_request *req)
{
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
+ int ret = 0;
+
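+	/* include the ICV in cryptlen: the sequence lengths must cover it */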
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- desc = edesc->hw_desc;
-
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
-
- iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+ /* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+ return ret;
}
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
+ struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- struct ipsec_esp_edesc *edesc;
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
-
- req->cryptlen -= ctx->authsize;
+ int ret = 0;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+/*
+ * allocate and map the aead extended descriptor for aead givencrypt
+ */
+static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
+ *greq, int desc_bytes,
+ u32 *contig_ptr)
+{
+ struct aead_request *req = &greq->areq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
+ int ivsize = crypto_aead_ivsize(aead);
+ int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen);
+ src_nents = sg_count(req->src, req->cryptlen);
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen);
- return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
+ sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
+ }
+
+ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
+ contig &= ~GIV_DST_CONTIG;
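+	/* a separate dst needs its own iv entry ahead of the dst segments */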
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = dst_nents ? : 1;
+ link_tbl_len += 1;
+ }
+ if (!(contig & GIV_SRC_CONTIG)) {
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ link_tbl_len += assoc_nents + 1 + src_nents;
+ if (likely(req->src == req->dst))
+ contig &= ~GIV_DST_CONTIG;
+ }
+ link_tbl_len += dst_nents;
+
+ link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ link_tbl_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->assoc_nents = assoc_nents;
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+ link_tbl_bytes, DMA_TO_DEVICE);
+ *contig_ptr = contig;
+
+ link_tbl_index = 0;
+ if (!(contig & GIV_SRC_CONTIG)) {
+ sg_to_link_tbl(req->assoc, assoc_nents,
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += assoc_nents;
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->src, src_nents,
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += src_nents;
+ }
+ if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
+
+ return edesc;
}
-static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
- struct aead_request *areq = &req->areq;
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_request *req = &areq->areq;
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
- dma_addr_t iv_dma;
+ u32 contig;
u32 *desc;
+ int ret = 0;
- iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
-
- debug("%s: giv %p\n", __func__, req->giv);
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &contig);
+
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor */
+ init_aead_giv_job(ctx->sh_desc_givenc,
+ ctx->sh_desc_givenc_dma, edesc, req, contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- /*
- * LOAD IMM Info FIFO
- * to DECO, Last, Padding, Random, Message, 16 bytes
- */
- append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
- NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
- NFIFOENTRY_PTYPE_RND | ivsize,
- LDST_SRCDST_WORD_INFO_FIFO);
+/*
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, link_tbl_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int link_tbl_index;
+
+ src_nents = sg_count(req->src, req->nbytes);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->nbytes);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
+ }
/*
- * disable info fifo entries since the above serves as the entry
- * this way, the MOVE command won't generate an entry.
- * Note that this isn't required in more recent versions of
- * SEC as a MOVE that doesn't do info FIFO entries is available.
+ * Check if iv can be contiguous with source and destination.
+	 * If so, include it; if not, give it its own link table entry.
*/
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+ iv_contig = true;
+ else
+ src_nents = src_nents ? : 1;
+ link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct link_tbl_entry);
- /* MOVE DECO Alignment -> C1 Context 16 bytes */
- append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ link_tbl_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* re-enable info fifo entries */
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ link_tbl_index = 0;
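+ /* a non-contiguous IV leads the table, followed by the src segments */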
+ if (!iv_contig) {
+ sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
+ sg_to_link_tbl_last(req->src, src_nents,
+ edesc->link_tbl + 1, 0);
+ link_tbl_index += 1 + src_nents;
+ }
+
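+ /* a distinct req->dst supplies its own entries after any src entries */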
+ if (unlikely(dst_nents)) {
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
+
+ edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+ link_tbl_bytes, DMA_TO_DEVICE);
+ edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
+ link_tbl_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
- /* MOVE C1 Context -> OFIFO 16 bytes */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
+ /* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_enc,
+ ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
- append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+ return ret;
}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ desc = edesc->hw_desc;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
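+/* shorthand for the per-type members of the template union below */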
+#define template_aead template_u.aead
+#define template_ablkcipher template_u.ablkcipher
struct caam_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- struct aead_alg aead;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ struct rng_alg rng;
+ } template_u;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
@@ -900,12 +1809,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -918,12 +1828,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -937,12 +1848,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(aes))",
.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -956,12 +1868,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -974,12 +1887,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -993,12 +1907,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1012,12 +1927,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(des))",
.driver_name = "authenc-hmac-sha1-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -1030,12 +1946,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(des))",
.driver_name = "authenc-hmac-sha256-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -1049,12 +1966,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(des))",
.driver_name = "authenc-hmac-sha512-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1064,6 +1982,55 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
+ /* ablkcipher algorithm templates */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ }
};
struct caam_crypto_alg {
@@ -1102,16 +2069,19 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
{
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
- dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
- desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
- kfree(ctx->sh_desc);
-
- if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
- dma_unmap_single(ctx->jrdev, ctx->key_phys,
- ctx->split_key_pad_len + ctx->enckeylen,
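+ /* unmap whichever of the three shared descriptors were created */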
+ if (ctx->sh_desc_enc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+ if (ctx->sh_desc_dec_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
+ if (ctx->sh_desc_givenc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
- kfree(ctx->key);
}
static void __exit caam_algapi_exit(void)
@@ -1175,12 +2145,20 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
alg->cra_init = caam_cra_init;
alg->cra_exit = caam_cra_exit;
alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_u.aead = template->aead;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
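+ /* wire up the type-specific ops from the template union */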
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg->cra_type = &crypto_aead_type;
+ alg->cra_aead = template->template_aead;
+ break;
+ }
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 950450346f70..d38f2afaa966 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -31,5 +31,6 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 5433c304ac57..73988bb7322a 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -52,9 +52,11 @@ static int caam_probe(struct platform_device *pdev)
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
- struct caam_perfmon *perfmon;
struct caam_deco **deco;
u32 deconum;
+#ifdef CONFIG_DEBUG_FS
+ struct caam_perfmon *perfmon;
+#endif
ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
if (!ctrlpriv)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 46915800c26f..0991323cf3fd 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -9,7 +9,7 @@
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
#define CAAM_PTR_SZ sizeof(dma_addr_t)
-#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
@@ -18,6 +18,9 @@
#define PRINT_POS
#endif
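+/* change the DECO share status to "OK, propagate errors" */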
+#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
@@ -203,3 +206,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
+
+/*
+ * Append a math command. Only the last part of the destination and source
+ * names (i.e. without the MATH_DEST_/MATH_SRC[01]_ prefix) needs to be
+ * specified.
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+ MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK));
+
+#define append_math_add(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+ APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+ APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+ APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
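+
+/*
+ * For example, append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ)
+ * expands to the single command word CMD_MATH | MATH_FUN_ADD |
+ * MATH_DEST_VARSEQINLEN | MATH_SRC0_SEQINLEN | MATH_SRC1_REG0 | CAAM_CMD_SZ.
+ */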
+
+/* Exactly one source must be IMM; the immediate data is passed in as a u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+ append_cmd(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
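+
+/*
+ * For example, append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 4)
+ * emits the MATH command word (with MATH_SRC1_IMM and length CAAM_CMD_SZ)
+ * followed by the u32 immediate value 4 as the next word.
+ */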