author     Radu Alexe <radu.alexe@nxp.com>           2017-04-25 15:26:38 +0200
committer  Herbert Xu <herbert@gondor.apana.org.au>  2017-05-18 07:19:51 +0200
commit     52e26d77b8b3de2e9ed6c7126f68e04cad5fe852 (patch)
tree       70ed663f25389bc078e9171f28264da56a88a5ed /drivers/crypto/caam/caampkc.c
parent     crypto: caam - incapsulate dropping leading zeros into function (diff)
crypto: caam - add support for RSA key form 2
The CAAM RSA private key may have any of three representations.

1. The first representation consists of the pair (n, d), where the
   components have the following meanings:
      n     the RSA modulus
      d     the RSA private exponent

2. The second representation consists of the triplet (p, q, d), where
   the components have the following meanings:
      p     the first prime factor of the RSA modulus n
      q     the second prime factor of the RSA modulus n
      d     the RSA private exponent

3. The third representation consists of the quintuple (p, q, dP, dQ,
   qInv), where the components have the following meanings:
      p     the first prime factor of the RSA modulus n
      q     the second prime factor of the RSA modulus n
      dP    the first factor's CRT exponent
      dQ    the second factor's CRT exponent
      qInv  the (first) CRT coefficient

The benefit of using the second or third key form is lower
computational cost for the decryption and signature operations; the
CRT identities sketched below show why.

This patch adds support for the second RSA private key
representation.
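
For context, the standard RSA-CRT identities behind that cost claim
(textbook material, not part of the patch itself; c is the ciphertext,
m the recovered message):

    \[
    \begin{aligned}
      \text{form 1:}\quad    & m = c^{d} \bmod n \\
      \text{forms 2/3:}\quad & d_P = d \bmod (p-1), \qquad
                               d_Q = d \bmod (q-1), \qquad
                               qInv = q^{-1} \bmod p \\
                             & m_p = c^{d_P} \bmod p, \qquad
                               m_q = c^{d_Q} \bmod q \\
                             & h = qInv \cdot (m_p - m_q) \bmod p, \qquad
                               m = m_q + h \cdot q
    \end{aligned}
    \]

Since modular exponentiation scales roughly cubically with operand
width, the two half-width exponentiations cost about a quarter of the
single full-width one. With form 2 only (p, q, d) is supplied and the
CAAM derives dP, dQ and qInv itself; the tmp1/tmp2 buffers introduced
in this patch presumably give the engine scratch space for that
derivation.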
Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/caam/caampkc.c')
-rw-r--r--   drivers/crypto/caam/caampkc.c   231
1 file changed, 214 insertions, 17 deletions
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index d2c6977ba82e..912e41700522 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -18,6 +18,8 @@
 #define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
 #define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
 				 sizeof(struct rsa_priv_f1_pdb))
+#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
+				 sizeof(struct rsa_priv_f2_pdb))
 
 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
 			 struct akcipher_request *req)
@@ -54,6 +56,23 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
 }
 
+static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+			      struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+}
+
 /* RSA Job Completion handler */
 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
 {
@@ -90,6 +109,24 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
 	akcipher_request_complete(req, err);
 }
 
+static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct akcipher_request *req = context;
+	struct rsa_edesc *edesc;
+
+	if (err)
+		caam_jr_strstatus(dev, err);
+
+	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+	rsa_priv_f2_unmap(dev, edesc, req);
+	rsa_io_unmap(dev, edesc, req);
+	kfree(edesc);
+
+	akcipher_request_complete(req, err);
+}
+
 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 					 size_t desclen)
 {
@@ -258,6 +295,81 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
 	return 0;
 }
 
+static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+			       struct rsa_edesc *edesc)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	struct device *dev = ctx->dev;
+	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+	int sec4_sg_index = 0;
+	size_t p_sz = key->p_sz;
+	size_t q_sz = key->q_sz;
+
+	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->d_dma)) {
+		dev_err(dev, "Unable to map RSA private exponent memory\n");
+		return -ENOMEM;
+	}
+
+	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->p_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor p memory\n");
+		goto unmap_d;
+	}
+
+	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->q_dma)) {
+		dev_err(dev, "Unable to map RSA prime factor q memory\n");
+		goto unmap_p;
+	}
+
+	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+		dev_err(dev, "Unable to map RSA tmp1 memory\n");
+		goto unmap_q;
+	}
+
+	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+		dev_err(dev, "Unable to map RSA tmp2 memory\n");
+		goto unmap_tmp1;
+	}
+
+	if (edesc->src_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+		pdb->g_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+	} else {
+		pdb->g_dma = sg_dma_address(req->src);
+	}
+
+	if (edesc->dst_nents > 1) {
+		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+		pdb->f_dma = edesc->sec4_sg_dma +
+			     sec4_sg_index * sizeof(struct sec4_sg_entry);
+	} else {
+		pdb->f_dma = sg_dma_address(req->dst);
+	}
+
+	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
+
+	return 0;
+
+unmap_tmp1:
+	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+unmap_q:
+	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+unmap_p:
+	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+unmap_d:
+	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+
+	return -ENOMEM;
+}
+
 static int caam_rsa_enc(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
@@ -301,24 +413,14 @@ init_fail:
 	return ret;
 }
 
-static int caam_rsa_dec(struct akcipher_request *req)
+static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
 {
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-	struct caam_rsa_key *key = &ctx->key;
 	struct device *jrdev = ctx->dev;
 	struct rsa_edesc *edesc;
 	int ret;
 
-	if (unlikely(!key->n || !key->d))
-		return -EINVAL;
-
-	if (req->dst_len < key->n_sz) {
-		req->dst_len = key->n_sz;
-		dev_err(jrdev, "Output buffer length less than parameter n\n");
-		return -EOVERFLOW;
-	}
-
 	/* Allocate extended descriptor */
 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
 	if (IS_ERR(edesc))
@@ -344,17 +446,73 @@ init_fail:
 	return ret;
 }
 
+static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *jrdev = ctx->dev;
+	struct rsa_edesc *edesc;
+	int ret;
+
+	/* Allocate extended descriptor */
+	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
+	ret = set_rsa_priv_f2_pdb(req, edesc);
+	if (ret)
+		goto init_fail;
+
+	/* Initialize Job Descriptor */
+	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
+
+	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
+	if (!ret)
+		return -EINPROGRESS;
+
+	rsa_priv_f2_unmap(jrdev, edesc, req);
+
+init_fail:
+	rsa_io_unmap(jrdev, edesc, req);
+	kfree(edesc);
+	return ret;
+}
+
+static int caam_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct caam_rsa_key *key = &ctx->key;
+	int ret;
+
+	if (unlikely(!key->n || !key->d))
+		return -EINVAL;
+
+	if (req->dst_len < key->n_sz) {
+		req->dst_len = key->n_sz;
+		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
+		return -EOVERFLOW;
+	}
+
+	if (key->priv_form == FORM2)
+		ret = caam_rsa_dec_priv_f2(req);
+	else
+		ret = caam_rsa_dec_priv_f1(req);
+
+	return ret;
+}
+
 static void caam_rsa_free_key(struct caam_rsa_key *key)
 {
 	kzfree(key->d);
+	kzfree(key->p);
+	kzfree(key->q);
+	kzfree(key->tmp1);
+	kzfree(key->tmp2);
 	kfree(key->e);
 	kfree(key->n);
-	key->d = NULL;
-	key->e = NULL;
-	key->n = NULL;
-	key->d_sz = 0;
-	key->e_sz = 0;
-	key->n_sz = 0;
+	memset(key, 0, sizeof(*key));
 }
 
 static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
@@ -444,6 +602,43 @@ err:
 	return -ENOMEM;
 }
 
+static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+				       struct rsa_key *raw_key)
+{
+	struct caam_rsa_key *rsa_key = &ctx->key;
+	size_t p_sz = raw_key->p_sz;
+	size_t q_sz = raw_key->q_sz;
+
+	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
+	if (!rsa_key->p)
+		return;
+	rsa_key->p_sz = p_sz;
+
+	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
+	if (!rsa_key->q)
+		goto free_p;
+	rsa_key->q_sz = q_sz;
+
+	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->tmp1)
+		goto free_q;
+
+	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
+	if (!rsa_key->tmp2)
+		goto free_tmp1;
+
+	rsa_key->priv_form = FORM2;
+
+	return;
+
+free_tmp1:
+	kzfree(rsa_key->tmp1);
+free_q:
+	kzfree(rsa_key->q);
+free_p:
+	kzfree(rsa_key->p);
+}
+
 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
 				 unsigned int keylen)
 {
@@ -490,6 +685,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
 	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
 	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
 
+	caam_rsa_set_priv_key_form(ctx, &raw_key);
+
 	return 0;
 
 err:
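
For completeness, a minimal, hypothetical caller-side sketch of the path
this patch enables. The function name rsa_decrypt_once and the buffer
parameters are illustrative, not from the patch; it assumes a BER-encoded
PKCS#1 RSAPrivateKey whose p/q fields are populated, so that
caam_rsa_set_priv_key() marks the key FORM2 and caam_rsa_dec() routes the
request to caam_rsa_dec_priv_f2(). The crypto_wait_req() completion helper
used for the synchronous wait entered the kernel around the same time as
this patch.

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/*
 * Decrypt one ciphertext block through whichever "rsa" implementation the
 * crypto API selects ("rsa-caam" when the CAAM driver is the highest-
 * priority provider). All buffers must be linearly mapped (e.g. kmalloc'd)
 * so that sg_init_one() can wrap them.
 */
static int rsa_decrypt_once(const void *ber_key, unsigned int ber_len,
			    void *c, unsigned int c_len,
			    void *m, unsigned int m_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * Ends up in caam_rsa_set_priv_key(); a key carrying p and q is
	 * flagged FORM2 by caam_rsa_set_priv_key_form() above.
	 */
	ret = crypto_akcipher_set_priv_key(tfm, ber_key, ber_len);
	if (ret)
		goto out_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* dst must be at least n_sz bytes or caam_rsa_dec() returns -EOVERFLOW */
	sg_init_one(&src, c, c_len);
	sg_init_one(&dst, m, m_len);
	akcipher_request_set_crypt(req, &src, &dst, c_len, m_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);

	/* CAAM completes asynchronously via rsa_priv_f2_done(); wait here. */
	ret = crypto_wait_req(crypto_akcipher_decrypt(req), &wait);

	akcipher_request_free(req);
out_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}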