author    | Gary R Hook <gary.hook@amd.com>           | 2018-03-07 18:31:14 +0100
committer | Herbert Xu <herbert@gondor.apana.org.au>  | 2018-03-16 16:35:52 +0100
commit    | b698a9f4c5c52317db486b069190c7e3d2b97e7e (patch)
tree      | 0395c4da367e027b2d2231c933c8096dc2a6de10 /drivers/crypto
parent    | crypto: hash - Prevent use of req->result in ahash update (diff)
crypto: ccp - Validate buffer lengths for copy operations
The CCP driver copies data between scatter/gather lists and DMA buffers.
The length of the requested copy operation must be checked against
the available destination buffer length.
Reported-by: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
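
For illustration, here is a minimal, hypothetical userspace sketch of the bounds check this patch adds to ccp_set_dm_area(). The struct and helper below are simplified stand-ins rather than the kernel API: the real function copies from a scatterlist with scatterwalk_map_and_copy(), and plain memcpy() is used here instead.

/*
 * Sketch only: reject any copy that would run past the end of the
 * destination work area, mirroring the check added in the diff below.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct dm_workarea {
	unsigned char *address;   /* destination (DMA) buffer */
	unsigned int length;      /* total size of that buffer */
};

static int set_dm_area(struct dm_workarea *wa, unsigned int wa_offset,
		       const void *src, unsigned int len)
{
	/* Same condition as the patch: len must fit after wa_offset. */
	if (len > (wa->length - wa_offset))
		return -EINVAL;

	memcpy(wa->address + wa_offset, src, len);
	return 0;
}

int main(void)
{
	unsigned char buf[32];
	struct dm_workarea wa = { .address = buf, .length = sizeof(buf) };
	unsigned char key[48] = { 0 };

	/* 48 bytes do not fit in a 32-byte work area: rejected (-EINVAL). */
	printf("oversized copy: %d\n", set_dm_area(&wa, 0, key, sizeof(key)));

	/* 16 bytes at offset 16 fit exactly: accepted (0). */
	printf("in-bounds copy: %d\n", set_dm_area(&wa, 16, key, 16));

	return 0;
}

As in the patch itself, callers must now check the return value and unwind on failure, which is what the bulk of the diff below does at each ccp_set_dm_area() call site.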
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- | drivers/crypto/ccp/ccp-ops.c | 108
1 file changed, 78 insertions(+), 30 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 406b95329b3d..0ea43cdeb05f 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
 	return 0;
 }
 
-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
-			    struct scatterlist *sg, unsigned int sg_offset,
-			    unsigned int len)
+static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+			   struct scatterlist *sg, unsigned int sg_offset,
+			   unsigned int len)
 {
 	WARN_ON(!wa->address);
 
+	if (len > (wa->length - wa_offset))
+		return -EINVAL;
+
 	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
 				 0);
+	return 0;
 }
 
 static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
 				   unsigned int len)
 {
 	u8 *p, *q;
+	int rc;
 
-	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+	if (rc)
+		return rc;
 
 	p = wa->address + wa_offset;
 	q = p + len - 1;
@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 		goto e_key;
 
 	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
-	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_ctx;
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 			goto e_src;
 		}
 
-		ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
-				aes->cmac_key_len);
+		ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+				      aes->cmac_key_len);
+		if (ret)
+			goto e_src;
 		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				     CCP_PASSTHRU_BYTESWAP_256BIT);
 		if (ret) {
@@ -666,7 +679,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -685,7 +700,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 		goto e_key;
 
 	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
-	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	if (ret)
+		goto e_ctx;
 
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -777,7 +794,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 			goto e_dst;
 		}
 
-		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+		if (ret)
+			goto e_dst;
 
 		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				     CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -820,7 +839,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 				   DMA_BIDIRECTIONAL);
 		if (ret)
 			goto e_tag;
-		ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+		if (ret)
+			goto e_tag;
 
 		ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
 		ccp_dm_free(&tag);
@@ -914,7 +935,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	if (ret)
+		goto e_key;
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
@@ -935,7 +958,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (aes->mode != CCP_AES_MODE_ECB) {
 		/* Load the AES context - convert to LE */
 		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
-		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+		if (ret)
+			goto e_ctx;
 		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 				     CCP_PASSTHRU_BYTESWAP_256BIT);
 		if (ret) {
@@ -1113,8 +1138,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 		 * big endian to little endian.
 		 */
 		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
-		ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
-		ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+		if (ret)
+			goto e_key;
+		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+		if (ret)
+			goto e_key;
 	} else {
 		/* Version 5 CCPs use a 512-bit space for the key: each portion
 		 * occupies 256 bits, or one entire slot, and is zero-padded.
@@ -1123,9 +1152,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 
 		dm_offset = CCP_SB_BYTES;
 		pad = dm_offset - xts->key_len;
-		ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
-		ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
-				xts->key_len);
+		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+		if (ret)
+			goto e_key;
+		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
+				      xts->key_len, xts->key_len);
+		if (ret)
+			goto e_key;
 	}
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -1144,7 +1177,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	if (ret)
 		goto e_key;
 
-	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
+	if (ret)
+		goto e_ctx;
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_NOOP);
 	if (ret) {
@@ -1287,12 +1322,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
 
 	len_singlekey = des3->key_len / 3;
-	ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
-			des3->key, 0, len_singlekey);
-	ccp_set_dm_area(&key, dm_offset + len_singlekey,
-			des3->key, len_singlekey, len_singlekey);
-	ccp_set_dm_area(&key, dm_offset,
-			des3->key, 2 * len_singlekey, len_singlekey);
+	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+			      des3->key, 0, len_singlekey);
+	if (ret)
+		goto e_key;
+	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
+			      des3->key, len_singlekey, len_singlekey);
+	if (ret)
+		goto e_key;
+	ret = ccp_set_dm_area(&key, dm_offset,
+			      des3->key, 2 * len_singlekey, len_singlekey);
+	if (ret)
+		goto e_key;
 
 	/* Copy the key to the SB */
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
@@ -1320,7 +1361,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 		/* Load the context into the LSB */
 		dm_offset = CCP_SB_BYTES - des3->iv_len;
-		ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
+		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
+				      des3->iv_len);
+		if (ret)
+			goto e_ctx;
 
 		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
 			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
@@ -1604,8 +1648,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		}
 	} else {
 		/* Restore the context */
-		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
-				sb_count * CCP_SB_BYTES);
+		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+				      sb_count * CCP_SB_BYTES);
+		if (ret)
+			goto e_ctx;
 	}
 
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
@@ -1927,7 +1973,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 		if (ret)
 			return ret;
 
-		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
+		if (ret)
+			goto e_mask;
 		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
 				     CCP_PASSTHRU_BYTESWAP_NOOP);
 		if (ret) {