Diffstat (limited to 'drivers/crypto/qat/qat_common/qat_algs.c')
-rw-r--r-- | drivers/crypto/qat/qat_common/qat_algs.c | 153
1 file changed, 85 insertions, 68 deletions
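Two groups of symbols used in the hunks below are defined outside this file: the descriptor structs deleted from qat_algs.c are still dereferenced through bufl->bufers[], and the new code touches qat_req->buf.sgl_src, the sgl_src_valid/sgl_dst_valid flags, QAT_MAX_BUFF_DESC and qat_algs_alloc_flags(), none of which appear in this diff. They presumably move to a shared header (qat_crypto.h in this driver layout) so that each request embeds a small pre-allocated SGL descriptor and only falls back to kzalloc_node() beyond QAT_MAX_BUFF_DESC entries. The following is a sketch of that assumed header, reconstructed from usage here; the fixed-list type names, the QAT_MAX_BUFF_DESC value of 4 and the body of qat_algs_alloc_flags() are inferences, not shown by this diff:

	/* Sketch only: inferred from usage in this diff, not shown by it. */
	#include <linux/crypto.h>
	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	#define QAT_MAX_BUFF_DESC	4	/* assumed inline-descriptor threshold */

	struct qat_alg_buf {
		u32 len;
		u32 resrvd;
		u64 addr;
	} __packed;

	struct qat_alg_buf_list {
		u64 resrvd;
		u32 num_bufs;
		u32 num_mapped_bufs;
		struct qat_alg_buf bufers[];
	} __packed __aligned(64);

	/* Header plus fixed descriptor slots, so that &buf.sgl_src.sgl_hdr can
	 * stand in wherever a kzalloc'd qat_alg_buf_list was used before. */
	struct qat_alg_fixed_buf_list {
		struct qat_alg_buf_list sgl_hdr;
		struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
	} __packed __aligned(64);

	struct qat_crypto_request_buffs {
		struct qat_alg_buf_list *bl;
		dma_addr_t blp;
		struct qat_alg_buf_list *blout;
		dma_addr_t bloutp;
		size_t sz;
		size_t sz_out;
		bool sgl_src_valid;	/* bl points at the embedded sgl_src */
		bool sgl_dst_valid;	/* blout points at the embedded sgl_dst */
		struct qat_alg_fixed_buf_list sgl_src;
		struct qat_alg_fixed_buf_list sgl_dst;
	};

	/* Assumed helper: derive GFP flags from the request's MAY_SLEEP bit,
	 * matching the new "gfp_t f = qat_algs_alloc_flags(&areq->base)" callers. */
	static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
	{
		return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
	}

On this reading, qat_alg_free_bufl() can tell a heap descriptor from an embedded one purely from sgl_src_valid/sgl_dst_valid, which is why the kfree() calls in the diff become conditional.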
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f998ed58457c..148edbe379e3 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -17,7 +17,7 @@
 #include <crypto/xts.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
-#include "adf_transport.h"
+#include "qat_algs_send.h"
 #include "adf_common_drv.h"
 #include "qat_crypto.h"
 #include "icp_qat_hw.h"
@@ -46,19 +46,6 @@
 static DEFINE_MUTEX(algs_lock);
 static unsigned int active_devs;
 
-struct qat_alg_buf {
-	u32 len;
-	u32 resrvd;
-	u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
-	u64 resrvd;
-	u32 num_bufs;
-	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
 /* Common content descriptor */
 struct qat_alg_cd {
 	union {
@@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 				      bl->bufers[i].len,
 				      DMA_BIDIRECTIONAL);
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-	kfree(bl);
+
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bl);
+
 	if (blp != blpout) {
 		/* If out of place operation dma unmap only data */
 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -704,14 +694,17 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 			dma_unmap_single(dev, blout->bufers[i].addr,
 					 blout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-		kfree(blout);
+
+		if (!qat_req->buf.sgl_dst_valid)
+			kfree(blout);
 	}
 }
 
 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
-			       struct qat_crypto_request *qat_req)
+			       struct qat_crypto_request *qat_req,
+			       gfp_t flags)
 {
@@ -721,15 +714,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	int node = dev_to_node(&GET_DEV(inst->accel_dev));
 
 	if (unlikely(!n))
 		return -EINVAL;
 
-	bufl = kzalloc_node(sz, GFP_ATOMIC,
-			    dev_to_node(&GET_DEV(inst->accel_dev)));
-	if (unlikely(!bufl))
-		return -ENOMEM;
+	qat_req->buf.sgl_src_valid = false;
+	qat_req->buf.sgl_dst_valid = false;
+
+	if (n > QAT_MAX_BUFF_DESC) {
+		bufl = kzalloc_node(sz, flags, node);
+		if (unlikely(!bufl))
+			return -ENOMEM;
+	} else {
+		bufl = &qat_req->buf.sgl_src.sgl_hdr;
+		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+		qat_req->buf.sgl_src_valid = true;
+	}
 
 	for_each_sg(sgl, sg, n, i)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -760,12 +762,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		struct qat_alg_buf *bufers;
 
 		n = sg_nents(sglout);
-		sz_out = struct_size(buflout, bufers, n + 1);
+		sz_out = struct_size(buflout, bufers, n);
 		sg_nctr = 0;
-		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
-				       dev_to_node(&GET_DEV(inst->accel_dev)));
-		if (unlikely(!buflout))
-			goto err_in;
+
+		if (n > QAT_MAX_BUFF_DESC) {
+			buflout = kzalloc_node(sz_out, flags, node);
+			if (unlikely(!buflout))
+				goto err_in;
+		} else {
+			buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+			qat_req->buf.sgl_dst_valid = true;
+		}
 
 		bufers = buflout->bufers;
 		for_each_sg(sglout, sg, n, i)
@@ -810,7 +818,9 @@ err_out:
 			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
-	kfree(buflout);
+
+	if (!qat_req->buf.sgl_dst_valid)
+		kfree(buflout);
 
 err_in:
 	if (!dma_mapping_error(dev, blp))
@@ -823,7 +833,8 @@ err_in:
				 bufl->bufers[i].len,
				 DMA_BIDIRECTIONAL);
 
-	kfree(bufl);
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bufl);
 
 	dev_err(dev, "Failed to map buf for dma\n");
 	return -ENOMEM;
@@ -925,8 +936,25 @@ void qat_alg_callback(void *resp)
 	struct icp_qat_fw_la_resp *qat_resp = resp;
 	struct qat_crypto_request *qat_req =
			(void *)(__force long)qat_resp->opaque_data;
+	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
 
 	qat_req->cb(qat_resp, qat_req);
+
+	qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+				    struct qat_crypto_instance *inst,
+				    struct crypto_async_request *base)
+{
+	struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+	alg_req->fw_req = (u32 *)&qat_req->req;
+	alg_req->tx_ring = inst->sym_tx;
+	alg_req->base = base;
+	alg_req->backlog = &inst->backlog;
+
+	return qat_alg_send_message(alg_req);
 }
 
 static int qat_alg_aead_dec(struct aead_request *areq)
@@ -939,14 +967,15 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
 	int digst_size = crypto_aead_authsize(aead_tfm);
-	int ret, ctr = 0;
+	gfp_t f = qat_algs_alloc_flags(&areq->base);
+	int ret;
 	u32 cipher_len;
 
 	cipher_len = areq->cryptlen - digst_size;
 	if (cipher_len % AES_BLOCK_SIZE != 0)
 		return -EINVAL;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
 
@@ -965,15 +994,12 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
 
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_aead_enc(struct aead_request *areq)
@@ -984,14 +1010,15 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
+	gfp_t f = qat_algs_alloc_flags(&areq->base);
 	struct icp_qat_fw_la_bulk_req *msg;
 	u8 *iv = areq->iv;
-	int ret, ctr = 0;
+	int ret;
 
 	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
 		return -EINVAL;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
 
@@ -1013,15 +1040,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	auth_param->auth_off = 0;
 	auth_param->auth_len = areq->assoclen + areq->cryptlen;
 
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
-
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
@@ -1173,13 +1196,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	gfp_t f = qat_algs_alloc_flags(&req->base);
 	struct icp_qat_fw_la_bulk_req *msg;
-	int ret, ctr = 0;
+	int ret;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
 
@@ -1198,15 +1222,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 
 	qat_alg_set_req_iv(qat_req);
 
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
-
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
@@ -1242,13 +1262,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	gfp_t f = qat_algs_alloc_flags(&req->base);
 	struct icp_qat_fw_la_bulk_req *msg;
-	int ret, ctr = 0;
+	int ret;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
 
@@ -1268,15 +1289,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	qat_alg_set_req_iv(qat_req);
 	qat_alg_update_iv(qat_req);
 
-	do {
-		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
-	} while (ret == -EAGAIN && ctr++ < 10);
-
-	if (ret == -EAGAIN) {
+	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+	if (ret == -ENOSPC)
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		return -EBUSY;
-	}
-	return -EINPROGRESS;
+
+	return ret;
 }
 
 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
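On the submission side, the ten-iteration adf_send_message() retry loops give way to a single qat_alg_send_sym_message() call whose return value the callers now propagate directly: -EINPROGRESS means the request is on the ring, -EBUSY means it was parked on the instance backlog (the DMA mappings deliberately stay live for the later resend), and only -ENOSPC makes the caller unmap the buffers and fail. The backlog machinery itself lives in the new qat_algs_send.{c,h}, which this diff does not include. Below is a sketch of that contract, under stated assumptions: qat_instance_backlog pairing a spinlock with a list head, a list member on struct qat_alg_req, and adf_send_message() returning nonzero when the ring is full. This is an illustration of the semantics the callers rely on, not the driver's actual code.

	/* Sketch only: the real implementation is in qat_algs_send.c. */
	#include <linux/crypto.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct adf_etr_ring_data;
	int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); /* existing transport API */

	/* Assumed shapes of the types this diff populates but never defines. */
	struct qat_instance_backlog {
		struct list_head list;
		spinlock_t lock;
	};

	struct qat_alg_req {
		u32 *fw_req;
		struct adf_etr_ring_data *tx_ring;
		struct crypto_async_request *base;
		struct list_head list;
		struct qat_instance_backlog *backlog;
	};

	int qat_alg_send_message(struct qat_alg_req *req)
	{
		struct qat_instance_backlog *backlog = req->backlog;
		int ret = -EINPROGRESS;

		/* Requests that may not backlog get exactly one shot; a full
		 * ring becomes -ENOSPC so the caller unmaps and bails out. */
		if (!(req->base->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return adf_send_message(req->tx_ring, req->fw_req) ?
			       -ENOSPC : -EINPROGRESS;

		spin_lock_bh(&backlog->lock);
		/* Queue behind earlier backlogged requests to keep ordering,
		 * or when the ring rejects the descriptor. */
		if (!list_empty(&backlog->list) ||
		    adf_send_message(req->tx_ring, req->fw_req)) {
			list_add_tail(&req->list, &backlog->list);
			ret = -EBUSY;
		}
		spin_unlock_bh(&backlog->lock);

		return ret;
	}

	/* Run from qat_alg_callback() after every response: each completion
	 * frees ring space, so drain the backlog while sends keep succeeding
	 * and let the crypto API resume the waiting callers. */
	void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
	{
		struct qat_alg_req *req, *tmp;

		spin_lock_bh(&backlog->lock);
		list_for_each_entry_safe(req, tmp, &backlog->list, list) {
			if (adf_send_message(req->tx_ring, req->fw_req))
				break;	/* ring full again; keep the rest queued */
			list_del(&req->list);
			req->base->complete(req->base, -EINPROGRESS);
		}
		spin_unlock_bh(&backlog->lock);
	}

Returning -EBUSY while leaving the request's buffers mapped follows the usual crypto API backlog convention: the request is completed with -EINPROGRESS once it actually reaches the ring, and only then does the caller's completion path run.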