author     Ofer Heifetz <oferh@marvell.com>  2018-06-28 17:21:57 +0200
committer  Herbert Xu <herbert@gondor.apana.org.au>  2018-07-08 18:30:19 +0200
commit     9744fec95f0674fbf67b12c42c3784dc299dc904 (patch)
tree       2dcff99ca9570b5b587b434e3b9b0f1d4221dea1  /drivers/crypto/inside-secure/safexcel_cipher.c
parent     crypto: inside-secure - ecb(des3_ede) and cbc(des3_ede) support (diff)
crypto: inside-secure - remove request list to improve performance
The main goal of this patch is to improve driver performance by moving the crypto request from a list to an RDR ring shadow. This is possible since there is exactly one producer and one consumer for this RDR request shadow, and one ring descriptor is left unused. This change eliminates the spinlock taken when accessing the descriptor ring, as well as the need to dynamically allocate memory per crypto request.

The crypto request is placed in the first RDR shadow descriptor only if there are enough descriptors; when the result handler is invoked, it fetches the first result descriptor from the RDR shadow.

Signed-off-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
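For readers unfamiliar with the trick the commit message relies on, this is a classic lock-free single-producer/single-consumer ring in which one slot is deliberately kept unused, so that an equal read and write index unambiguously means "empty". Below is a minimal, self-contained C sketch of the idea; the names (req_ring, ring_req_set, ring_req_get) and the fixed ring size are illustrative assumptions, not the driver's actual code, and a real SMP implementation would also need the appropriate memory barriers.

#include <stddef.h>

/*
 * Minimal single-producer/single-consumer ring with a request
 * "shadow": each slot mirrors a result-descriptor slot, so the
 * completion path can recover the originating request without a
 * list, a lock, or a per-request allocation. One slot is left
 * unused so that write == read unambiguously means "empty".
 */
#define RING_SIZE 8 /* illustrative; real ring sizes are hardware-defined */

struct req_ring {
	void *req_shadow[RING_SIZE]; /* written by producer, read by consumer */
	unsigned int write;          /* advanced only by the producer */
	unsigned int read;           /* advanced only by the consumer */
};

/* Producer: stash the request at the slot of its first result descriptor. */
static int ring_req_set(struct req_ring *ring, void *req)
{
	unsigned int next = (ring->write + 1) % RING_SIZE;

	if (next == ring->read)
		return -1; /* full: the unused slot keeps "full" != "empty" */

	ring->req_shadow[ring->write] = req;
	ring->write = next;
	return 0;
}

/* Consumer: fetch the request owning the first pending descriptor. */
static void *ring_req_get(struct req_ring *ring)
{
	void *req;

	if (ring->read == ring->write)
		return NULL; /* empty */

	req = ring->req_shadow[ring->read];
	ring->read = (ring->read + 1) % RING_SIZE;
	return req;
}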
Diffstat (limited to 'drivers/crypto/inside-secure/safexcel_cipher.c')
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 39
1 file changed, 12 insertions(+), 27 deletions(-)
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index e175d0bf69d8..9d292427a488 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -336,7 +336,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -353,7 +352,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (src == dst) {
dma_unmap_sg(priv->dev, src,
@@ -374,7 +372,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
static int safexcel_send_req(struct crypto_async_request *base, int ring,
- struct safexcel_request *request,
struct safexcel_cipher_req *sreq,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, unsigned int assoclen,
@@ -384,7 +381,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_command_desc *cdesc;
- struct safexcel_result_desc *rdesc;
+ struct safexcel_result_desc *rdesc, *first_rdesc;
struct scatterlist *sg;
unsigned int totlen = cryptlen + assoclen;
int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -424,8 +421,6 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
ctx->opad, ctx->state_sz);
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* command descriptors */
for_each_sg(src, sg, nr_src, i) {
int len = sg_dma_len(sg);
@@ -472,12 +467,12 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
ret = PTR_ERR(rdesc);
goto rdesc_rollback;
}
+ if (first)
+ first_rdesc = rdesc;
n_rdesc++;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- request->req = base;
+ safexcel_rdr_req_set(priv, ring, first_rdesc, base);
*commands = n_cdesc;
*results = n_rdesc;
@@ -490,8 +485,6 @@ cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
if (src == dst) {
dma_unmap_sg(priv->dev, src,
sg_nents_for_len(src, totlen),
@@ -519,7 +512,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -536,7 +528,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -612,15 +603,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
}
static int safexcel_cipher_send_inv(struct crypto_async_request *base,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
- request);
+ ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -631,7 +620,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
}
static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
@@ -643,18 +631,16 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_send_req(async, ring, request, sreq, req->src,
+ ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
return ret;
}
static int safexcel_aead_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+ int *commands, int *results)
{
struct aead_request *req = aead_request_cast(async);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -666,11 +652,10 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_send_req(async, ring, request, sreq, req->src,
- req->dst, req->cryptlen, req->assoclen,
+ ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+ req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
return ret;
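The safexcel_rdr_req_set() call in the send path above, and the matching fetch in the result handlers, pair each request with its first result descriptor by index. A hedged sketch of that pairing follows; the types and helper names are illustrative stand-ins, not the driver's definitions, which live in its ring code.

#include <stddef.h>

/* Stand-in types for illustration only. */
struct result_desc_sketch { unsigned int last_seg; };

struct rdr_sketch {
	struct result_desc_sketch rdr[8]; /* result descriptor ring (RDR) */
	void *rdr_req[8];                 /* request shadow, one per RDR slot */
	unsigned int next_rptr;           /* consumer's read index */
};

/*
 * Producer side, as in the diff's
 * safexcel_rdr_req_set(priv, ring, first_rdesc, base): record the
 * request at the index of its first result descriptor, derived from
 * the descriptor's position within the ring.
 */
static void rdr_req_set_sketch(struct rdr_sketch *ring,
			       struct result_desc_sketch *first_rdesc,
			       void *req)
{
	size_t i = (size_t)(first_rdesc - ring->rdr);

	ring->rdr_req[i] = req;
}

/*
 * Consumer side: before the result handler walks descriptors with
 * safexcel_ring_next_rptr(), it can read the request back from the
 * shadow entry of the first pending RDR slot.
 */
static void *rdr_req_get_sketch(struct rdr_sketch *ring)
{
	return ring->rdr_req[ring->next_rptr];
}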