| author | Tom Lendacky <thomas.lendacky@amd.com> | 2014-02-24 15:42:14 +0100 |
| --- | --- | --- |
| committer | Herbert Xu <herbert@gondor.apana.org.au> | 2014-02-26 22:57:01 +0100 |
| commit | c65a52f8360dc29116395708bde18399e4699f87 (patch) | |
| tree | 82e139cf4cde4bcb172aa94243a015590ab645aa /drivers | |
| parent | crypto: ccp - Invoke context callback when there is a backlog error (diff) | |
crypto: ccp - Account for CCP backlog processing
When a command is submitted to the CCP on the initial call to
ccp_crypto_enqueue_request and the CCP returns -EBUSY, the command
must be freed, not added to the active command list, if the backlog
flag is not set.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 7d98635c2c5e..20dc848481e7 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -205,6 +205,7 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 {
 	struct ccp_crypto_cmd *active = NULL, *tmp;
 	unsigned long flags;
+	bool free_cmd = true;
 	int ret;
 
 	spin_lock_irqsave(&req_queue_lock, flags);
@@ -231,7 +232,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 	if (!active) {
 		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
 		if (!ccp_crypto_success(ret))
-			goto e_lock;
+			goto e_lock;	/* Error, don't queue it */
+		if ((ret == -EBUSY) &&
+		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+			goto e_lock;	/* Not backlogging, don't queue it */
 	}
 
 	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
@@ -244,9 +248,14 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
 
 	req_queue.cmd_count++;
 	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);
+	free_cmd = false;
+
 e_lock:
 	spin_unlock_irqrestore(&req_queue_lock, flags);
 
+	if (free_cmd)
+		kfree(crypto_cmd);
+
 	return ret;
 }
 
@@ -262,7 +271,6 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 {
 	struct ccp_crypto_cmd *crypto_cmd;
 	gfp_t gfp;
-	int ret;
 
 	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 
@@ -287,11 +295,7 @@ int ccp_crypto_enqueue_request(struct crypto_async_request *req,
 	else
 		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;
 
-	ret = ccp_crypto_enqueue_cmd(crypto_cmd);
-	if (!ccp_crypto_success(ret))
-		kfree(crypto_cmd);
-
-	return ret;
+	return ccp_crypto_enqueue_cmd(crypto_cmd);
 }
 
 struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
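To make the ownership rule behind this patch concrete, here is a minimal user-space C sketch of the same pattern: the enqueue helper owns the command until it is actually placed on the active list, and every exit path that does not queue it frees it. All names in the sketch (fake_cmd, hw_enqueue, enqueue_cmd, MAY_BACKLOG, active_list) are hypothetical stand-ins, not the driver's API; the real code operates on struct ccp_crypto_cmd under req_queue_lock and uses ccp_enqueue_cmd and CCP_CMD_MAY_BACKLOG.

```c
/*
 * Minimal sketch of the "free on every non-queued path" pattern applied
 * by this patch.  Everything here is a hypothetical stand-in for the
 * kernel code; there is no locking and no real hardware queue.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAY_BACKLOG 0x1

struct fake_cmd {
	unsigned int flags;
	struct fake_cmd *next;	/* singly linked "active" list */
};

static struct fake_cmd *active_list;

/* Pretend hardware queue: always report busy so the backlog check matters. */
static int hw_enqueue(struct fake_cmd *cmd)
{
	(void)cmd;
	return -EBUSY;
}

static int enqueue_cmd(struct fake_cmd *cmd)
{
	bool free_cmd = true;	/* assume failure until the cmd is queued */
	int ret;

	ret = hw_enqueue(cmd);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		goto out;	/* hard error: don't queue it */
	if (ret == -EBUSY && !(cmd->flags & MAY_BACKLOG))
		goto out;	/* not backlogging: don't queue it */

	/* Command accepted (or backlogged): track it and keep it alive. */
	cmd->next = active_list;
	active_list = cmd;
	free_cmd = false;

out:
	if (free_cmd)
		free(cmd);	/* every non-queued path releases the cmd */
	return ret;
}

int main(void)
{
	struct fake_cmd *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return 1;
	/* No MAY_BACKLOG flag: -EBUSY must free the command, not queue it. */
	printf("enqueue_cmd() = %d\n", enqueue_cmd(cmd));
	return 0;
}
```

Initialising free_cmd to true and clearing it only at the point where the command is actually added to the list keeps the cleanup in a single place, so a new early-exit path cannot silently leak the command, which mirrors why the patch moves the kfree() from the caller into ccp_crypto_enqueue_cmd itself.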