author		Horia Geantă <horia.geanta@nxp.com>	2018-07-23 16:18:48 +0200
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-08-03 12:06:02 +0200
commit		2af632996b898687d382f2b52e820141aba72d63 (patch)
tree		609a05e4f08b5287738dbfedc7f496cfab2a12d9 /crypto/tcrypt.c
parent		crypto: virtio - Replace GFP_ATOMIC with GFP_KERNEL in __virtio_crypto_ablkci... (diff)
crypto: tcrypt - reschedule during speed tests
Avoid RCU stalls in the case of a non-preemptible kernel and lengthy
speed tests by rescheduling when advancing from one block size to
another.

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
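As a side note for readers, the pattern being applied is a minimal one: call
cond_resched() between iterations of a long-running loop so that, on a
non-preemptible (CONFIG_PREEMPT=n) kernel, the scheduler gets a chance to run
and RCU grace periods can complete. A sketch of the idea follows; the loop and
the do_one_speed_test() helper are hypothetical stand-ins for tcrypt's
per-algorithm test functions, and only cond_resched() is the real kernel API:

#include <linux/sched.h>	/* cond_resched() */

/* Hypothetical helper standing in for test_*_jiffies()/test_*_cycles(). */
static int do_one_speed_test(int enc, unsigned int b_size, unsigned int secs);

static int run_speed_tests(const unsigned int *b_size, int enc,
			   unsigned int secs)
{
	int ret;

	/* b_size is a zero-terminated table of block sizes to test. */
	for (; *b_size; b_size++) {
		ret = do_one_speed_test(enc, *b_size, secs);
		if (ret)
			return ret;
		/*
		 * Without this, a CONFIG_PREEMPT=n kernel has no scheduling
		 * point in the loop, and a long run of block sizes can
		 * trigger RCU stall warnings.
		 */
		cond_resched();
	}
	return 0;
}

Note that in the patch below, cond_resched() is added only inside the secs
(jiffies-based) branches; the cycle-counting branches are left as they were.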
Diffstat (limited to 'crypto/tcrypt.c')
 crypto/tcrypt.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 078ec36007bf..bdde95e8d369 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -415,12 +415,14 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 		}
 
-		if (secs)
+		if (secs) {
 			ret = test_mb_aead_jiffies(data, enc, *b_size,
 						   secs, num_mb);
-		else
+			cond_resched();
+		} else {
 			ret = test_mb_aead_cycles(data, enc, *b_size,
 						  num_mb);
+		}
 
 		if (ret) {
 			pr_err("%s() failed return code=%d\n", e, ret);
@@ -660,11 +662,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 				       *b_size + (enc ? 0 : authsize),
 				       iv);
 
-		if (secs)
+		if (secs) {
 			ret = test_aead_jiffies(req, enc, *b_size,
 						secs);
-		else
+			cond_resched();
+		} else {
 			ret = test_aead_cycles(req, enc, *b_size);
+		}
 
 		if (ret) {
 			pr_err("%s() failed return code=%d\n", e, ret);
@@ -876,11 +880,13 @@ static void test_mb_ahash_speed(const char *algo, unsigned int secs,
 		   i, speed[i].blen, speed[i].plen,
 		   speed[i].blen / speed[i].plen);
 
-		if (secs)
+		if (secs) {
 			ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
 						    num_mb);
-		else
+			cond_resched();
+		} else {
 			ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+		}
 
 		if (ret) {
@@ -1103,12 +1109,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
 		ahash_request_set_crypt(req, sg, output, speed[i].plen);
 
-		if (secs)
+		if (secs) {
 			ret = test_ahash_jiffies(req, speed[i].blen,
 						 speed[i].plen, output, secs);
-		else
+			cond_resched();
+		} else {
 			ret = test_ahash_cycles(req, speed[i].blen,
 						speed[i].plen, output);
+		}
 
 		if (ret) {
 			pr_err("hashing failed ret=%d\n", ret);
@@ -1367,13 +1375,15 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 						    iv);
 		}
 
-		if (secs)
+		if (secs) {
 			ret = test_mb_acipher_jiffies(data, enc,
 						      *b_size, secs,
 						      num_mb);
-		else
+			cond_resched();
+		} else {
 			ret = test_mb_acipher_cycles(data, enc,
 						     *b_size, num_mb);
+		}
 
 		if (ret) {
 			pr_err("%s() failed flags=%x\n", e,
@@ -1581,12 +1591,14 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 		skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
-		if (secs)
+		if (secs) {
 			ret = test_acipher_jiffies(req, enc,
 						   *b_size, secs);
-		else
+			cond_resched();
+		} else {
 			ret = test_acipher_cycles(req, enc,
 						  *b_size);
+		}
 
 		if (ret) {
 			pr_err("%s() failed flags=%x\n", e,