author | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2018-07-30 23:06:42 +0200
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2018-08-07 11:38:04 +0200
commit | 30f1a9f53e77e4c9ddf55ebfda8a9d7666e46964
tree | 6696057d0c4cb95671a65cad2df8f9b573a0ed6d /arch
parent | crypto: arm64/aes-ce-gcm - implement 2-way aggregation
crypto: arm64/aes-ce-gcm - don't reload key schedule if avoidable
Squeeze out another 5% of performance by minimizing the number
of invocations of kernel_neon_begin()/kernel_neon_end() on the
common path, which also allows some reloads of the key schedule
to be optimized away.
The resulting code runs at 2.3 cycles per byte on a Cortex-A53.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
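
In glue-code terms, the patch keeps the NEON section that was opened for the IV/tag pre-computation alive into the first pass over the data and passes a NULL round-key pointer, so the assembly skips reloading the key schedule it already holds (the cbnz x6, 4f path in the .S change); only later passes re-enter the NEON section and pass the real key pointer. The following stand-alone mock sketches that control flow only; every mock_* name and the key_enc array are illustrative stand-ins, not the kernel or crypto API:

/*
 * Illustrative mock of the lazy key-schedule reload pattern used in this
 * patch. Not kernel code: mock_neon_begin()/mock_neon_end() stand in for
 * kernel_neon_begin()/kernel_neon_end(), and mock_gcm_chunk() stands in for
 * pmull_gcm_encrypt()/pmull_gcm_decrypt(), which only reload the round keys
 * when given a non-NULL key pointer.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool neon_on;
static bool keys_loaded;

static void mock_neon_begin(void) { neon_on = true; }
static void mock_neon_end(void)   { neon_on = false; keys_loaded = false; }

/* Stand-in for the asm routine: reloads the key schedule only if rk != NULL. */
static void mock_gcm_chunk(const unsigned *rk)
{
	if (rk)
		keys_loaded = true;	/* models the load_round_keys branch */
	printf("chunk: neon=%d keys_loaded=%d reload=%s\n",
	       neon_on, keys_loaded, rk ? "yes" : "no");
}

int main(void)
{
	static const unsigned key_enc[4] = { 1, 2, 3, 4 };
	const unsigned *rk = NULL;	/* NULL: first pass reuses the open section */
	int chunks_left = 3;

	mock_neon_begin();		/* opened once for the IV/tag pre-computation */
	keys_loaded = true;		/* key schedule loaded by that pre-computation */

	do {
		if (rk)			/* only later passes re-enter the NEON section */
			mock_neon_begin();

		mock_gcm_chunk(rk);
		mock_neon_end();	/* every pass still closes the section */

		rk = key_enc;		/* from now on, ask the asm to reload the keys */
	} while (--chunks_left);

	return 0;
}

When the walk covers the whole request in a single pass, rk stays NULL, so the common case ends up with a single kernel_neon_begin()/kernel_neon_end() pair and no extra key-schedule reload.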
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm64/crypto/ghash-ce-core.S | 9
-rw-r--r-- | arch/arm64/crypto/ghash-ce-glue.c | 81
2 files changed, 49 insertions, 41 deletions
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index f7281e7a592f..913e49932ae6 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -1,7 +1,7 @@
 /*
  * Accelerated GHASH implementation with ARMv8 PMULL instructions.
  *
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -332,8 +332,6 @@ ENDPROC(pmull_ghash_update_p8)
 	ld1		{XL.2d}, [x1]
 	ldr		x8, [x5, #8]			// load lower counter
 
-	load_round_keys	w7, x6
-
 	movi		MASK.16b, #0xe1
 	trn1		SHASH2.2d, SHASH.2d, HH.2d
 	trn2		T1.2d, SHASH.2d, HH.2d
@@ -346,6 +344,8 @@ CPU_LE(	rev		x8, x8		)
 	ld1		{KS0.16b-KS1.16b}, [x10]
 	.endif
 
+	cbnz		x6, 4f
+
 0:	ld1		{INP0.16b-INP1.16b}, [x3], #32
 
 	rev		x9, x8
@@ -471,6 +471,9 @@ CPU_LE(	rev		x8, x8		)
 	enc_round	KS0, v20
 	enc_round	KS1, v20
 	b		1b
+
+4:	load_round_keys	w7, x6
+	b		0b
 	.endm
 
 	/*
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index cd91b146c87d..42a0e84e276c 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
 /*
  * Accelerated GHASH implementation with ARMv8 PMULL instructions.
  *
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -373,37 +373,39 @@ static int gcm_encrypt(struct aead_request *req)
 	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(1, iv + GCM_IV_SIZE);
 
-	if (likely(may_use_simd())) {
-		kernel_neon_begin();
+	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
+	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+		u32 const *rk = NULL;
+
+		kernel_neon_begin();
 		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 		pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
 		put_unaligned_be32(3, iv + GCM_IV_SIZE);
 		pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
 		put_unaligned_be32(4, iv + GCM_IV_SIZE);
-		kernel_neon_end();
-
-		err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-		while (walk.nbytes >= 2 * AES_BLOCK_SIZE) {
+		do {
 			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 
-			kernel_neon_begin();
+			if (rk)
+				kernel_neon_begin();
+
 			pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
 					  walk.src.virt.addr, ctx->h2, iv,
-					  ctx->aes_key.key_enc, nrounds, ks);
+					  rk, nrounds, ks);
 			kernel_neon_end();
 
 			err = skcipher_walk_done(&walk,
 					walk.nbytes % (2 * AES_BLOCK_SIZE));
-		}
+
+			rk = ctx->aes_key.key_enc;
+		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_encrypt(&walk, req, false);
-
 		while (walk.nbytes >= AES_BLOCK_SIZE) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;
@@ -485,50 +487,53 @@ static int gcm_decrypt(struct aead_request *req)
 	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(1, iv + GCM_IV_SIZE);
 
-	if (likely(may_use_simd())) {
+	err = skcipher_walk_aead_decrypt(&walk, req, false);
+
+	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+		u32 const *rk = NULL;
+
 		kernel_neon_begin();
 		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
-		kernel_neon_end();
 
-		err = skcipher_walk_aead_decrypt(&walk, req, false);
-
-		while (walk.nbytes >= 2 * AES_BLOCK_SIZE) {
+		do {
 			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+			int rem = walk.total - blocks * AES_BLOCK_SIZE;
+
+			if (rk)
+				kernel_neon_begin();
 
-			kernel_neon_begin();
 			pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
 					  walk.src.virt.addr, ctx->h2, iv,
-					  ctx->aes_key.key_enc, nrounds);
-			kernel_neon_end();
+					  rk, nrounds);
 
-			err = skcipher_walk_done(&walk,
-					walk.nbytes % (2 * AES_BLOCK_SIZE));
-		}
+			/* check if this is the final iteration of the loop */
+			if (rem < (2 * AES_BLOCK_SIZE)) {
+				u8 *iv2 = iv + AES_BLOCK_SIZE;
 
-		if (walk.nbytes) {
-			u8 *iv2 = iv + AES_BLOCK_SIZE;
+				if (rem > AES_BLOCK_SIZE) {
+					memcpy(iv2, iv, AES_BLOCK_SIZE);
+					crypto_inc(iv2, AES_BLOCK_SIZE);
+				}
 
-			if (walk.nbytes > AES_BLOCK_SIZE) {
-				memcpy(iv2, iv, AES_BLOCK_SIZE);
-				crypto_inc(iv2, AES_BLOCK_SIZE);
-			}
+				pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
 
-			kernel_neon_begin();
-			pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
-						nrounds);
+				if (rem > AES_BLOCK_SIZE)
+					pmull_gcm_encrypt_block(iv2, iv2, NULL,
+								nrounds);
+			}
 
-			if (walk.nbytes > AES_BLOCK_SIZE)
-				pmull_gcm_encrypt_block(iv2, iv2, NULL,
-							nrounds);
 			kernel_neon_end();
-		}
+
+			err = skcipher_walk_done(&walk,
+					walk.nbytes % (2 * AES_BLOCK_SIZE));
+
+			rk = ctx->aes_key.key_enc;
+		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_decrypt(&walk, req, false);
-
 		while (walk.nbytes >= AES_BLOCK_SIZE) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;