author     | Masahiro Yamada <masahiroy@kernel.org> | 2020-03-26 09:00:55 +0100
committer  | Masahiro Yamada <masahiroy@kernel.org> | 2020-04-08 17:01:59 +0200
commit     | 42251572c4687813d8e7b1d363a23d0f9201e69f (patch)
tree       | b6321c02de1b4950ebb8dc4015b9cc0ca72e4b29 /arch/x86/crypto/aesni-intel_glue.c
parent     | x86: remove always-defined CONFIG_AS_SSSE3 (diff)
download   | linux-42251572c4687813d8e7b1d363a23d0f9201e69f.tar.xz
           | linux-42251572c4687813d8e7b1d363a23d0f9201e69f.zip
x86: remove always-defined CONFIG_AS_AVX
CONFIG_AS_AVX was introduced by commit ea4d26ae24e5 ("raid5: add AVX
optimized RAID5 checksumming").
We raise the minimum supported binutils version from time to time.
The last bump was commit 1fb12b35e5ff ("kbuild: Raise the minimum
required binutils version to 2.21").
I confirmed that the code in $(call as-instr,...) can be assembled by the
binutils 2.21 assembler and also by the LLVM integrated assembler.
Remove CONFIG_AS_AVX, which is always defined.
Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/crypto/aesni-intel_glue.c')
-rw-r--r-- | arch/x86/crypto/aesni-intel_glue.c | 14
1 file changed, 1 insertion, 13 deletions
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 75b6ea20491e..655ad6bc8810 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -185,7 +185,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
         .finalize = &aesni_gcm_finalize,
 };
 
-#ifdef CONFIG_AS_AVX
 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                 void *keys, u8 *out, unsigned int num_bytes);
 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
@@ -234,8 +233,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
         .finalize = &aesni_gcm_finalize_avx_gen2,
 };
 
-#endif
-
 #ifdef CONFIG_AS_AVX2
 /*
  * asmlinkage void aesni_gcm_init_avx_gen4()
@@ -476,7 +473,6 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
         crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
-#ifdef CONFIG_AS_AVX
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                               const u8 *in, unsigned int len, u8 *iv)
 {
@@ -493,7 +489,6 @@ static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
         else
                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
 }
-#endif
 
 static int ctr_crypt(struct skcipher_request *req)
 {
@@ -715,10 +710,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
         if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
                 gcm_tfm = &aesni_gcm_tfm_avx_gen2;
 #endif
-#ifdef CONFIG_AS_AVX
         if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
                 gcm_tfm = &aesni_gcm_tfm_sse;
-#endif
 
         /* Linearize assoc, if not already linear */
         if (req->src->length >= assoclen && req->src->length &&
@@ -1082,25 +1075,20 @@ static int __init aesni_init(void)
                 aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
         } else
 #endif
-#ifdef CONFIG_AS_AVX
         if (boot_cpu_has(X86_FEATURE_AVX)) {
                 pr_info("AVX version of gcm_enc/dec engaged.\n");
                 aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
-        } else
-#endif
-        {
+        } else {
                 pr_info("SSE version of gcm_enc/dec engaged.\n");
                 aesni_gcm_tfm = &aesni_gcm_tfm_sse;
         }
         aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
         if (boot_cpu_has(X86_FEATURE_AVX)) {
                 /* optimize performance of ctr mode encryption transform */
                 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                 pr_info("AES CTR mode by8 optimization enabled\n");
         }
 #endif
-#endif
 
         err = crypto_register_alg(&aesni_cipher_alg);
         if (err)
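
The change is mechanical: the AVX routines are now always assembled (binutils >= 2.21 and the LLVM integrated assembler can both build them), while the decision to actually use them stays a run-time CPU feature check. As an illustration only, here is a minimal user-space C sketch of that dispatch pattern; it is not the kernel's code. gcm_enc_sse and gcm_enc_avx2 are hypothetical stand-ins for the real asmlinkage helpers, and __builtin_cpu_supports("avx") plays the role of boot_cpu_has(X86_FEATURE_AVX) in aesni_init():

#include <stdio.h>

/*
 * Hypothetical stand-ins for the SSE and AVX "gen2" GCM helpers; the real
 * routines in aesni-intel_glue.c are assembly functions declared asmlinkage.
 */
static void gcm_enc_sse(void)  { puts("SSE gcm_enc selected"); }
static void gcm_enc_avx2(void) { puts("AVX gen2 gcm_enc selected"); }

int main(void)
{
        /* Default to the SSE path, as the kernel does. */
        void (*gcm_enc)(void) = gcm_enc_sse;

        /*
         * The AVX path is always compiled in; whether it is used is decided
         * purely at run time, analogous to boot_cpu_has(X86_FEATURE_AVX).
         */
        __builtin_cpu_init();
        if (__builtin_cpu_supports("avx"))
                gcm_enc = gcm_enc_avx2;

        gcm_enc();
        return 0;
}

Built with any x86 GCC or Clang, this prints which path the running CPU would take; in the kernel the same selection now happens once at module init, no longer additionally gated by a compile-time CONFIG_AS_AVX check.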