author     Linus Torvalds <torvalds@linux-foundation.org>   2016-12-14 22:31:29 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-12-14 22:31:29 +0100
commit     0f1d6dfe03ca4e36132221b918499c6f0b0f048d (patch)
tree       0de8e9330610190a23e173ca7d7f3fb74a517aa2 /arch/arm64/crypto/sha256-glue.c
parent     vfs,mm: fix return value of read() at s_maxbytes (diff)
parent     crypto: testmgr - fix overlap in chunked tests again (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.10:
API:
- add skcipher walk interface
- add asynchronous compression (acomp) interface
- fix algif_aead AIO handling of zero buffer
Algorithms:
- fix unaligned access in poly1305
- fix DRBG output to large buffers
Drivers:
- add support for iMX6UL to caam
- fix givenc descriptors (used by IPsec) in caam
- accelerated SHA256/SHA512 for ARM64 from OpenSSL
- add SSE CRCT10DIF and CRC32 to ARM/ARM64
- add AEAD support to Chelsio chcr
- add Armada 8K support to omap-rng"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (148 commits)
crypto: testmgr - fix overlap in chunked tests again
crypto: arm/crc32 - accelerated support based on x86 SSE implementation
crypto: arm64/crc32 - accelerated support based on x86 SSE implementation
crypto: arm/crct10dif - port x86 SSE implementation to ARM
crypto: arm64/crct10dif - port x86 SSE implementation to arm64
crypto: testmgr - add/enhance test cases for CRC-T10DIF
crypto: testmgr - avoid overlap in chunked tests
crypto: chcr - checking for IS_ERR() instead of NULL
crypto: caam - check caam_emi_slow instead of re-lookup platform
crypto: algif_aead - fix AIO handling of zero buffer
crypto: aes-ce - Make aes_simd_algs static
crypto: algif_skcipher - set error code when kcalloc fails
crypto: caam - make caamalg_desc a proper module
crypto: caam - pass key buffers with typesafe pointers
crypto: arm64/aes-ce-ccm - Fix AEAD decryption length
MAINTAINERS: add crypto headers to crypto entry
crypto: doc - remove misleading mention of async API
crypto: doc - fix header file name
crypto: api - fix comment typo
crypto: skcipher - Add separate walker for AEAD decryption
...
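
For orientation before the file added below: kernel consumers do not name these accelerated drivers directly. They allocate a transform for the generic algorithm name ("sha256"), and the crypto API resolves it to the highest-priority registered implementation, which is how the new OpenSSL-derived arm64 code is picked up transparently. The following is a minimal sketch of such a caller against the 4.10-era shash API; the function name and error handling are illustrative, not part of this merge.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/types.h>

/* Illustrative helper (not part of this merge): hash len bytes at data into out[32]. */
static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int ret;

        /* "sha256" resolves to the highest-priority driver, e.g. "sha256-arm64-neon". */
        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;
                ret = crypto_shash_digest(desc, data, len, out);
                shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return ret;
}

crypto_shash_digest() dispatches to the selected driver's update/finup hooks, so a caller written like this gets the NEON path, including its scalar fallback, without any arch-specific code of its own.
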
Diffstat (limited to 'arch/arm64/crypto/sha256-glue.c')
-rw-r--r--   arch/arm64/crypto/sha256-glue.c | 185
1 file changed, 185 insertions, 0 deletions
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
new file mode 100644
index 000000000000..a2226f841960
--- /dev/null
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -0,0 +1,185 @@
+/*
+ * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
+ *
+ * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
+MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+
+asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+                                        unsigned int num_blks);
+
+asmlinkage void sha256_block_neon(u32 *digest, const void *data,
+                                  unsigned int num_blks);
+
+static int sha256_update(struct shash_desc *desc, const u8 *data,
+                         unsigned int len)
+{
+        return sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_data_order);
+}
+
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+                        unsigned int len, u8 *out)
+{
+        if (len)
+                sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_data_order);
+        sha256_base_do_finalize(desc,
+                                (sha256_block_fn *)sha256_block_data_order);
+
+        return sha256_base_finish(desc, out);
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+        return sha256_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg algs[] = { {
+        .digestsize             = SHA256_DIGEST_SIZE,
+        .init                   = sha256_base_init,
+        .update                 = sha256_update,
+        .final                  = sha256_final,
+        .finup                  = sha256_finup,
+        .descsize               = sizeof(struct sha256_state),
+        .base.cra_name          = "sha256",
+        .base.cra_driver_name   = "sha256-arm64",
+        .base.cra_priority      = 100,
+        .base.cra_flags         = CRYPTO_ALG_TYPE_SHASH,
+        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
+        .base.cra_module        = THIS_MODULE,
+}, {
+        .digestsize             = SHA224_DIGEST_SIZE,
+        .init                   = sha224_base_init,
+        .update                 = sha256_update,
+        .final                  = sha256_final,
+        .finup                  = sha256_finup,
+        .descsize               = sizeof(struct sha256_state),
+        .base.cra_name          = "sha224",
+        .base.cra_driver_name   = "sha224-arm64",
+        .base.cra_priority      = 100,
+        .base.cra_flags         = CRYPTO_ALG_TYPE_SHASH,
+        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
+        .base.cra_module        = THIS_MODULE,
+} };
+
+static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
+                              unsigned int len)
+{
+        /*
+         * Stacking and unstacking a substantial slice of the NEON register
+         * file may significantly affect performance for small updates when
+         * executing in interrupt context, so fall back to the scalar code
+         * in that case.
+         */
+        if (!may_use_simd())
+                return sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_data_order);
+
+        kernel_neon_begin();
+        sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_neon);
+        kernel_neon_end();
+
+        return 0;
+}
+
+static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
+                             unsigned int len, u8 *out)
+{
+        if (!may_use_simd()) {
+                if (len)
+                        sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_data_order);
+                sha256_base_do_finalize(desc,
+                                (sha256_block_fn *)sha256_block_data_order);
+        } else {
+                kernel_neon_begin();
+                if (len)
+                        sha256_base_do_update(desc, data, len,
+                                (sha256_block_fn *)sha256_block_neon);
+                sha256_base_do_finalize(desc,
+                                (sha256_block_fn *)sha256_block_neon);
+                kernel_neon_end();
+        }
+        return sha256_base_finish(desc, out);
+}
+
+static int sha256_final_neon(struct shash_desc *desc, u8 *out)
+{
+        return sha256_finup_neon(desc, NULL, 0, out);
+}
+
+static struct shash_alg neon_algs[] = { {
+        .digestsize             = SHA256_DIGEST_SIZE,
+        .init                   = sha256_base_init,
+        .update                 = sha256_update_neon,
+        .final                  = sha256_final_neon,
+        .finup                  = sha256_finup_neon,
+        .descsize               = sizeof(struct sha256_state),
+        .base.cra_name          = "sha256",
+        .base.cra_driver_name   = "sha256-arm64-neon",
+        .base.cra_priority      = 150,
+        .base.cra_flags         = CRYPTO_ALG_TYPE_SHASH,
+        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
+        .base.cra_module        = THIS_MODULE,
+}, {
+        .digestsize             = SHA224_DIGEST_SIZE,
+        .init                   = sha224_base_init,
+        .update                 = sha256_update_neon,
+        .final                  = sha256_final_neon,
+        .finup                  = sha256_finup_neon,
+        .descsize               = sizeof(struct sha256_state),
+        .base.cra_name          = "sha224",
+        .base.cra_driver_name   = "sha224-arm64-neon",
+        .base.cra_priority      = 150,
+        .base.cra_flags         = CRYPTO_ALG_TYPE_SHASH,
+        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
+        .base.cra_module        = THIS_MODULE,
+} };
+
+static int __init sha256_mod_init(void)
+{
+        int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+        if (ret)
+                return ret;
+
+        if (elf_hwcap & HWCAP_ASIMD) {
+                ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
+                if (ret)
+                        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+        }
+        return ret;
+}
+
+static void __exit sha256_mod_fini(void)
+{
+        if (elf_hwcap & HWCAP_ASIMD)
+                crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
+        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha256_mod_init);
+module_exit(sha256_mod_fini);
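
A note on the registration scheme above: sha256_mod_init() always registers the scalar "sha256-arm64"/"sha224-arm64" algorithms at priority 100 and, only when the CPU advertises HWCAP_ASIMD, adds the NEON variants at priority 150, so a plain "sha256" allocation prefers the NEON driver wherever it exists. The sketch below, with an illustrative function name, shows one way to confirm which driver backs such an allocation; it assumes a context where allocating a transform is permissible.

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Illustrative probe (not part of this merge): report the driver behind "sha256". */
static void example_report_sha256_driver(void)
{
        struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);

        if (IS_ERR(tfm))
                return;

        /*
         * On a NEON-capable arm64 CPU with this module loaded, this is
         * expected to print "sha256-arm64-neon" (priority 150); otherwise a
         * lower-priority driver such as "sha256-arm64" is selected.
         */
        pr_info("sha256 backed by %s\n",
                crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)));

        crypto_free_shash(tfm);
}

The same information is also visible without any code: each registered algorithm's name, driver and priority fields appear in /proc/crypto.
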