Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig | 26 |
-rw-r--r-- | crypto/Makefile | 9 |
-rw-r--r-- | crypto/ablk_helper.c | 150 |
-rw-r--r-- | crypto/ablkcipher.c | 21 |
-rw-r--r-- | crypto/ansi_cprng.c | 4 |
-rw-r--r-- | crypto/asymmetric_keys/Kconfig | 4 |
-rw-r--r-- | crypto/asymmetric_keys/asymmetric_type.c | 1 |
-rw-r--r-- | crypto/asymmetric_keys/public_key.c | 66 |
-rw-r--r-- | crypto/asymmetric_keys/public_key.h | 6 |
-rw-r--r-- | crypto/asymmetric_keys/rsa.c | 19 |
-rw-r--r-- | crypto/asymmetric_keys/x509_cert_parser.c | 35 |
-rw-r--r-- | crypto/asymmetric_keys/x509_parser.h | 18 |
-rw-r--r-- | crypto/asymmetric_keys/x509_public_key.c | 157 |
-rw-r--r-- | crypto/async_tx/async_memcpy.c | 37 |
-rw-r--r-- | crypto/async_tx/async_pq.c | 174 |
-rw-r--r-- | crypto/async_tx/async_raid6_recov.c | 61 |
-rw-r--r-- | crypto/async_tx/async_tx.c | 4 |
-rw-r--r-- | crypto/async_tx/async_xor.c | 123 |
-rw-r--r-- | crypto/async_tx/raid6test.c | 10 |
-rw-r--r-- | crypto/authenc.c | 54 |
-rw-r--r-- | crypto/authencesn.c | 34 |
-rw-r--r-- | crypto/ccm.c | 4 |
-rw-r--r-- | crypto/gcm.c | 2 |
-rw-r--r-- | crypto/hash_info.c | 56 |
-rw-r--r-- | crypto/memneq.c | 138 |
25 files changed, 815 insertions, 398 deletions
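
The common thread in the verification-path changes below (authenc, authencesn, ccm, gcm, rsa) is the replacement of memcmp() with the new crypto_memneq() when comparing authentication tags and signature digests, so that the comparison's running time no longer depends on where the first mismatching byte sits. As a rough illustration only -- not the kernel implementation, which also has a word-at-a-time fast path and is deliberately built with -Os per the Makefile comment below -- the core idea is:

	/* Byte-at-a-time sketch of the constant-time comparison idea
	 * behind crypto_memneq: accumulate differences with XOR/OR and
	 * never branch on the data, so every call over 'size' bytes
	 * does the same amount of work.
	 */
	#include <stddef.h>

	static unsigned long memneq_sketch(const void *a, const void *b,
					   size_t size)
	{
		const unsigned char *pa = a, *pb = b;
		unsigned long neq = 0;

		while (size > 0) {
			neq |= *pa++ ^ *pb++;	/* no early exit */
			size--;
		}
		return neq;			/* 0 iff regions are equal */
	}

Returning an accumulated XOR instead of an early-exit ordering result is what removes the timing signal; the CFLAGS changes in the Makefile hunk below exist to stop the compiler from optimizing an early return back in.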
diff --git a/crypto/Kconfig b/crypto/Kconfig index 71f337aefa39..7bcb70d216e1 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -174,9 +174,8 @@ config CRYPTO_TEST help Quick & dirty crypto test module. -config CRYPTO_ABLK_HELPER_X86 +config CRYPTO_ABLK_HELPER tristate - depends on X86 select CRYPTO_CRYPTD config CRYPTO_GLUE_HELPER_X86 @@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL select CRYPTO_AES_X86_64 if 64BIT select CRYPTO_AES_586 if !64BIT select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_ALGAPI select CRYPTO_GLUE_HELPER_X86 if 64BIT select CRYPTO_LRW @@ -895,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 depends on CRYPTO select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_CAMELLIA_X86_64 select CRYPTO_LRW @@ -917,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 depends on CRYPTO select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_CAMELLIA_X86_64 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 @@ -969,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_CAST_COMMON select CRYPTO_CAST5 help @@ -992,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_CAST_COMMON select CRYPTO_CAST6 @@ -1110,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_LRW @@ -1132,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586 depends on X86 && !64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_LRW @@ -1154,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_LRW @@ -1176,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_SERPENT select CRYPTO_SERPENT_AVX_X86_64 @@ -1292,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 depends on X86 && 64BIT select CRYPTO_ALGAPI select CRYPTO_CRYPTD - select CRYPTO_ABLK_HELPER_X86 + select CRYPTO_ABLK_HELPER select CRYPTO_GLUE_HELPER_X86 select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_X86_64 @@ -1402,6 +1401,9 @@ config CRYPTO_USER_API_SKCIPHER This option enables the user-spaces interface for symmetric key cipher algorithms. +config CRYPTO_HASH_INFO + bool + source "drivers/crypto/Kconfig" source crypto/asymmetric_keys/Kconfig diff --git a/crypto/Makefile b/crypto/Makefile index 80019ba8da3a..989c510da8cc 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -2,8 +2,13 @@ # Cryptographic API # +# memneq MUST be built with -Os or -O0 to prevent early-return optimizations +# that will defeat memneq's actual purpose to prevent timing attacks. 
+CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3 +CFLAGS_memneq.o := -Os + obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o +crypto-y := api.o cipher.o compress.o memneq.o obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o @@ -104,3 +109,5 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o obj-$(CONFIG_XOR_BLOCKS) += xor.o obj-$(CONFIG_ASYNC_CORE) += async_tx/ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ +obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o +obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c new file mode 100644 index 000000000000..ffe7278d4bd8 --- /dev/null +++ b/crypto/ablk_helper.c @@ -0,0 +1,150 @@ +/* + * Shared async block cipher helpers + * + * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> + * + * Based on aesni-intel_glue.c by: + * Copyright (C) 2008, Intel Corp. + * Author: Huang Ying <ying.huang@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#include <linux/kernel.h> +#include <linux/crypto.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/hardirq.h> +#include <crypto/algapi.h> +#include <crypto/cryptd.h> +#include <crypto/ablk_helper.h> +#include <asm/simd.h> + +int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; + int err; + + crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) + & CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(child, key, key_len); + crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) + & CRYPTO_TFM_RES_MASK); + return err; +} +EXPORT_SYMBOL_GPL(ablk_set_key); + +int __ablk_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct blkcipher_desc desc; + + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + + return crypto_blkcipher_crt(desc.tfm)->encrypt( + &desc, req->dst, req->src, req->nbytes); +} +EXPORT_SYMBOL_GPL(__ablk_encrypt); + +int ablk_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (!may_use_simd()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + + *cryptd_req = *req; + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + + return crypto_ablkcipher_encrypt(cryptd_req); + } else { + return __ablk_encrypt(req); + } +} +EXPORT_SYMBOL_GPL(ablk_encrypt); + +int ablk_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = 
crypto_ablkcipher_reqtfm(req); + struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); + + if (!may_use_simd()) { + struct ablkcipher_request *cryptd_req = + ablkcipher_request_ctx(req); + + *cryptd_req = *req; + ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); + + return crypto_ablkcipher_decrypt(cryptd_req); + } else { + struct blkcipher_desc desc; + + desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); + desc.info = req->info; + desc.flags = 0; + + return crypto_blkcipher_crt(desc.tfm)->decrypt( + &desc, req->dst, req->src, req->nbytes); + } +} +EXPORT_SYMBOL_GPL(ablk_decrypt); + +void ablk_exit(struct crypto_tfm *tfm) +{ + struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); + + cryptd_free_ablkcipher(ctx->cryptd_tfm); +} +EXPORT_SYMBOL_GPL(ablk_exit); + +int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name) +{ + struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); + struct cryptd_ablkcipher *cryptd_tfm; + + cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + ctx->cryptd_tfm = cryptd_tfm; + tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(&cryptd_tfm->base); + + return 0; +} +EXPORT_SYMBOL_GPL(ablk_init_common); + +int ablk_init(struct crypto_tfm *tfm) +{ + char drv_name[CRYPTO_MAX_ALG_NAME]; + + snprintf(drv_name, sizeof(drv_name), "__driver-%s", + crypto_tfm_alg_driver_name(tfm)); + + return ablk_init_common(tfm, drv_name); +} +EXPORT_SYMBOL_GPL(ablk_init); + +MODULE_LICENSE("GPL"); diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 7d4a8d28277e..40886c489903 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -16,9 +16,7 @@ #include <crypto/internal/skcipher.h> #include <linux/cpumask.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/rtnetlink.h> #include <linux/sched.h> #include <linux/slab.h> @@ -30,8 +28,6 @@ #include "internal.h" -static const char *skcipher_default_geniv __read_mostly; - struct ablkcipher_buffer { struct list_head entry; struct scatter_walk dst; @@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg) alg->cra_blocksize) return "chainiv"; - return alg->cra_flags & CRYPTO_ALG_ASYNC ? - "eseqiv" : skcipher_default_geniv; + return "eseqiv"; } static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) @@ -709,17 +704,3 @@ err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); - -static int __init skcipher_module_init(void) -{ - skcipher_default_geniv = num_possible_cpus() > 1 ? 
- "eseqiv" : "chainiv"; - return 0; -} - -static void skcipher_module_exit(void) -{ -} - -module_init(skcipher_module_init); -module_exit(skcipher_module_exit); diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index c0bb3778f1ae..666f1962a160 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -230,11 +230,11 @@ remainder: */ if (byte_count < DEFAULT_BLK_SZ) { empty_rbuf: - for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; - ctx->rand_data_valid++) { + while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { *ptr = ctx->rand_data[ctx->rand_data_valid]; ptr++; byte_count--; + ctx->rand_data_valid++; if (byte_count == 0) goto done; } diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index 6d2c2ea12559..03a6eb95ab50 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -12,6 +12,8 @@ if ASYMMETRIC_KEY_TYPE config ASYMMETRIC_PUBLIC_KEY_SUBTYPE tristate "Asymmetric public-key crypto algorithm subtype" select MPILIB + select PUBLIC_KEY_ALGO_RSA + select CRYPTO_HASH_INFO help This option provides support for asymmetric public key type handling. If signature generation and/or verification are to be used, @@ -20,8 +22,8 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE config PUBLIC_KEY_ALGO_RSA tristate "RSA public-key algorithm" - depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE select MPILIB_EXTRA + select MPILIB help This option enables support for the RSA algorithm (PKCS#1, RFC3447). diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index cf807654d221..b77eb5304788 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -209,6 +209,7 @@ struct key_type key_type_asymmetric = { .match = asymmetric_key_match, .destroy = asymmetric_key_destroy, .describe = asymmetric_key_describe, + .def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE, }; EXPORT_SYMBOL_GPL(key_type_asymmetric); diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index cb2e29180a87..97eb001960b9 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -22,29 +22,25 @@ MODULE_LICENSE("GPL"); -const char *const pkey_algo[PKEY_ALGO__LAST] = { +const char *const pkey_algo_name[PKEY_ALGO__LAST] = { [PKEY_ALGO_DSA] = "DSA", [PKEY_ALGO_RSA] = "RSA", }; -EXPORT_SYMBOL_GPL(pkey_algo); +EXPORT_SYMBOL_GPL(pkey_algo_name); -const char *const pkey_hash_algo[PKEY_HASH__LAST] = { - [PKEY_HASH_MD4] = "md4", - [PKEY_HASH_MD5] = "md5", - [PKEY_HASH_SHA1] = "sha1", - [PKEY_HASH_RIPE_MD_160] = "rmd160", - [PKEY_HASH_SHA256] = "sha256", - [PKEY_HASH_SHA384] = "sha384", - [PKEY_HASH_SHA512] = "sha512", - [PKEY_HASH_SHA224] = "sha224", +const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST] = { +#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \ + defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE) + [PKEY_ALGO_RSA] = &RSA_public_key_algorithm, +#endif }; -EXPORT_SYMBOL_GPL(pkey_hash_algo); +EXPORT_SYMBOL_GPL(pkey_algo); -const char *const pkey_id_type[PKEY_ID_TYPE__LAST] = { +const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST] = { [PKEY_ID_PGP] = "PGP", [PKEY_ID_X509] = "X509", }; -EXPORT_SYMBOL_GPL(pkey_id_type); +EXPORT_SYMBOL_GPL(pkey_id_type_name); /* * Provide a part of a description of the key for /proc/keys. 
@@ -56,7 +52,7 @@ static void public_key_describe(const struct key *asymmetric_key, if (key) seq_printf(m, "%s.%s", - pkey_id_type[key->id_type], key->algo->name); + pkey_id_type_name[key->id_type], key->algo->name); } /* @@ -78,21 +74,45 @@ EXPORT_SYMBOL_GPL(public_key_destroy); /* * Verify a signature using a public key. */ -static int public_key_verify_signature(const struct key *key, - const struct public_key_signature *sig) +int public_key_verify_signature(const struct public_key *pk, + const struct public_key_signature *sig) { - const struct public_key *pk = key->payload.data; + const struct public_key_algorithm *algo; + + BUG_ON(!pk); + BUG_ON(!pk->mpi[0]); + BUG_ON(!pk->mpi[1]); + BUG_ON(!sig); + BUG_ON(!sig->digest); + BUG_ON(!sig->mpi[0]); + + algo = pk->algo; + if (!algo) { + if (pk->pkey_algo >= PKEY_ALGO__LAST) + return -ENOPKG; + algo = pkey_algo[pk->pkey_algo]; + if (!algo) + return -ENOPKG; + } - if (!pk->algo->verify_signature) + if (!algo->verify_signature) return -ENOTSUPP; - if (sig->nr_mpi != pk->algo->n_sig_mpi) { + if (sig->nr_mpi != algo->n_sig_mpi) { pr_debug("Signature has %u MPI not %u\n", - sig->nr_mpi, pk->algo->n_sig_mpi); + sig->nr_mpi, algo->n_sig_mpi); return -EINVAL; } - return pk->algo->verify_signature(pk, sig); + return algo->verify_signature(pk, sig); +} +EXPORT_SYMBOL_GPL(public_key_verify_signature); + +static int public_key_verify_signature_2(const struct key *key, + const struct public_key_signature *sig) +{ + const struct public_key *pk = key->payload.data; + return public_key_verify_signature(pk, sig); } /* @@ -103,6 +123,6 @@ struct asymmetric_key_subtype public_key_subtype = { .name = "public_key", .describe = public_key_describe, .destroy = public_key_destroy, - .verify_signature = public_key_verify_signature, + .verify_signature = public_key_verify_signature_2, }; EXPORT_SYMBOL_GPL(public_key_subtype); diff --git a/crypto/asymmetric_keys/public_key.h b/crypto/asymmetric_keys/public_key.h index 5e5e35626899..5c37a22a0637 100644 --- a/crypto/asymmetric_keys/public_key.h +++ b/crypto/asymmetric_keys/public_key.h @@ -28,3 +28,9 @@ struct public_key_algorithm { }; extern const struct public_key_algorithm RSA_public_key_algorithm; + +/* + * public_key.c + */ +extern int public_key_verify_signature(const struct public_key *pk, + const struct public_key_signature *sig); diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c index 4a6a0696f8a3..459cf97a75e2 100644 --- a/crypto/asymmetric_keys/rsa.c +++ b/crypto/asymmetric_keys/rsa.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <crypto/algapi.h> #include "public_key.h" MODULE_LICENSE("GPL"); @@ -73,13 +74,13 @@ static const struct { size_t size; } RSA_ASN1_templates[PKEY_HASH__LAST] = { #define _(X) { RSA_digest_info_##X, sizeof(RSA_digest_info_##X) } - [PKEY_HASH_MD5] = _(MD5), - [PKEY_HASH_SHA1] = _(SHA1), - [PKEY_HASH_RIPE_MD_160] = _(RIPE_MD_160), - [PKEY_HASH_SHA256] = _(SHA256), - [PKEY_HASH_SHA384] = _(SHA384), - [PKEY_HASH_SHA512] = _(SHA512), - [PKEY_HASH_SHA224] = _(SHA224), + [HASH_ALGO_MD5] = _(MD5), + [HASH_ALGO_SHA1] = _(SHA1), + [HASH_ALGO_RIPE_MD_160] = _(RIPE_MD_160), + [HASH_ALGO_SHA256] = _(SHA256), + [HASH_ALGO_SHA384] = _(SHA384), + [HASH_ALGO_SHA512] = _(SHA512), + [HASH_ALGO_SHA224] = _(SHA224), #undef _ }; @@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size, } } - if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { + if (crypto_memneq(asn1_template, 
EM + T_offset, asn1_size) != 0) { kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); return -EBADMSG; } - if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { + if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) { kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); return -EKEYREJECTED; } diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index facbf26bc6bb..29893162497c 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -47,6 +47,8 @@ void x509_free_certificate(struct x509_certificate *cert) kfree(cert->subject); kfree(cert->fingerprint); kfree(cert->authority); + kfree(cert->sig.digest); + mpi_free(cert->sig.rsa.s); kfree(cert); } } @@ -152,33 +154,33 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, return -ENOPKG; /* Unsupported combination */ case OID_md4WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_MD5; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha1WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA1; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha256WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA256; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha384WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA384; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha512WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA512; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; case OID_sha224WithRSAEncryption: - ctx->cert->sig_hash_algo = PKEY_HASH_SHA224; - ctx->cert->sig_pkey_algo = PKEY_ALGO_RSA; + ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224; + ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; break; } @@ -203,8 +205,8 @@ int x509_note_signature(void *context, size_t hdrlen, return -EINVAL; } - ctx->cert->sig = value; - ctx->cert->sig_size = vlen; + ctx->cert->raw_sig = value; + ctx->cert->raw_sig_size = vlen; return 0; } @@ -343,8 +345,9 @@ int x509_extract_key_data(void *context, size_t hdrlen, if (ctx->last_oid != OID_rsaEncryption) return -ENOPKG; - /* There seems to be an extraneous 0 byte on the front of the data */ - ctx->cert->pkey_algo = PKEY_ALGO_RSA; + ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA; + + /* Discard the BIT STRING metadata */ ctx->key = value + 1; ctx->key_size = vlen - 1; return 0; diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index f86dc5fcc4ad..87d9cc26f630 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -9,6 +9,7 @@ * 2 of the Licence, or (at your option) any later version. 
*/ +#include <linux/time.h> #include <crypto/public_key.h> struct x509_certificate { @@ -20,13 +21,11 @@ struct x509_certificate { char *authority; /* Authority key fingerprint as hex */ struct tm valid_from; struct tm valid_to; - enum pkey_algo pkey_algo : 8; /* Public key algorithm */ - enum pkey_algo sig_pkey_algo : 8; /* Signature public key algorithm */ - enum pkey_hash_algo sig_hash_algo : 8; /* Signature hash algorithm */ const void *tbs; /* Signed data */ - size_t tbs_size; /* Size of signed data */ - const void *sig; /* Signature data */ - size_t sig_size; /* Size of sigature */ + unsigned tbs_size; /* Size of signed data */ + unsigned raw_sig_size; /* Size of sigature */ + const void *raw_sig; /* Signature data */ + struct public_key_signature sig; /* Signature parameters */ }; /* @@ -34,3 +33,10 @@ struct x509_certificate { */ extern void x509_free_certificate(struct x509_certificate *cert); extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); + +/* + * x509_public_key.c + */ +extern int x509_get_sig_params(struct x509_certificate *cert); +extern int x509_check_signature(const struct public_key *pub, + struct x509_certificate *cert); diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 06007f0e880c..382ef0d2ff2e 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -23,82 +23,84 @@ #include "public_key.h" #include "x509_parser.h" -static const -struct public_key_algorithm *x509_public_key_algorithms[PKEY_ALGO__LAST] = { - [PKEY_ALGO_DSA] = NULL, -#if defined(CONFIG_PUBLIC_KEY_ALGO_RSA) || \ - defined(CONFIG_PUBLIC_KEY_ALGO_RSA_MODULE) - [PKEY_ALGO_RSA] = &RSA_public_key_algorithm, -#endif -}; - /* - * Check the signature on a certificate using the provided public key + * Set up the signature parameters in an X.509 certificate. This involves + * digesting the signed data and extracting the signature. */ -static int x509_check_signature(const struct public_key *pub, - const struct x509_certificate *cert) +int x509_get_sig_params(struct x509_certificate *cert) { - struct public_key_signature *sig; struct crypto_shash *tfm; struct shash_desc *desc; size_t digest_size, desc_size; + void *digest; int ret; pr_devel("==>%s()\n", __func__); - + + if (cert->sig.rsa.s) + return 0; + + cert->sig.rsa.s = mpi_read_raw_data(cert->raw_sig, cert->raw_sig_size); + if (!cert->sig.rsa.s) + return -ENOMEM; + cert->sig.nr_mpi = 1; + /* Allocate the hashing algorithm we're going to need and find out how * big the hash operational data will be. */ - tfm = crypto_alloc_shash(pkey_hash_algo[cert->sig_hash_algo], 0, 0); + tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0); if (IS_ERR(tfm)) return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm); desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); digest_size = crypto_shash_digestsize(tfm); - /* We allocate the hash operational data storage on the end of our - * context data. + /* We allocate the hash operational data storage on the end of the + * digest storage space. 
*/ ret = -ENOMEM; - sig = kzalloc(sizeof(*sig) + desc_size + digest_size, GFP_KERNEL); - if (!sig) - goto error_no_sig; + digest = kzalloc(digest_size + desc_size, GFP_KERNEL); + if (!digest) + goto error; - sig->pkey_hash_algo = cert->sig_hash_algo; - sig->digest = (u8 *)sig + sizeof(*sig) + desc_size; - sig->digest_size = digest_size; + cert->sig.digest = digest; + cert->sig.digest_size = digest_size; - desc = (void *)sig + sizeof(*sig); - desc->tfm = tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc = digest + digest_size; + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; ret = crypto_shash_init(desc); if (ret < 0) goto error; + might_sleep(); + ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest); +error: + crypto_free_shash(tfm); + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} +EXPORT_SYMBOL_GPL(x509_get_sig_params); - ret = -ENOMEM; - sig->rsa.s = mpi_read_raw_data(cert->sig, cert->sig_size); - if (!sig->rsa.s) - goto error; +/* + * Check the signature on a certificate using the provided public key + */ +int x509_check_signature(const struct public_key *pub, + struct x509_certificate *cert) +{ + int ret; - ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest); - if (ret < 0) - goto error_mpi; + pr_devel("==>%s()\n", __func__); - ret = pub->algo->verify_signature(pub, sig); + ret = x509_get_sig_params(cert); + if (ret < 0) + return ret; + ret = public_key_verify_signature(pub, &cert->sig); pr_debug("Cert Verification: %d\n", ret); - -error_mpi: - mpi_free(sig->rsa.s); -error: - kfree(sig); -error_no_sig: - crypto_free_shash(tfm); - - pr_devel("<==%s() = %d\n", __func__, ret); return ret; } +EXPORT_SYMBOL_GPL(x509_check_signature); /* * Attempt to parse a data blob for a key as an X509 certificate. 
@@ -106,7 +108,6 @@ error_no_sig: static int x509_key_preparse(struct key_preparsed_payload *prep) { struct x509_certificate *cert; - struct tm now; size_t srlen, sulen; char *desc = NULL; int ret; @@ -117,7 +118,18 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) pr_devel("Cert Issuer: %s\n", cert->issuer); pr_devel("Cert Subject: %s\n", cert->subject); - pr_devel("Cert Key Algo: %s\n", pkey_algo[cert->pkey_algo]); + + if (cert->pub->pkey_algo >= PKEY_ALGO__LAST || + cert->sig.pkey_algo >= PKEY_ALGO__LAST || + cert->sig.pkey_hash_algo >= PKEY_HASH__LAST || + !pkey_algo[cert->pub->pkey_algo] || + !pkey_algo[cert->sig.pkey_algo] || + !hash_algo_name[cert->sig.pkey_hash_algo]) { + ret = -ENOPKG; + goto error_free_cert; + } + + pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]); pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n", cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1, cert->valid_from.tm_mday, cert->valid_from.tm_hour, @@ -127,58 +139,22 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) cert->valid_to.tm_mday, cert->valid_to.tm_hour, cert->valid_to.tm_min, cert->valid_to.tm_sec); pr_devel("Cert Signature: %s + %s\n", - pkey_algo[cert->sig_pkey_algo], - pkey_hash_algo[cert->sig_hash_algo]); + pkey_algo_name[cert->sig.pkey_algo], + hash_algo_name[cert->sig.pkey_hash_algo]); - if (!cert->fingerprint || !cert->authority) { - pr_warn("Cert for '%s' must have SubjKeyId and AuthKeyId extensions\n", + if (!cert->fingerprint) { + pr_warn("Cert for '%s' must have a SubjKeyId extension\n", cert->subject); ret = -EKEYREJECTED; goto error_free_cert; } - time_to_tm(CURRENT_TIME.tv_sec, 0, &now); - pr_devel("Now: %04ld-%02d-%02d %02d:%02d:%02d\n", - now.tm_year + 1900, now.tm_mon + 1, now.tm_mday, - now.tm_hour, now.tm_min, now.tm_sec); - if (now.tm_year < cert->valid_from.tm_year || - (now.tm_year == cert->valid_from.tm_year && - (now.tm_mon < cert->valid_from.tm_mon || - (now.tm_mon == cert->valid_from.tm_mon && - (now.tm_mday < cert->valid_from.tm_mday || - (now.tm_mday == cert->valid_from.tm_mday && - (now.tm_hour < cert->valid_from.tm_hour || - (now.tm_hour == cert->valid_from.tm_hour && - (now.tm_min < cert->valid_from.tm_min || - (now.tm_min == cert->valid_from.tm_min && - (now.tm_sec < cert->valid_from.tm_sec - ))))))))))) { - pr_warn("Cert %s is not yet valid\n", cert->fingerprint); - ret = -EKEYREJECTED; - goto error_free_cert; - } - if (now.tm_year > cert->valid_to.tm_year || - (now.tm_year == cert->valid_to.tm_year && - (now.tm_mon > cert->valid_to.tm_mon || - (now.tm_mon == cert->valid_to.tm_mon && - (now.tm_mday > cert->valid_to.tm_mday || - (now.tm_mday == cert->valid_to.tm_mday && - (now.tm_hour > cert->valid_to.tm_hour || - (now.tm_hour == cert->valid_to.tm_hour && - (now.tm_min > cert->valid_to.tm_min || - (now.tm_min == cert->valid_to.tm_min && - (now.tm_sec > cert->valid_to.tm_sec - ))))))))))) { - pr_warn("Cert %s has expired\n", cert->fingerprint); - ret = -EKEYEXPIRED; - goto error_free_cert; - } - - cert->pub->algo = x509_public_key_algorithms[cert->pkey_algo]; + cert->pub->algo = pkey_algo[cert->pub->pkey_algo]; cert->pub->id_type = PKEY_ID_X509; - /* Check the signature on the key */ - if (strcmp(cert->fingerprint, cert->authority) == 0) { + /* Check the signature on the key if it appears to be self-signed */ + if (!cert->authority || + strcmp(cert->fingerprint, cert->authority) == 0) { ret = x509_check_signature(cert->pub, cert); if (ret < 0) goto error_free_cert; @@ -237,3 +213,6 @@ static 
void __exit x509_key_exit(void) module_init(x509_key_init); module_exit(x509_key_exit); + +MODULE_DESCRIPTION("X.509 certificate parser"); +MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 9e62feffb374..f8c0b8dbeb75 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, &dest, 1, &src, 1, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; + struct dmaengine_unmap_data *unmap = NULL; - if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { - dma_addr_t dma_dest, dma_src; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); + + if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { unsigned long dma_prep_flags = 0; if (submit->cb_fn) dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; - dma_dest = dma_map_page(device->dev, dest, dest_offset, len, - DMA_FROM_DEVICE); - - dma_src = dma_map_page(device->dev, src, src_offset, len, - DMA_TO_DEVICE); - - tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, - len, dma_prep_flags); - if (!tx) { - dma_unmap_page(device->dev, dma_dest, len, - DMA_FROM_DEVICE); - dma_unmap_page(device->dev, dma_src, len, - DMA_TO_DEVICE); - } + + unmap->to_cnt = 1; + unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len, + DMA_TO_DEVICE); + unmap->from_cnt = 1; + unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len, + DMA_FROM_DEVICE); + unmap->len = len; + + tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, + dma_prep_flags); } if (tx) { pr_debug("%s: (async) len: %zu\n", __func__, len); + + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); } else { void *dest_buf, *src_buf; @@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, async_tx_sync_epilog(submit); } + dmaengine_unmap_put(unmap); + return tx; } EXPORT_SYMBOL_GPL(async_memcpy); diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 91d5d385899e..d05327caf69d 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -46,49 +46,24 @@ static struct page *pq_scribble_page; * do_async_gen_syndrome - asynchronously calculate P and/or Q */ static __async_inline struct dma_async_tx_descriptor * -do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, - const unsigned char *scfs, unsigned int offset, int disks, - size_t len, dma_addr_t *dma_src, +do_async_gen_syndrome(struct dma_chan *chan, + const unsigned char *scfs, int disks, + struct dmaengine_unmap_data *unmap, + enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct dma_device *dma = chan->device; - enum dma_ctrl_flags dma_flags = 0; enum async_tx_flags flags_orig = submit->flags; dma_async_tx_callback cb_fn_orig = submit->cb_fn; dma_async_tx_callback cb_param_orig = submit->cb_param; int src_cnt = disks - 2; - unsigned char coefs[src_cnt]; unsigned short pq_src_cnt; dma_addr_t dma_dest[2]; int src_off = 0; - int idx; - int i; - /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ - if (P(blocks, disks)) - dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, - len, DMA_BIDIRECTIONAL); - else - dma_flags |= DMA_PREP_PQ_DISABLE_P; - if (Q(blocks, disks)) - dma_dest[1] = 
dma_map_page(dma->dev, Q(blocks, disks), offset, - len, DMA_BIDIRECTIONAL); - else - dma_flags |= DMA_PREP_PQ_DISABLE_Q; - - /* convert source addresses being careful to collapse 'empty' - * sources and update the coefficients accordingly - */ - for (i = 0, idx = 0; i < src_cnt; i++) { - if (blocks[i] == NULL) - continue; - dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, - DMA_TO_DEVICE); - coefs[idx] = scfs[i]; - idx++; - } - src_cnt = idx; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; while (src_cnt > 0) { submit->flags = flags_orig; @@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, if (src_cnt > pq_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; - dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; } else { - dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = cb_fn_orig; submit->cb_param = cb_param_orig; if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } - if (submit->flags & ASYNC_TX_FENCE) - dma_flags |= DMA_PREP_FENCE; - /* Since we have clobbered the src_list we are committed - * to doing this asynchronously. Drivers force forward - * progress in case they can not provide a descriptor + /* Drivers force forward progress in case they can not provide + * a descriptor */ for (;;) { + dma_dest[0] = unmap->addr[disks - 2]; + dma_dest[1] = unmap->addr[disks - 1]; tx = dma->device_prep_dma_pq(chan, dma_dest, - &dma_src[src_off], + &unmap->addr[src_off], pq_src_cnt, - &coefs[src_off], len, + &scfs[src_off], unmap->len, dma_flags); if (likely(tx)) break; @@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, dma_async_issue_pending(chan); } + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); submit->depend_tx = tx; @@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, * set to NULL those buffers will be replaced with the raid6_zero_page * in the synchronous path and omitted in the hardware-asynchronous * path. - * - * 'blocks' note: if submit->scribble is NULL then the contents of - * 'blocks' may be overwritten to perform address conversions - * (dma_map_page() or page_address()). */ struct dma_async_tx_descriptor * async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, @@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, &P(blocks, disks), 2, blocks, src_cnt, len); struct dma_device *device = chan ? 
chan->device : NULL; - dma_addr_t *dma_src = NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) blocks; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); - if (dma_src && device && + if (unmap && (src_cnt <= dma_maxpq(device, 0) || dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && is_dma_pq_aligned(device, offset, 0, len)) { + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = 0; + unsigned char coefs[src_cnt]; + int i, j; + /* run the p+q asynchronously */ pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); - return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, - disks, len, dma_src, submit); + + /* convert source addresses being careful to collapse 'empty' + * sources and update the coefficients accordingly + */ + unmap->len = len; + for (i = 0, j = 0; i < src_cnt; i++) { + if (blocks[i] == NULL) + continue; + unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, + len, DMA_TO_DEVICE); + coefs[j] = raid6_gfexp[i]; + unmap->to_cnt++; + j++; + } + + /* + * DMAs use destinations as sources, + * so use BIDIRECTIONAL mapping + */ + unmap->bidi_cnt++; + if (P(blocks, disks)) + unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), + offset, len, DMA_BIDIRECTIONAL); + else { + unmap->addr[j++] = 0; + dma_flags |= DMA_PREP_PQ_DISABLE_P; + } + + unmap->bidi_cnt++; + if (Q(blocks, disks)) + unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), + offset, len, DMA_BIDIRECTIONAL); + else { + unmap->addr[j++] = 0; + dma_flags |= DMA_PREP_PQ_DISABLE_Q; + } + + tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); + dmaengine_unmap_put(unmap); + return tx; } + dmaengine_unmap_put(unmap); + /* run the pq synchronously */ pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); @@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, struct dma_async_tx_descriptor *tx; unsigned char coefs[disks-2]; enum dma_ctrl_flags dma_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; - dma_addr_t *dma_src = NULL; - int src_cnt = 0; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks < 4); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) blocks; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); - if (dma_src && device && disks <= dma_maxpq(device, 0) && + if (unmap && disks <= dma_maxpq(device, 0) && is_dma_pq_aligned(device, offset, 0, len)) { struct device *dev = device->dev; - dma_addr_t *pq = &dma_src[disks-2]; - int i; + dma_addr_t pq[2]; + int i, j = 0, src_cnt = 0; pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); - if (!P(blocks, disks)) + + unmap->len = len; + for (i = 0; i < disks-2; i++) + if (likely(blocks[i])) { + unmap->addr[j] = dma_map_page(dev, blocks[i], + offset, len, + DMA_TO_DEVICE); + coefs[j] = raid6_gfexp[i]; + unmap->to_cnt++; + src_cnt++; + j++; + } + + if (!P(blocks, disks)) { + pq[0] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_P; - else + } else { pq[0] = dma_map_page(dev, P(blocks, disks), offset, len, DMA_TO_DEVICE); - if (!Q(blocks, disks)) + unmap->addr[j++] = pq[0]; + unmap->to_cnt++; + } + if (!Q(blocks, disks)) { + pq[1] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_Q; - else + } else { pq[1] = dma_map_page(dev, Q(blocks, disks), offset, len, DMA_TO_DEVICE); + unmap->addr[j++] = pq[1]; + unmap->to_cnt++; + } if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - for (i = 0; i < disks-2; i++) - if (likely(blocks[i])) { - dma_src[src_cnt] = dma_map_page(dev, blocks[i], - offset, len, - DMA_TO_DEVICE); - coefs[src_cnt] = raid6_gfexp[i]; - src_cnt++; - } - for (;;) { - tx = device->device_prep_dma_pq_val(chan, pq, dma_src, + tx = device->device_prep_dma_pq_val(chan, pq, + unmap->addr, src_cnt, coefs, len, pqres, @@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, async_tx_quiesce(&submit->depend_tx); dma_async_issue_pending(chan); } + + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); return tx; diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index a9f08a6a582e..934a84981495 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -26,6 +26,7 @@ #include <linux/dma-mapping.h> #include <linux/raid/pq.h> #include <linux/async_tx.h> +#include <linux/dmaengine.h> static struct dma_async_tx_descriptor * async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, @@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, srcs, 2, len); struct dma_device *dma = chan ? 
chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; const u8 *amul, *bmul; u8 ax, bx; u8 *a, *b, *c; - if (dma) { - dma_addr_t dma_dest[2]; - dma_addr_t dma_src[2]; + if (dma) + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + + if (unmap) { struct device *dev = dma->dev; + dma_addr_t pq[2]; struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); - dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); - dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); - tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, + unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); + unmap->to_cnt = 2; + + unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + unmap->bidi_cnt = 1; + /* engine only looks at Q, but expects it to follow P */ + pq[1] = unmap->addr[2]; + + unmap->len = len; + tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, len, dma_flags); if (tx) { + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); + dmaengine_unmap_put(unmap); return tx; } /* could not get a descriptor, unmap and fall through to * the synchronous path */ - dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); - dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); + dmaengine_unmap_put(unmap); } /* run the operation synchronously */ @@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, &src, 1, len); struct dma_device *dma = chan ? 
chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; const u8 *qmul; /* Q multiplier table */ u8 *d, *s; - if (dma) { + if (dma) + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + + if (unmap) { dma_addr_t dma_dest[2]; - dma_addr_t dma_src[1]; struct device *dev = dma->dev; struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); - dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); - tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, - len, dma_flags); + unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); + unmap->to_cnt++; + unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + dma_dest[1] = unmap->addr[1]; + unmap->bidi_cnt++; + unmap->len = len; + + /* this looks funny, but the engine looks for Q at + * dma_dest[1] and ignores dma_dest[0] as a dest + * due to DMA_PREP_PQ_DISABLE_P + */ + tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, + 1, &coef, len, dma_flags); + if (tx) { + dma_set_unmap(tx, unmap); + dmaengine_unmap_put(unmap); async_tx_submit(chan, tx, submit); return tx; } @@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, /* could not get a descriptor, unmap and fall through to * the synchronous path */ - dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); + dmaengine_unmap_put(unmap); } /* no channel available, or failed to allocate a descriptor, so diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 7be34248b450..39ea4791a3c9 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, } device->device_issue_pending(chan); } else { - if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) + if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE) panic("%s: DMA error waiting for depend_tx\n", __func__); tx->tx_submit(tx); @@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) * we are referring to the correct operation */ BUG_ON(async_tx_test_ack(*tx)); - if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) + if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE) panic("%s: DMA error waiting for transaction\n", __func__); async_tx_ack(*tx); diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 8ade0a0481c6..3c562f5a60bb 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -33,48 +33,31 @@ /* do_async_xor - dma map the pages and perform the xor with an engine */ static __async_inline struct dma_async_tx_descriptor * -do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, - unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, +do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, struct async_submit_ctl *submit) { struct dma_device *dma = chan->device; struct dma_async_tx_descriptor *tx = NULL; - int src_off = 0; - int i; dma_async_tx_callback cb_fn_orig = submit->cb_fn; void *cb_param_orig = submit->cb_param; enum async_tx_flags flags_orig = submit->flags; - enum dma_ctrl_flags dma_flags; - int xor_src_cnt = 0; - dma_addr_t dma_dest; - - /* map the dest bidrectional in case it is re-used as a source */ - dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); - for (i = 0; i < src_cnt; i++) { 
- /* only map the dest once */ - if (!src_list[i]) - continue; - if (unlikely(src_list[i] == dest)) { - dma_src[xor_src_cnt++] = dma_dest; - continue; - } - dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, - len, DMA_TO_DEVICE); - } - src_cnt = xor_src_cnt; + enum dma_ctrl_flags dma_flags = 0; + int src_cnt = unmap->to_cnt; + int xor_src_cnt; + dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; + dma_addr_t *src_list = unmap->addr; while (src_cnt) { + dma_addr_t tmp; + submit->flags = flags_orig; - dma_flags = 0; xor_src_cnt = min(src_cnt, (int)dma->max_xor); - /* if we are submitting additional xors, leave the chain open, - * clear the callback parameters, and leave the destination - * buffer mapped + /* if we are submitting additional xors, leave the chain open + * and clear the callback parameters */ if (src_cnt > xor_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; - dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; } else { @@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, dma_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - /* Since we have clobbered the src_list we are committed - * to doing this asynchronously. Drivers force forward progress - * in case they can not provide a descriptor + + /* Drivers force forward progress in case they can not provide a + * descriptor */ - tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], - xor_src_cnt, len, dma_flags); + tmp = src_list[0]; + if (src_list > unmap->addr) + src_list[0] = dma_dest; + tx = dma->device_prep_dma_xor(chan, dma_dest, src_list, + xor_src_cnt, unmap->len, + dma_flags); + src_list[0] = tmp; + if (unlikely(!tx)) async_tx_quiesce(&submit->depend_tx); @@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, while (unlikely(!tx)) { dma_async_issue_pending(chan); tx = dma->device_prep_dma_xor(chan, dma_dest, - &dma_src[src_off], - xor_src_cnt, len, + src_list, + xor_src_cnt, unmap->len, dma_flags); } + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); submit->depend_tx = tx; if (src_cnt > xor_src_cnt) { /* drop completed sources */ src_cnt -= xor_src_cnt; - src_off += xor_src_cnt; - /* use the intermediate result a source */ - dma_src[--src_off] = dma_dest; src_cnt++; + src_list += xor_src_cnt - 1; } else break; } @@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, &dest, 1, src_list, src_cnt, len); - dma_addr_t *dma_src = NULL; + struct dma_device *device = chan ? 
chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(src_cnt <= 1); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) src_list; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); + + if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { + struct dma_async_tx_descriptor *tx; + int i, j; - if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { /* run the xor asynchronously */ pr_debug("%s (async): len: %zu\n", __func__, len); - return do_async_xor(chan, dest, src_list, offset, src_cnt, len, - dma_src, submit); + unmap->len = len; + for (i = 0, j = 0; i < src_cnt; i++) { + if (!src_list[i]) + continue; + unmap->to_cnt++; + unmap->addr[j++] = dma_map_page(device->dev, src_list[i], + offset, len, DMA_TO_DEVICE); + } + + /* map it bidirectional as it may be re-used as a source */ + unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, + DMA_BIDIRECTIONAL); + unmap->bidi_cnt = 1; + + tx = do_async_xor(chan, unmap, submit); + dmaengine_unmap_put(unmap); + return tx; } else { + dmaengine_unmap_put(unmap); /* run the xor synchronously */ pr_debug("%s (sync): len: %zu\n", __func__, len); WARN_ONCE(chan, "%s: no space for dma address conversion\n", @@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t *dma_src = NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(src_cnt <= 1); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) src_list; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); - if (dma_src && device && src_cnt <= device->max_xor && + if (unmap && src_cnt <= device->max_xor && is_dma_xor_aligned(device, offset, 0, len)) { unsigned long dma_prep_flags = 0; int i; @@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; - for (i = 0; i < src_cnt; i++) - dma_src[i] = dma_map_page(device->dev, src_list[i], - offset, len, DMA_TO_DEVICE); - tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, + for (i = 0; i < src_cnt; i++) { + unmap->addr[i] = dma_map_page(device->dev, src_list[i], + offset, len, DMA_TO_DEVICE); + unmap->to_cnt++; + } + unmap->len = len; + + tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt, len, result, dma_prep_flags); if (unlikely(!tx)) { @@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, while (!tx) { dma_async_issue_pending(chan); tx = device->device_prep_dma_xor_val(chan, - dma_src, src_cnt, len, result, + unmap->addr, src_cnt, len, result, dma_prep_flags); } } - + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); } else { enum async_tx_flags flags_orig = submit->flags; @@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, async_tx_sync_epilog(submit); submit->flags = flags_orig; } + dmaengine_unmap_put(unmap); return tx; } diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c index 4a92bac744dc..dad95f45b88f 100644 --- a/crypto/async_tx/raid6test.c +++ 
b/crypto/async_tx/raid6test.c @@ -28,7 +28,7 @@ #undef pr #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) -#define NDISKS 16 /* Including P and Q */ +#define NDISKS 64 /* Including P and Q */ static struct page *dataptrs[NDISKS]; static addr_conv_t addr_conv[NDISKS]; @@ -219,6 +219,14 @@ static int raid6_test(void) err += test(11, &tests); err += test(12, &tests); } + + /* the 24 disk case is special for ioatdma as it is the boudary point + * at which it needs to switch from 8-source ops to 16-source + * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set) + */ + if (NDISKS > 24) + err += test(24, &tests); + err += test(NDISKS, &tests); pr("\n"); diff --git a/crypto/authenc.c b/crypto/authenc.c index ffce19de05cf..1875e7026e8f 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err) aead_request_complete(req, err); } -static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, - unsigned int keylen) +int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen) { - unsigned int authkeylen; - unsigned int enckeylen; - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); - struct crypto_ahash *auth = ctx->auth; - struct crypto_ablkcipher *enc = ctx->enc; - struct rtattr *rta = (void *)key; + struct rtattr *rta = (struct rtattr *)key; struct crypto_authenc_key_param *param; - int err = -EINVAL; if (!RTA_OK(rta, keylen)) - goto badkey; + return -EINVAL; if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; + return -EINVAL; if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; + return -EINVAL; param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); + keys->enckeylen = be32_to_cpu(param->enckeylen); key += RTA_ALIGN(rta->rta_len); keylen -= RTA_ALIGN(rta->rta_len); - if (keylen < enckeylen) - goto badkey; + if (keylen < keys->enckeylen) + return -EINVAL; - authkeylen = keylen - enckeylen; + keys->authkeylen = keylen - keys->enckeylen; + keys->authkey = key; + keys->enckey = key + keys->authkeylen; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys); + +static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_ahash *auth = ctx->auth; + struct crypto_ablkcipher *enc = ctx->enc; + struct crypto_authenc_keys keys; + int err = -EINVAL; + + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(auth, key, authkeylen); + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & CRYPTO_TFM_RES_MASK); @@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); + err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & CRYPTO_TFM_RES_MASK); @@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); 
- err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -462,7 +474,7 @@ static int crypto_authenc_verify(struct aead_request *req, ihash = ohash + authsize; scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; + return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; } static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, diff --git a/crypto/authencesn.c b/crypto/authencesn.c index ab53762fc309..4be0dd4373a9 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err) static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, unsigned int keylen) { - unsigned int authkeylen; - unsigned int enckeylen; struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); struct crypto_ahash *auth = ctx->auth; struct crypto_ablkcipher *enc = ctx->enc; - struct rtattr *rta = (void *)key; - struct crypto_authenc_key_param *param; + struct crypto_authenc_keys keys; int err = -EINVAL; - if (!RTA_OK(rta, keylen)) + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) - goto badkey; - if (RTA_PAYLOAD(rta) < sizeof(*param)) - goto badkey; - - param = RTA_DATA(rta); - enckeylen = be32_to_cpu(param->enckeylen); - - key += RTA_ALIGN(rta->rta_len); - keylen -= RTA_ALIGN(rta->rta_len); - - if (keylen < enckeylen) - goto badkey; - - authkeylen = keylen - enckeylen; crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); - err = crypto_ahash_setkey(auth, key, authkeylen); + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & CRYPTO_TFM_RES_MASK); @@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & CRYPTO_TFM_REQ_MASK); - err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); + err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & CRYPTO_TFM_RES_MASK); @@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; if (err) goto out; @@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authsize, 0); - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; + err = crypto_memneq(ihash, ahreq->result, authsize) ? 
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 499c91717d93..3e05499d183a 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -363,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
 
 	if (!err) {
 		err = crypto_ccm_auth(req, req->dst, cryptlen);
-		if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
+		if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
 			err = -EBADMSG;
 	}
 	aead_request_complete(req, err);
@@ -422,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
 		return err;
 
 	/* verify */
-	if (memcmp(authtag, odata, authsize))
+	if (crypto_memneq(authtag, odata, authsize))
 		return -EBADMSG;
 
 	return err;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 43e1fb05ea54..b4f017939004 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
 
 	crypto_xor(auth_tag, iauth_tag, 16);
 	scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
-	return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
+	return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
 }
 
 static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
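Every memcmp() converted above sits on an authentication-tag verification path, where a data-dependent early exit is observable by an attacker. A minimal sketch of the problem, for illustration only (leaky_tag_check() is hypothetical, not kernel code):

#include <stddef.h>

/* A typical memcmp()-style comparison returns at the first differing
 * byte, so the run time reveals how many leading tag bytes matched. */
static int leaky_tag_check(const unsigned char *a, const unsigned char *b,
			   size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (a[i] != b[i])
			return -1;	/* early exit leaks i via timing */
	return 0;
}

crypto_memneq() avoids this by ORing the XOR of every byte pair into a single accumulator and inspecting it only once, so the run time does not depend on where, or whether, the buffers differ.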
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
new file mode 100644
index 000000000000..3e7ff46f26e8
--- /dev/null
+++ b/crypto/hash_info.c
@@ -0,0 +1,56 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/export.h>
+#include <crypto/hash_info.h>
+
+const char *const hash_algo_name[HASH_ALGO__LAST] = {
+	[HASH_ALGO_MD4]		= "md4",
+	[HASH_ALGO_MD5]		= "md5",
+	[HASH_ALGO_SHA1]	= "sha1",
+	[HASH_ALGO_RIPE_MD_160]	= "rmd160",
+	[HASH_ALGO_SHA256]	= "sha256",
+	[HASH_ALGO_SHA384]	= "sha384",
+	[HASH_ALGO_SHA512]	= "sha512",
+	[HASH_ALGO_SHA224]	= "sha224",
+	[HASH_ALGO_RIPE_MD_128]	= "rmd128",
+	[HASH_ALGO_RIPE_MD_256]	= "rmd256",
+	[HASH_ALGO_RIPE_MD_320]	= "rmd320",
+	[HASH_ALGO_WP_256]	= "wp256",
+	[HASH_ALGO_WP_384]	= "wp384",
+	[HASH_ALGO_WP_512]	= "wp512",
+	[HASH_ALGO_TGR_128]	= "tgr128",
+	[HASH_ALGO_TGR_160]	= "tgr160",
+	[HASH_ALGO_TGR_192]	= "tgr192",
+};
+EXPORT_SYMBOL_GPL(hash_algo_name);
+
+const int hash_digest_size[HASH_ALGO__LAST] = {
+	[HASH_ALGO_MD4]		= MD5_DIGEST_SIZE,
+	[HASH_ALGO_MD5]		= MD5_DIGEST_SIZE,
+	[HASH_ALGO_SHA1]	= SHA1_DIGEST_SIZE,
+	[HASH_ALGO_RIPE_MD_160]	= RMD160_DIGEST_SIZE,
+	[HASH_ALGO_SHA256]	= SHA256_DIGEST_SIZE,
+	[HASH_ALGO_SHA384]	= SHA384_DIGEST_SIZE,
+	[HASH_ALGO_SHA512]	= SHA512_DIGEST_SIZE,
+	[HASH_ALGO_SHA224]	= SHA224_DIGEST_SIZE,
+	[HASH_ALGO_RIPE_MD_128]	= RMD128_DIGEST_SIZE,
+	[HASH_ALGO_RIPE_MD_256]	= RMD256_DIGEST_SIZE,
+	[HASH_ALGO_RIPE_MD_320]	= RMD320_DIGEST_SIZE,
+	[HASH_ALGO_WP_256]	= WP256_DIGEST_SIZE,
+	[HASH_ALGO_WP_384]	= WP384_DIGEST_SIZE,
+	[HASH_ALGO_WP_512]	= WP512_DIGEST_SIZE,
+	[HASH_ALGO_TGR_128]	= TGR128_DIGEST_SIZE,
+	[HASH_ALGO_TGR_160]	= TGR160_DIGEST_SIZE,
+	[HASH_ALGO_TGR_192]	= TGR192_DIGEST_SIZE,
+};
+EXPORT_SYMBOL_GPL(hash_digest_size);
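A hedged usage sketch of the new tables: alloc_by_id() below is hypothetical, but it shows the intended pattern of mapping an enum hash_algo value to a crypto API transform name and digest length without hardcoding either in the caller.

#include <linux/err.h>
#include <crypto/hash.h>
#include <crypto/hash_info.h>

/* Allocate a synchronous hash transform for a numeric algorithm id,
 * reporting the digest length the caller should expect. */
static struct crypto_shash *alloc_by_id(enum hash_algo algo,
					unsigned int *digest_len)
{
	if (algo >= HASH_ALGO__LAST)
		return ERR_PTR(-EINVAL);

	*digest_len = hash_digest_size[algo];
	return crypto_alloc_shash(hash_algo_name[algo], 0, 0);
}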
diff --git a/crypto/memneq.c b/crypto/memneq.c
new file mode 100644
index 000000000000..cd0162221c14
--- /dev/null
+++ b/crypto/memneq.c
@@ -0,0 +1,138 @@
+/*
+ * Constant-time equality testing of memory regions.
+ *
+ * Authors:
+ *
+ *   James Yonan <james@openvpn.net>
+ *   Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of OpenVPN Technologies nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/algapi.h>
+
+#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
+
+/* Generic path for arbitrary size */
+static inline unsigned long
+__crypto_memneq_generic(const void *a, const void *b, size_t size)
+{
+	unsigned long neq = 0;
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	while (size >= sizeof(unsigned long)) {
+		neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+		a += sizeof(unsigned long);
+		b += sizeof(unsigned long);
+		size -= sizeof(unsigned long);
+	}
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+	while (size > 0) {
+		neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+		a += 1;
+		b += 1;
+		size -= 1;
+	}
+	return neq;
+}
+
+/* Loop-free fast-path for frequently used 16-byte size */
+static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	if (sizeof(unsigned long) == 8)
+		return ((*(unsigned long *)(a)   ^ *(unsigned long *)(b))
+		      | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
+	else if (sizeof(unsigned int) == 4)
+		return ((*(unsigned int *)(a)    ^ *(unsigned int *)(b))
+		      | (*(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4))
+		      | (*(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8))
+		      | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
+	else
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+		return ((*(unsigned char *)(a)    ^ *(unsigned char *)(b))
+		      | (*(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1))
+		      | (*(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2))
+		      | (*(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3))
+		      | (*(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4))
+		      | (*(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5))
+		      | (*(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6))
+		      | (*(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7))
+		      | (*(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8))
+		      | (*(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9))
+		      | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
+		      | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
+		      | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
+		      | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
+		      | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
+		      | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
+}
+
+/* Compare two areas of memory without leaking timing information,
+ * and with special optimizations for common sizes.  Users should
+ * not call this function directly, but should instead use
+ * crypto_memneq defined in crypto/algapi.h.
+ */
+noinline unsigned long __crypto_memneq(const void *a, const void *b,
+				       size_t size)
+{
+	switch (size) {
+	case 16:
+		return __crypto_memneq_16(a, b);
+	default:
+		return __crypto_memneq_generic(a, b, size);
+	}
+}
+EXPORT_SYMBOL(__crypto_memneq);
+
+#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
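Callers are expected to reach __crypto_memneq() through the crypto_memneq() wrapper in crypto/algapi.h, which returns zero when the regions match and nonzero otherwise. The conversion from memcmp() in the files above is therefore mechanical; a small illustrative sketch (verify_tag() is hypothetical, not part of the patch):

#include <linux/errno.h>
#include <linux/types.h>
#include <crypto/algapi.h>

/* Constant-time tag verification, mapping "not equal" to -EBADMSG
 * exactly as the AEAD decrypt paths above do. */
static int verify_tag(const u8 *computed, const u8 *received,
		      unsigned int authsize)
{
	return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
}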