Diffstat (limited to 'crypto')
-rw-r--r--  crypto/842.c            | 174
-rw-r--r--  crypto/Kconfig          |  28
-rw-r--r--  crypto/Makefile         |   3
-rw-r--r--  crypto/ablkcipher.c     |  10
-rw-r--r--  crypto/aead.c           | 451
-rw-r--r--  crypto/algapi.c         |  30
-rw-r--r--  crypto/algif_aead.c     |   1
-rw-r--r--  crypto/algif_rng.c      |   2
-rw-r--r--  crypto/ansi_cprng.c     |  88
-rw-r--r--  crypto/authenc.c        |  17
-rw-r--r--  crypto/authencesn.c     |  17
-rw-r--r--  crypto/blkcipher.c      |   1
-rw-r--r--  crypto/ccm.c            |  14
-rw-r--r--  crypto/cryptd.c         |  63
-rw-r--r--  crypto/crypto_null.c    |  39
-rw-r--r--  crypto/drbg.c           | 513
-rw-r--r--  crypto/echainiv.c       | 546
-rw-r--r--  crypto/fips.c           |  53
-rw-r--r--  crypto/gcm.c            |  45
-rw-r--r--  crypto/internal.h       |   3
-rw-r--r--  crypto/jitterentropy.c  | 909
-rw-r--r--  crypto/krng.c           |  33
-rw-r--r--  crypto/md5.c            |   8
-rw-r--r--  crypto/pcompress.c      |   7
-rw-r--r--  crypto/pcrypt.c         |  86
-rw-r--r--  crypto/proc.c           |  41
-rw-r--r--  crypto/rng.c            | 105
-rw-r--r--  crypto/scatterwalk.c    |  38
-rw-r--r--  crypto/seqiv.c          | 626
-rw-r--r--  crypto/shash.c          |   7
-rw-r--r--  crypto/tcrypt.c         |   9
-rw-r--r--  crypto/testmgr.c        |  15
-rw-r--r--  crypto/testmgr.h        | 501
-rw-r--r--  crypto/zlib.c           |   4
34 files changed, 3591 insertions, 896 deletions
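
The first file below rewrites crypto/842.c from an NX-hardware wrapper (with an LZO fallback and failover timer) into a thin shim over the software 842 library in lib/842/. For orientation, here is a minimal sketch of how a kernel-side caller would reach this algorithm through the crypto compression API; the round-trip helper and its buffer handling are illustrative assumptions, not part of the patch:

#include <linux/crypto.h>
#include <linux/err.h>

/*
 * Illustrative sketch only: exercising the "842" compressor that the
 * rewritten crypto/842.c registers below. The helper shape and the
 * caller-supplied buffers are assumptions for this example.
 */
static int example_842_compress(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("842", 0, 0);	/* resolves to "842-generic" */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}

Note that the patch gives the software implementation a cra_driver_name of "842-generic" at priority 100, so the PowerPC NX hardware driver mentioned in the new header comment can register under the same "842" name at a higher priority and transparently win the lookup.
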
diff --git a/crypto/842.c b/crypto/842.c index b48f4f108c47..98e387efb8c8 100644 --- a/crypto/842.c +++ b/crypto/842.c @@ -1,5 +1,5 @@ /* - * Cryptographic API for the 842 compression algorithm. + * Cryptographic API for the 842 software compression algorithm. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,173 +11,73 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * Copyright (C) IBM Corporation, 2011-2015 * - * Copyright (C) IBM Corporation, 2011 + * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com> + * Seth Jennings <sjenning@linux.vnet.ibm.com> * - * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> - * Seth Jennings <sjenning@linux.vnet.ibm.com> + * Rewrite: Dan Streetman <ddstreet@ieee.org> + * + * This is the software implementation of compression and decompression using + * the 842 format. This uses the software 842 library at lib/842/ which is + * only a reference implementation, and is very, very slow as compared to other + * software compressors. You probably do not want to use this software + * compression. If you have access to the PowerPC 842 compression hardware, you + * want to use the 842 hardware compression interface, which is at: + * drivers/crypto/nx/nx-842-crypto.c */ #include <linux/init.h> #include <linux/module.h> #include <linux/crypto.h> -#include <linux/vmalloc.h> -#include <linux/nx842.h> -#include <linux/lzo.h> -#include <linux/timer.h> - -static int nx842_uselzo; - -struct nx842_ctx { - void *nx842_wmem; /* working memory for 842/lzo */ -}; +#include <linux/sw842.h> -enum nx842_crypto_type { - NX842_CRYPTO_TYPE_842, - NX842_CRYPTO_TYPE_LZO +struct crypto842_ctx { + char wmem[SW842_MEM_COMPRESS]; /* working memory for compress */ }; -#define NX842_SENTINEL 0xdeadbeef - -struct nx842_crypto_header { - unsigned int sentinel; /* debug */ - enum nx842_crypto_type type; -}; - -static int nx842_init(struct crypto_tfm *tfm) -{ - struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); - int wmemsize; - - wmemsize = max_t(int, nx842_get_workmem_size(), LZO1X_MEM_COMPRESS); - ctx->nx842_wmem = kmalloc(wmemsize, GFP_NOFS); - if (!ctx->nx842_wmem) - return -ENOMEM; - - return 0; -} - -static void nx842_exit(struct crypto_tfm *tfm) -{ - struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); - - kfree(ctx->nx842_wmem); -} - -static void nx842_reset_uselzo(unsigned long data) +static int crypto842_compress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) { - nx842_uselzo = 0; -} - -static DEFINE_TIMER(failover_timer, nx842_reset_uselzo, 0, 0); - -static int nx842_crypto_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); - struct nx842_crypto_header *hdr; - unsigned int tmp_len = *dlen; - size_t lzodlen; /* needed for lzo */ - int err; - - *dlen = 0; - hdr = (struct nx842_crypto_header *)dst; - hdr->sentinel = NX842_SENTINEL; /* debug */ - dst += sizeof(struct nx842_crypto_header); - tmp_len -= sizeof(struct nx842_crypto_header); - lzodlen = tmp_len; - - if (likely(!nx842_uselzo)) { - err = nx842_compress(src, slen, dst, &tmp_len, ctx->nx842_wmem); - - if (likely(!err)) { - 
hdr->type = NX842_CRYPTO_TYPE_842; - *dlen = tmp_len + sizeof(struct nx842_crypto_header); - return 0; - } - - /* hardware failed */ - nx842_uselzo = 1; + struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); - /* set timer to check for hardware again in 1 second */ - mod_timer(&failover_timer, jiffies + msecs_to_jiffies(1000)); - } - - /* no hardware, use lzo */ - err = lzo1x_1_compress(src, slen, dst, &lzodlen, ctx->nx842_wmem); - if (err != LZO_E_OK) - return -EINVAL; - - hdr->type = NX842_CRYPTO_TYPE_LZO; - *dlen = lzodlen + sizeof(struct nx842_crypto_header); - return 0; + return sw842_compress(src, slen, dst, dlen, ctx->wmem); } -static int nx842_crypto_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) +static int crypto842_decompress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) { - struct nx842_ctx *ctx = crypto_tfm_ctx(tfm); - struct nx842_crypto_header *hdr; - unsigned int tmp_len = *dlen; - size_t lzodlen; /* needed for lzo */ - int err; - - *dlen = 0; - hdr = (struct nx842_crypto_header *)src; - - if (unlikely(hdr->sentinel != NX842_SENTINEL)) - return -EINVAL; - - src += sizeof(struct nx842_crypto_header); - slen -= sizeof(struct nx842_crypto_header); - - if (likely(hdr->type == NX842_CRYPTO_TYPE_842)) { - err = nx842_decompress(src, slen, dst, &tmp_len, - ctx->nx842_wmem); - if (err) - return -EINVAL; - *dlen = tmp_len; - } else if (hdr->type == NX842_CRYPTO_TYPE_LZO) { - lzodlen = tmp_len; - err = lzo1x_decompress_safe(src, slen, dst, &lzodlen); - if (err != LZO_E_OK) - return -EINVAL; - *dlen = lzodlen; - } else - return -EINVAL; - - return 0; + return sw842_decompress(src, slen, dst, dlen); } static struct crypto_alg alg = { .cra_name = "842", + .cra_driver_name = "842-generic", + .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct nx842_ctx), + .cra_ctxsize = sizeof(struct crypto842_ctx), .cra_module = THIS_MODULE, - .cra_init = nx842_init, - .cra_exit = nx842_exit, .cra_u = { .compress = { - .coa_compress = nx842_crypto_compress, - .coa_decompress = nx842_crypto_decompress } } + .coa_compress = crypto842_compress, + .coa_decompress = crypto842_decompress } } }; -static int __init nx842_mod_init(void) +static int __init crypto842_mod_init(void) { - del_timer(&failover_timer); return crypto_register_alg(&alg); } +module_init(crypto842_mod_init); -static void __exit nx842_mod_exit(void) +static void __exit crypto842_mod_exit(void) { crypto_unregister_alg(&alg); } - -module_init(nx842_mod_init); -module_exit(nx842_mod_exit); +module_exit(crypto842_mod_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("842 Compression Algorithm"); +MODULE_DESCRIPTION("842 Software Compression Algorithm"); MODULE_ALIAS_CRYPTO("842"); +MODULE_ALIAS_CRYPTO("842-generic"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); diff --git a/crypto/Kconfig b/crypto/Kconfig index 362905e7c841..0ff4cd44e4f8 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -221,11 +221,22 @@ config CRYPTO_SEQIV tristate "Sequence Number IV Generator" select CRYPTO_AEAD select CRYPTO_BLKCIPHER + select CRYPTO_NULL select CRYPTO_RNG help This IV generator generates an IV based on a sequence number by xoring it with a salt. 
This algorithm is mainly useful for CTR +config CRYPTO_ECHAINIV + tristate "Encrypted Chain IV Generator" + select CRYPTO_AEAD + select CRYPTO_NULL + select CRYPTO_RNG + help + This IV generator generates an IV based on the encryption of + a sequence number xored with a salt. This is the default + algorithm for CBC. + comment "Block modes" config CRYPTO_CBC @@ -1412,10 +1423,9 @@ config CRYPTO_LZO config CRYPTO_842 tristate "842 compression algorithm" - depends on CRYPTO_DEV_NX_COMPRESS - # 842 uses lzo if the hardware becomes unavailable - select LZO_COMPRESS - select LZO_DECOMPRESS + select CRYPTO_ALGAPI + select 842_COMPRESS + select 842_DECOMPRESS help This is the 842 algorithm. @@ -1479,9 +1489,19 @@ config CRYPTO_DRBG tristate default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR) select CRYPTO_RNG + select CRYPTO_JITTERENTROPY endif # if CRYPTO_DRBG_MENU +config CRYPTO_JITTERENTROPY + tristate "Jitterentropy Non-Deterministic Random Number Generator" + help + The Jitterentropy RNG is a noise that is intended + to provide seed to another RNG. The RNG does not + perform any cryptographic whitening of the generated + random numbers. This Jitterentropy RNG registers with + the kernel crypto API and can be used by any caller. + config CRYPTO_USER_API tristate diff --git a/crypto/Makefile b/crypto/Makefile index 97b7d3ac87e7..5db5b95dd8fb 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o +obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o crypto_hash-y += ahash.o crypto_hash-y += shash.o @@ -94,6 +95,8 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o obj-$(CONFIG_CRYPTO_RNG2) += krng.o obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o obj-$(CONFIG_CRYPTO_DRBG) += drbg.o +CFLAGS_jitterentropy.o = -O0 +obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index db201bca1581..b15d797f94f9 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -586,6 +586,13 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) if (!tmpl) goto kill_larval; + if (tmpl->create) { + err = tmpl->create(tmpl, tb); + if (err) + goto put_tmpl; + goto ok; + } + inst = tmpl->alloc(tb); err = PTR_ERR(inst); if (IS_ERR(inst)) @@ -597,6 +604,7 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) goto put_tmpl; } +ok: /* Redo the lookup to use the instance we just registered. 
*/ err = -EAGAIN; @@ -636,7 +644,7 @@ struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask) if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_GIVCIPHER) { - if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) { + if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { crypto_mod_put(alg); alg = ERR_PTR(-ENOENT); } diff --git a/crypto/aead.c b/crypto/aead.c index 222271070b49..7c3d725bd264 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -13,6 +13,7 @@ */ #include <crypto/internal/aead.h> +#include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -26,10 +27,12 @@ #include "internal.h" +static int aead_null_givencrypt(struct aead_givcrypt_request *req); +static int aead_null_givdecrypt(struct aead_givcrypt_request *req); + static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { - struct aead_alg *aead = crypto_aead_alg(tfm); unsigned long alignmask = crypto_aead_alignmask(tfm); int ret; u8 *buffer, *alignbuffer; @@ -42,47 +45,94 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); - ret = aead->setkey(tfm, alignbuffer, keylen); + ret = tfm->setkey(tfm, alignbuffer, keylen); memset(alignbuffer, 0, keylen); kfree(buffer); return ret; } -static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) +int crypto_aead_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen) { - struct aead_alg *aead = crypto_aead_alg(tfm); unsigned long alignmask = crypto_aead_alignmask(tfm); + tfm = tfm->child; + if ((unsigned long)key & alignmask) return setkey_unaligned(tfm, key, keylen); - return aead->setkey(tfm, key, keylen); + return tfm->setkey(tfm, key, keylen); } +EXPORT_SYMBOL_GPL(crypto_aead_setkey); int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - struct aead_tfm *crt = crypto_aead_crt(tfm); int err; - if (authsize > crypto_aead_alg(tfm)->maxauthsize) + if (authsize > crypto_aead_maxauthsize(tfm)) return -EINVAL; - if (crypto_aead_alg(tfm)->setauthsize) { - err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize); + if (tfm->setauthsize) { + err = tfm->setauthsize(tfm->child, authsize); if (err) return err; } - crypto_aead_crt(crt->base)->authsize = authsize; - crt->authsize = authsize; + tfm->child->authsize = authsize; + tfm->authsize = authsize; return 0; } EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); -static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type, - u32 mask) +struct aead_old_request { + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct aead_request subreq; +}; + +unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return tfm->reqsize + sizeof(struct aead_old_request); +} +EXPORT_SYMBOL_GPL(crypto_aead_reqsize); + +static int old_crypt(struct aead_request *req, + int (*crypt)(struct aead_request *req)) { - return alg->cra_ctxsize; + struct aead_old_request *nreq = aead_request_ctx(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct scatterlist *src, *dst; + + if (req->old) + return crypt(req); + + src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen); + dst = scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen); + + aead_request_set_tfm(&nreq->subreq, aead); + aead_request_set_callback(&nreq->subreq, aead_request_flags(req), + req->base.complete, req->base.data); + aead_request_set_crypt(&nreq->subreq, 
src, dst, req->cryptlen, + req->iv); + aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen); + + return crypt(&nreq->subreq); +} + +static int old_encrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct old_aead_alg *alg = crypto_old_aead_alg(aead); + + return old_crypt(req, alg->encrypt); +} + +static int old_decrypt(struct aead_request *req) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct old_aead_alg *alg = crypto_old_aead_alg(aead); + + return old_crypt(req, alg->decrypt); } static int no_givcrypt(struct aead_givcrypt_request *req) @@ -90,32 +140,54 @@ static int no_givcrypt(struct aead_givcrypt_request *req) return -ENOSYS; } -static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm) { - struct aead_alg *alg = &tfm->__crt_alg->cra_aead; - struct aead_tfm *crt = &tfm->crt_aead; + struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead; + struct crypto_aead *crt = __crypto_aead_cast(tfm); if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) return -EINVAL; - crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ? - alg->setkey : setkey; - crt->encrypt = alg->encrypt; - crt->decrypt = alg->decrypt; - crt->givencrypt = alg->givencrypt ?: no_givcrypt; - crt->givdecrypt = alg->givdecrypt ?: no_givcrypt; - crt->base = __crypto_aead_cast(tfm); - crt->ivsize = alg->ivsize; + crt->setkey = alg->setkey; + crt->setauthsize = alg->setauthsize; + crt->encrypt = old_encrypt; + crt->decrypt = old_decrypt; + if (alg->ivsize) { + crt->givencrypt = alg->givencrypt ?: no_givcrypt; + crt->givdecrypt = alg->givdecrypt ?: no_givcrypt; + } else { + crt->givencrypt = aead_null_givencrypt; + crt->givdecrypt = aead_null_givdecrypt; + } + crt->child = __crypto_aead_cast(tfm); crt->authsize = alg->maxauthsize; return 0; } +static int crypto_aead_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_aead *aead = __crypto_aead_cast(tfm); + struct aead_alg *alg = crypto_aead_alg(aead); + + if (crypto_old_aead_alg(aead)->encrypt) + return crypto_old_aead_init_tfm(tfm); + + aead->setkey = alg->setkey; + aead->setauthsize = alg->setauthsize; + aead->encrypt = alg->encrypt; + aead->decrypt = alg->decrypt; + aead->child = __crypto_aead_cast(tfm); + aead->authsize = alg->maxauthsize; + + return 0; +} + #ifdef CONFIG_NET -static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) +static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_aead raead; - struct aead_alg *aead = &alg->cra_aead; + struct old_aead_alg *aead = &alg->cra_aead; strncpy(raead.type, "aead", sizeof(raead.type)); strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv)); @@ -133,6 +205,64 @@ nla_put_failure: return -EMSGSIZE; } #else +static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + return -ENOSYS; +} +#endif + +static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct old_aead_alg *aead = &alg->cra_aead; + + seq_printf(m, "type : aead\n"); + seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 
+ "yes" : "no"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "ivsize : %u\n", aead->ivsize); + seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); + seq_printf(m, "geniv : %s\n", aead->geniv ?: "<built-in>"); +} + +const struct crypto_type crypto_aead_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_aead_init_tfm, +#ifdef CONFIG_PROC_FS + .show = crypto_old_aead_show, +#endif + .report = crypto_old_aead_report, + .lookup = crypto_lookup_aead, + .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV), + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_AEAD, + .tfmsize = offsetof(struct crypto_aead, base), +}; +EXPORT_SYMBOL_GPL(crypto_aead_type); + +#ifdef CONFIG_NET +static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) +{ + struct crypto_report_aead raead; + struct aead_alg *aead = container_of(alg, struct aead_alg, base); + + strncpy(raead.type, "aead", sizeof(raead.type)); + strncpy(raead.geniv, "<none>", sizeof(raead.geniv)); + + raead.blocksize = alg->cra_blocksize; + raead.maxauthsize = aead->maxauthsize; + raead.ivsize = aead->ivsize; + + if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, + sizeof(struct crypto_report_aead), &raead)) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} +#else static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) { return -ENOSYS; @@ -143,7 +273,7 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) __attribute__ ((unused)); static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) { - struct aead_alg *aead = &alg->cra_aead; + struct aead_alg *aead = container_of(alg, struct aead_alg, base); seq_printf(m, "type : aead\n"); seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 
@@ -151,18 +281,21 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "ivsize : %u\n", aead->ivsize); seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); - seq_printf(m, "geniv : %s\n", aead->geniv ?: "<built-in>"); + seq_printf(m, "geniv : <none>\n"); } -const struct crypto_type crypto_aead_type = { - .ctxsize = crypto_aead_ctxsize, - .init = crypto_init_aead_ops, +static const struct crypto_type crypto_new_aead_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_aead_init_tfm, #ifdef CONFIG_PROC_FS .show = crypto_aead_show, #endif .report = crypto_aead_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_AEAD, + .tfmsize = offsetof(struct crypto_aead, base), }; -EXPORT_SYMBOL_GPL(crypto_aead_type); static int aead_null_givencrypt(struct aead_givcrypt_request *req) { @@ -174,33 +307,11 @@ static int aead_null_givdecrypt(struct aead_givcrypt_request *req) return crypto_aead_decrypt(&req->areq); } -static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) -{ - struct aead_alg *alg = &tfm->__crt_alg->cra_aead; - struct aead_tfm *crt = &tfm->crt_aead; - - if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) - return -EINVAL; - - crt->setkey = setkey; - crt->encrypt = alg->encrypt; - crt->decrypt = alg->decrypt; - if (!alg->ivsize) { - crt->givencrypt = aead_null_givencrypt; - crt->givdecrypt = aead_null_givdecrypt; - } - crt->base = __crypto_aead_cast(tfm); - crt->ivsize = alg->ivsize; - crt->authsize = alg->maxauthsize; - - return 0; -} - #ifdef CONFIG_NET static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_aead raead; - struct aead_alg *aead = &alg->cra_aead; + struct old_aead_alg *aead = &alg->cra_aead; strncpy(raead.type, "nivaead", sizeof(raead.type)); strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv)); @@ -229,7 +340,7 @@ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) __attribute__ ((unused)); static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) { - struct aead_alg *aead = &alg->cra_aead; + struct old_aead_alg *aead = &alg->cra_aead; seq_printf(m, "type : nivaead\n"); seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 
@@ -241,43 +352,36 @@ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg) } const struct crypto_type crypto_nivaead_type = { - .ctxsize = crypto_aead_ctxsize, - .init = crypto_init_nivaead_ops, + .extsize = crypto_alg_extsize, + .init_tfm = crypto_aead_init_tfm, #ifdef CONFIG_PROC_FS .show = crypto_nivaead_show, #endif .report = crypto_nivaead_report, + .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV), + .maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV, + .type = CRYPTO_ALG_TYPE_AEAD, + .tfmsize = offsetof(struct crypto_aead, base), }; EXPORT_SYMBOL_GPL(crypto_nivaead_type); static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn, const char *name, u32 type, u32 mask) { - struct crypto_alg *alg; - int err; - - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - type |= CRYPTO_ALG_TYPE_AEAD; - mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV; - - alg = crypto_alg_mod_lookup(name, type, mask); - if (IS_ERR(alg)) - return PTR_ERR(alg); - - err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); - crypto_mod_put(alg); - return err; + spawn->base.frontend = &crypto_nivaead_type; + return crypto_grab_spawn(&spawn->base, name, type, mask); } -struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, - struct rtattr **tb, u32 type, - u32 mask) +struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, + struct rtattr **tb, u32 type, u32 mask) { const char *name; struct crypto_aead_spawn *spawn; struct crypto_attr_type *algt; - struct crypto_instance *inst; - struct crypto_alg *alg; + struct aead_instance *inst; + struct aead_alg *alg; + unsigned int ivsize; + unsigned int maxauthsize; int err; algt = crypto_get_attr_type(tb); @@ -296,20 +400,23 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, if (!inst) return ERR_PTR(-ENOMEM); - spawn = crypto_instance_ctx(inst); + spawn = aead_instance_ctx(inst); /* Ignore async algorithms if necessary. */ mask |= crypto_requires_sync(algt->type, algt->mask); - crypto_set_aead_spawn(spawn, inst); + crypto_set_aead_spawn(spawn, aead_crypto_instance(inst)); err = crypto_grab_nivaead(spawn, name, type, mask); if (err) goto err_free_inst; - alg = crypto_aead_spawn_alg(spawn); + alg = crypto_spawn_aead_alg(spawn); + + ivsize = crypto_aead_alg_ivsize(alg); + maxauthsize = crypto_aead_alg_maxauthsize(alg); err = -EINVAL; - if (!alg->cra_aead.ivsize) + if (!ivsize) goto err_drop_alg; /* @@ -318,39 +425,54 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl, * template name and double-check the IV generator. 
*/ if (algt->mask & CRYPTO_ALG_GENIV) { - if (strcmp(tmpl->name, alg->cra_aead.geniv)) + if (!alg->base.cra_aead.encrypt) + goto err_drop_alg; + if (strcmp(tmpl->name, alg->base.cra_aead.geniv)) goto err_drop_alg; - memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - memcpy(inst->alg.cra_driver_name, alg->cra_driver_name, + memcpy(inst->alg.base.cra_name, alg->base.cra_name, CRYPTO_MAX_ALG_NAME); - } else { - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", tmpl->name, alg->cra_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "%s(%s)", tmpl->name, alg->cra_driver_name) >= - CRYPTO_MAX_ALG_NAME) - goto err_drop_alg; + memcpy(inst->alg.base.cra_driver_name, + alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME); + + inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD | + CRYPTO_ALG_GENIV; + inst->alg.base.cra_flags |= alg->base.cra_flags & + CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = alg->base.cra_blocksize; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask; + inst->alg.base.cra_type = &crypto_aead_type; + + inst->alg.base.cra_aead.ivsize = ivsize; + inst->alg.base.cra_aead.maxauthsize = maxauthsize; + + inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey; + inst->alg.base.cra_aead.setauthsize = + alg->base.cra_aead.setauthsize; + inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt; + inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt; + + goto out; } - inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV; - inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_aead_type; + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->base.cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "%s(%s)", tmpl->name, alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_drop_alg; - inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize; - inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize; - inst->alg.cra_aead.geniv = alg->cra_aead.geniv; + inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = alg->base.cra_priority; + inst->alg.base.cra_blocksize = alg->base.cra_blocksize; + inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - inst->alg.cra_aead.setkey = alg->cra_aead.setkey; - inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize; - inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt; - inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt; + inst->alg.ivsize = ivsize; + inst->alg.maxauthsize = maxauthsize; out: return inst; @@ -364,9 +486,9 @@ err_free_inst: } EXPORT_SYMBOL_GPL(aead_geniv_alloc); -void aead_geniv_free(struct crypto_instance *inst) +void aead_geniv_free(struct aead_instance *inst) { - crypto_drop_aead(crypto_instance_ctx(inst)); + crypto_drop_aead(aead_instance_ctx(inst)); kfree(inst); } EXPORT_SYMBOL_GPL(aead_geniv_free); @@ -374,14 +496,17 @@ EXPORT_SYMBOL_GPL(aead_geniv_free); int aead_geniv_init(struct crypto_tfm *tfm) { struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_aead *child; struct crypto_aead *aead; - aead = crypto_spawn_aead(crypto_instance_ctx(inst)); - if 
(IS_ERR(aead)) - return PTR_ERR(aead); + aead = __crypto_aead_cast(tfm); + + child = crypto_spawn_aead(crypto_instance_ctx(inst)); + if (IS_ERR(child)) + return PTR_ERR(child); - tfm->crt_aead.base = aead; - tfm->crt_aead.reqsize += crypto_aead_reqsize(aead); + aead->child = child; + aead->reqsize += crypto_aead_reqsize(child); return 0; } @@ -389,7 +514,7 @@ EXPORT_SYMBOL_GPL(aead_geniv_init); void aead_geniv_exit(struct crypto_tfm *tfm) { - crypto_free_aead(tfm->crt_aead.base); + crypto_free_aead(__crypto_aead_cast(tfm)->child); } EXPORT_SYMBOL_GPL(aead_geniv_exit); @@ -443,6 +568,13 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask) if (!tmpl) goto kill_larval; + if (tmpl->create) { + err = tmpl->create(tmpl, tb); + if (err) + goto put_tmpl; + goto ok; + } + inst = tmpl->alloc(tb); err = PTR_ERR(inst); if (IS_ERR(inst)) @@ -454,6 +586,7 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask) goto put_tmpl; } +ok: /* Redo the lookup to use the instance we just registered. */ err = -EAGAIN; @@ -489,7 +622,7 @@ struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask) return alg; if (alg->cra_type == &crypto_aead_type) { - if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) { + if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { crypto_mod_put(alg); alg = ERR_PTR(-ENOENT); } @@ -505,62 +638,62 @@ EXPORT_SYMBOL_GPL(crypto_lookup_aead); int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, u32 type, u32 mask) { - struct crypto_alg *alg; - int err; + spawn->base.frontend = &crypto_aead_type; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_aead); - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - type |= CRYPTO_ALG_TYPE_AEAD; - mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - mask |= CRYPTO_ALG_TYPE_MASK; +struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_aead); - alg = crypto_lookup_aead(name, type, mask); - if (IS_ERR(alg)) - return PTR_ERR(alg); +static int aead_prepare_alg(struct aead_alg *alg) +{ + struct crypto_alg *base = &alg->base; - err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); - crypto_mod_put(alg); - return err; + if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) + return -EINVAL; + + base->cra_type = &crypto_new_aead_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; + + return 0; } -EXPORT_SYMBOL_GPL(crypto_grab_aead); -struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask) +int crypto_register_aead(struct aead_alg *alg) { - struct crypto_tfm *tfm; + struct crypto_alg *base = &alg->base; int err; - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - type |= CRYPTO_ALG_TYPE_AEAD; - mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - mask |= CRYPTO_ALG_TYPE_MASK; - - for (;;) { - struct crypto_alg *alg; + err = aead_prepare_alg(alg); + if (err) + return err; - alg = crypto_lookup_aead(alg_name, type, mask); - if (IS_ERR(alg)) { - err = PTR_ERR(alg); - goto err; - } + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_aead); - tfm = __crypto_alloc_tfm(alg, type, mask); - if (!IS_ERR(tfm)) - return __crypto_aead_cast(tfm); +int crypto_unregister_aead(struct aead_alg *alg) +{ + return crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_aead); - 
crypto_mod_put(alg); - err = PTR_ERR(tfm); +int aead_register_instance(struct crypto_template *tmpl, + struct aead_instance *inst) +{ + int err; -err: - if (err != -EAGAIN) - break; - if (signal_pending(current)) { - err = -EINTR; - break; - } - } + err = aead_prepare_alg(&inst->alg); + if (err) + return err; - return ERR_PTR(err); + return crypto_register_instance(tmpl, aead_crypto_instance(inst)); } -EXPORT_SYMBOL_GPL(crypto_alloc_aead); +EXPORT_SYMBOL_GPL(aead_register_instance); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)"); diff --git a/crypto/algapi.c b/crypto/algapi.c index d2627a3d4ed8..abf100c054e0 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -12,6 +12,7 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/fips.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> @@ -43,12 +44,9 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg) static inline void crypto_check_module_sig(struct module *mod) { -#ifdef CONFIG_CRYPTO_FIPS - if (fips_enabled && mod && !mod->sig_ok) + if (fips_enabled && mod && !module_sig_ok(mod)) panic("Module %s signature verification failed in FIPS mode\n", - mod->name); -#endif - return; + module_name(mod)); } static int crypto_check_alg(struct crypto_alg *alg) @@ -614,6 +612,22 @@ out: } EXPORT_SYMBOL_GPL(crypto_init_spawn2); +int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name, + u32 type, u32 mask) +{ + struct crypto_alg *alg; + int err; + + alg = crypto_find_alg(name, spawn->frontend, type, mask); + if (IS_ERR(alg)) + return PTR_ERR(alg); + + err = crypto_init_spawn(spawn, alg, spawn->inst, mask); + crypto_mod_put(alg); + return err; +} +EXPORT_SYMBOL_GPL(crypto_grab_spawn); + void crypto_drop_spawn(struct crypto_spawn *spawn) { if (!spawn->alg) @@ -964,6 +978,12 @@ void crypto_xor(u8 *dst, const u8 *src, unsigned int size) } EXPORT_SYMBOL_GPL(crypto_xor); +unsigned int crypto_alg_extsize(struct crypto_alg *alg) +{ + return alg->cra_ctxsize; +} +EXPORT_SYMBOL_GPL(crypto_alg_extsize); + static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 69abada22373..a55e4e6fa3d8 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -13,6 +13,7 @@ * any later version. */ +#include <crypto/aead.h> #include <crypto/scatterwalk.h> #include <crypto/if_alg.h> #include <linux/init.h> diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 8109aaad2726..150c2b6480ed 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -164,7 +164,7 @@ static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen) * Check whether seedlen is of sufficient size is done in RNG * implementations. 
*/ - return crypto_rng_reset(private, (u8 *)seed, seedlen); + return crypto_rng_reset(private, seed, seedlen); } static const struct af_alg_type algif_type_rng = { diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 765fe7609348..eff337ce9003 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -20,8 +20,6 @@ #include <linux/moduleparam.h> #include <linux/string.h> -#include "internal.h" - #define DEFAULT_PRNG_KEY "0123456789abcdef" #define DEFAULT_PRNG_KSZ 16 #define DEFAULT_BLK_SZ 16 @@ -281,11 +279,11 @@ static void free_prng_context(struct prng_context *ctx) } static int reset_prng_context(struct prng_context *ctx, - unsigned char *key, size_t klen, - unsigned char *V, unsigned char *DT) + const unsigned char *key, size_t klen, + const unsigned char *V, const unsigned char *DT) { int ret; - unsigned char *prng_key; + const unsigned char *prng_key; spin_lock_bh(&ctx->prng_lock); ctx->flags |= PRNG_NEED_RESET; @@ -353,8 +351,9 @@ static void cprng_exit(struct crypto_tfm *tfm) free_prng_context(crypto_tfm_ctx(tfm)); } -static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen) +static int cprng_get_random(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *rdata, unsigned int dlen) { struct prng_context *prng = crypto_rng_ctx(tfm); @@ -367,11 +366,12 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, * V and KEY are required during reset, and DT is optional, detected * as being present by testing the length of the seed */ -static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int cprng_reset(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) { struct prng_context *prng = crypto_rng_ctx(tfm); - u8 *key = seed + DEFAULT_BLK_SZ; - u8 *dt = NULL; + const u8 *key = seed + DEFAULT_BLK_SZ; + const u8 *dt = NULL; if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) return -EINVAL; @@ -387,18 +387,20 @@ static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) } #ifdef CONFIG_CRYPTO_FIPS -static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen) +static int fips_cprng_get_random(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *rdata, unsigned int dlen) { struct prng_context *prng = crypto_rng_ctx(tfm); return get_prng_bytes(rdata, dlen, prng, 1); } -static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int fips_cprng_reset(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) { u8 rdata[DEFAULT_BLK_SZ]; - u8 *key = seed + DEFAULT_BLK_SZ; + const u8 *key = seed + DEFAULT_BLK_SZ; int rc; struct prng_context *prng = crypto_rng_ctx(tfm); @@ -424,40 +426,32 @@ out: } #endif -static struct crypto_alg rng_algs[] = { { - .cra_name = "stdrng", - .cra_driver_name = "ansi_cprng", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = sizeof(struct prng_context), - .cra_type = &crypto_rng_type, - .cra_module = THIS_MODULE, - .cra_init = cprng_init, - .cra_exit = cprng_exit, - .cra_u = { - .rng = { - .rng_make_random = cprng_get_random, - .rng_reset = cprng_reset, - .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, - } +static struct rng_alg rng_algs[] = { { + .generate = cprng_get_random, + .seed = cprng_reset, + .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, + .base = { + .cra_name = "stdrng", + .cra_driver_name = "ansi_cprng", + .cra_priority = 100, + .cra_ctxsize = sizeof(struct prng_context), + .cra_module = THIS_MODULE, + .cra_init = cprng_init, + .cra_exit = cprng_exit, 
} #ifdef CONFIG_CRYPTO_FIPS }, { - .cra_name = "fips(ansi_cprng)", - .cra_driver_name = "fips_ansi_cprng", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = sizeof(struct prng_context), - .cra_type = &crypto_rng_type, - .cra_module = THIS_MODULE, - .cra_init = cprng_init, - .cra_exit = cprng_exit, - .cra_u = { - .rng = { - .rng_make_random = fips_cprng_get_random, - .rng_reset = fips_cprng_reset, - .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, - } + .generate = fips_cprng_get_random, + .seed = fips_cprng_reset, + .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, + .base = { + .cra_name = "fips(ansi_cprng)", + .cra_driver_name = "fips_ansi_cprng", + .cra_priority = 300, + .cra_ctxsize = sizeof(struct prng_context), + .cra_module = THIS_MODULE, + .cra_init = cprng_init, + .cra_exit = cprng_exit, } #endif } }; @@ -465,12 +459,12 @@ static struct crypto_alg rng_algs[] = { { /* Module initalization */ static int __init prng_mod_init(void) { - return crypto_register_algs(rng_algs, ARRAY_SIZE(rng_algs)); + return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs)); } static void __exit prng_mod_fini(void) { - crypto_unregister_algs(rng_algs, ARRAY_SIZE(rng_algs)); + crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs)); } MODULE_LICENSE("GPL"); diff --git a/crypto/authenc.c b/crypto/authenc.c index 78fb16cab13f..3e852299afb4 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -10,7 +10,7 @@ * */ -#include <crypto/aead.h> +#include <crypto/internal/aead.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> @@ -570,13 +570,14 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) crypto_ahash_alignmask(auth) + 1) + crypto_ablkcipher_ivsize(enc); - tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) + - ctx->reqoff + - max_t(unsigned int, - crypto_ahash_reqsize(auth) + - sizeof(struct ahash_request), - sizeof(struct skcipher_givcrypt_request) + - crypto_ablkcipher_reqsize(enc)); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct authenc_request_ctx) + + ctx->reqoff + + max_t(unsigned int, + crypto_ahash_reqsize(auth) + + sizeof(struct ahash_request), + sizeof(struct skcipher_givcrypt_request) + + crypto_ablkcipher_reqsize(enc))); return 0; diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 024bff2344fc..a3da6770bc9e 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -12,7 +12,7 @@ * */ -#include <crypto/aead.h> +#include <crypto/internal/aead.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/authenc.h> @@ -662,13 +662,14 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm) crypto_ahash_alignmask(auth) + 1) + crypto_ablkcipher_ivsize(enc); - tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) + - ctx->reqoff + - max_t(unsigned int, - crypto_ahash_reqsize(auth) + - sizeof(struct ahash_request), - sizeof(struct skcipher_givcrypt_request) + - crypto_ablkcipher_reqsize(enc)); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct authenc_esn_request_ctx) + + ctx->reqoff + + max_t(unsigned int, + crypto_ahash_reqsize(auth) + + sizeof(struct ahash_request), + sizeof(struct skcipher_givcrypt_request) + + crypto_ablkcipher_reqsize(enc))); return 0; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 0122bec38564..11b981492031 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -14,6 +14,7 @@ * */ +#include <crypto/aead.h> #include <crypto/internal/skcipher.h> #include 
<crypto/scatterwalk.h> #include <linux/errno.h> diff --git a/crypto/ccm.c b/crypto/ccm.c index 003bbbd21a2b..a4d1a5eda18b 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -453,9 +453,9 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm) align = crypto_tfm_alg_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); - tfm->crt_aead.reqsize = align + - sizeof(struct crypto_ccm_req_priv_ctx) + - crypto_ablkcipher_reqsize(ctr); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + align + sizeof(struct crypto_ccm_req_priv_ctx) + + crypto_ablkcipher_reqsize(ctr)); return 0; @@ -729,10 +729,10 @@ static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm) align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); - tfm->crt_aead.reqsize = sizeof(struct aead_request) + - ALIGN(crypto_aead_reqsize(aead), - crypto_tfm_ctx_alignment()) + - align + 16; + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct aead_request) + + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + + align + 16); return 0; } diff --git a/crypto/cryptd.c b/crypto/cryptd.c index b0602ba03111..4264c8d9c97d 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -295,6 +295,23 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) crypto_free_blkcipher(ctx->child); } +static int cryptd_init_instance(struct crypto_instance *inst, + struct crypto_alg *alg) +{ + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "cryptd(%s)", + alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); + + inst->alg.cra_priority = alg->cra_priority + 50; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + + return 0; +} + static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, unsigned int tail) { @@ -308,17 +325,10 @@ static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, inst = (void *)(p + head); - err = -ENAMETOOLONG; - if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, - "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + err = cryptd_init_instance(inst, alg); + if (err) goto out_free_inst; - memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - - inst->alg.cra_priority = alg->cra_priority + 50; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - out: return p; @@ -729,7 +739,8 @@ static int cryptd_aead_init_tfm(struct crypto_tfm *tfm) crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP); ctx->child = cipher; - tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct cryptd_aead_request_ctx)); return 0; } @@ -746,29 +757,34 @@ static int cryptd_create_aead(struct crypto_template *tmpl, struct aead_instance_ctx *ctx; struct crypto_instance *inst; struct crypto_alg *alg; - u32 type = CRYPTO_ALG_TYPE_AEAD; - u32 mask = CRYPTO_ALG_TYPE_MASK; + const char *name; + u32 type = 0; + u32 mask = 0; int err; cryptd_check_internal(tb, &type, &mask); - alg = crypto_get_attr_alg(tb, type, mask); - if (IS_ERR(alg)) - return PTR_ERR(alg); + name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(name)) + return PTR_ERR(name); - inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); - err = PTR_ERR(inst); - if (IS_ERR(inst)) - goto out_put_alg; + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return -ENOMEM; ctx = crypto_instance_ctx(inst); ctx->queue = 
queue; - err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + crypto_set_aead_spawn(&ctx->aead_spawn, inst); + err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask); if (err) goto out_free_inst; + alg = crypto_aead_spawn_alg(&ctx->aead_spawn); + err = cryptd_init_instance(inst, alg); + if (err) + goto out_drop_aead; + type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; if (alg->cra_flags & CRYPTO_ALG_INTERNAL) type |= CRYPTO_ALG_INTERNAL; @@ -789,12 +805,11 @@ static int cryptd_create_aead(struct crypto_template *tmpl, err = crypto_register_instance(tmpl, inst); if (err) { - crypto_drop_spawn(&ctx->aead_spawn.base); +out_drop_aead: + crypto_drop_aead(&ctx->aead_spawn); out_free_inst: kfree(inst); } -out_put_alg: - crypto_mod_put(alg); return err; } diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index a20319132e33..941c9a434d50 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -25,6 +25,10 @@ #include <linux/mm.h> #include <linux/string.h> +static DEFINE_MUTEX(crypto_default_null_skcipher_lock); +static struct crypto_blkcipher *crypto_default_null_skcipher; +static int crypto_default_null_skcipher_refcnt; + static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { @@ -149,6 +153,41 @@ MODULE_ALIAS_CRYPTO("compress_null"); MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); +struct crypto_blkcipher *crypto_get_default_null_skcipher(void) +{ + struct crypto_blkcipher *tfm; + + mutex_lock(&crypto_default_null_skcipher_lock); + tfm = crypto_default_null_skcipher; + + if (!tfm) { + tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0); + if (IS_ERR(tfm)) + goto unlock; + + crypto_default_null_skcipher = tfm; + } + + crypto_default_null_skcipher_refcnt++; + +unlock: + mutex_unlock(&crypto_default_null_skcipher_lock); + + return tfm; +} +EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); + +void crypto_put_default_null_skcipher(void) +{ + mutex_lock(&crypto_default_null_skcipher_lock); + if (!--crypto_default_null_skcipher_refcnt) { + crypto_free_blkcipher(crypto_default_null_skcipher); + crypto_default_null_skcipher = NULL; + } + mutex_unlock(&crypto_default_null_skcipher_lock); +} +EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); + static int __init crypto_null_mod_init(void) { int ret = 0; diff --git a/crypto/drbg.c b/crypto/drbg.c index b69409cb7e6a..92843488af09 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -235,7 +235,7 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg, #ifdef CONFIG_CRYPTO_FIPS int ret = 0; /* skip test if we test the overall system */ - if (drbg->test_data) + if (list_empty(&drbg->test_data.list)) return true; /* only perform test in FIPS mode */ if (0 == fips_enabled) @@ -487,7 +487,7 @@ static int drbg_ctr_df(struct drbg_state *drbg, out: memset(iv, 0, drbg_blocklen(drbg)); - memset(temp, 0, drbg_statelen(drbg)); + memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); memset(pad, 0, drbg_blocklen(drbg)); return ret; } @@ -1041,6 +1041,43 @@ static struct drbg_state_ops drbg_hash_ops = { * Functions common for DRBG implementations ******************************************************************/ +static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, + int reseed) +{ + int ret = drbg->d_ops->update(drbg, seed, reseed); + + if (ret) + return ret; + + drbg->seeded = true; + /* 10.1.1.2 / 10.1.1.3 step 5 */ + drbg->reseed_ctr = 1; + + return ret; +} + +static void 
drbg_async_seed(struct work_struct *work) +{ + struct drbg_string data; + LIST_HEAD(seedlist); + struct drbg_state *drbg = container_of(work, struct drbg_state, + seed_work); + int ret; + + get_blocking_random_bytes(drbg->seed_buf, drbg->seed_buf_len); + + drbg_string_fill(&data, drbg->seed_buf, drbg->seed_buf_len); + list_add_tail(&data.list, &seedlist); + mutex_lock(&drbg->drbg_mutex); + ret = __drbg_seed(drbg, &seedlist, true); + if (!ret && drbg->jent) { + crypto_free_rng(drbg->jent); + drbg->jent = NULL; + } + memzero_explicit(drbg->seed_buf, drbg->seed_buf_len); + mutex_unlock(&drbg->drbg_mutex); +} + /* * Seeding or reseeding of the DRBG * @@ -1056,8 +1093,6 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, bool reseed) { int ret = 0; - unsigned char *entropy = NULL; - size_t entropylen = 0; struct drbg_string data1; LIST_HEAD(seedlist); @@ -1068,31 +1103,29 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, return -EINVAL; } - if (drbg->test_data && drbg->test_data->testentropy) { - drbg_string_fill(&data1, drbg->test_data->testentropy->buf, - drbg->test_data->testentropy->len); + if (list_empty(&drbg->test_data.list)) { + drbg_string_fill(&data1, drbg->test_data.buf, + drbg->test_data.len); pr_devel("DRBG: using test entropy\n"); } else { - /* - * Gather entropy equal to the security strength of the DRBG. - * With a derivation function, a nonce is required in addition - * to the entropy. A nonce must be at least 1/2 of the security - * strength of the DRBG in size. Thus, entropy * nonce is 3/2 - * of the strength. The consideration of a nonce is only - * applicable during initial seeding. - */ - entropylen = drbg_sec_strength(drbg->core->flags); - if (!entropylen) - return -EFAULT; - if (!reseed) - entropylen = ((entropylen + 1) / 2) * 3; - pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n", - entropylen); - entropy = kzalloc(entropylen, GFP_KERNEL); - if (!entropy) - return -ENOMEM; - get_random_bytes(entropy, entropylen); - drbg_string_fill(&data1, entropy, entropylen); + /* Get seed from in-kernel /dev/urandom */ + get_random_bytes(drbg->seed_buf, drbg->seed_buf_len); + + /* Get seed from Jitter RNG */ + if (!drbg->jent || + crypto_rng_get_bytes(drbg->jent, + drbg->seed_buf + drbg->seed_buf_len, + drbg->seed_buf_len)) { + drbg_string_fill(&data1, drbg->seed_buf, + drbg->seed_buf_len); + pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n", + drbg->seed_buf_len); + } else { + drbg_string_fill(&data1, drbg->seed_buf, + drbg->seed_buf_len * 2); + pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n", + drbg->seed_buf_len * 2); + } } list_add_tail(&data1.list, &seedlist); @@ -1111,16 +1144,28 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, memset(drbg->C, 0, drbg_statelen(drbg)); } - ret = drbg->d_ops->update(drbg, &seedlist, reseed); + ret = __drbg_seed(drbg, &seedlist, reseed); + + /* + * Clear the initial entropy buffer as the async call may not overwrite + * that buffer for quite some time. + */ + memzero_explicit(drbg->seed_buf, drbg->seed_buf_len * 2); if (ret) goto out; + /* + * For all subsequent seeding calls, we only need the seed buffer + * equal to the security strength of the DRBG. We undo the calculation + * in drbg_alloc_state. + */ + if (!reseed) + drbg->seed_buf_len = drbg->seed_buf_len / 3 * 2; - drbg->seeded = true; - /* 10.1.1.2 / 10.1.1.3 step 5 */ - drbg->reseed_ctr = 1; + /* Invoke asynchronous seeding unless DRBG is in test mode. 
*/ + if (!list_empty(&drbg->test_data.list) && !reseed) + schedule_work(&drbg->seed_work); out: - kzfree(entropy); return ret; } @@ -1136,11 +1181,19 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) kzfree(drbg->scratchpad); drbg->scratchpad = NULL; drbg->reseed_ctr = 0; + drbg->d_ops = NULL; + drbg->core = NULL; #ifdef CONFIG_CRYPTO_FIPS kzfree(drbg->prev); drbg->prev = NULL; drbg->fips_primed = false; #endif + kzfree(drbg->seed_buf); + drbg->seed_buf = NULL; + if (drbg->jent) { + crypto_free_rng(drbg->jent); + drbg->jent = NULL; + } } /* @@ -1152,6 +1205,27 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) int ret = -ENOMEM; unsigned int sb_size = 0; + switch (drbg->core->flags & DRBG_TYPE_MASK) { +#ifdef CONFIG_CRYPTO_DRBG_HMAC + case DRBG_HMAC: + drbg->d_ops = &drbg_hmac_ops; + break; +#endif /* CONFIG_CRYPTO_DRBG_HMAC */ +#ifdef CONFIG_CRYPTO_DRBG_HASH + case DRBG_HASH: + drbg->d_ops = &drbg_hash_ops; + break; +#endif /* CONFIG_CRYPTO_DRBG_HASH */ +#ifdef CONFIG_CRYPTO_DRBG_CTR + case DRBG_CTR: + drbg->d_ops = &drbg_ctr_ops; + break; +#endif /* CONFIG_CRYPTO_DRBG_CTR */ + default: + ret = -EOPNOTSUPP; + goto err; + } + drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); if (!drbg->V) goto err; @@ -1181,87 +1255,50 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) if (!drbg->scratchpad) goto err; } - spin_lock_init(&drbg->drbg_lock); - return 0; - -err: - drbg_dealloc_state(drbg); - return ret; -} -/* - * Strategy to avoid holding long term locks: generate a shadow copy of DRBG - * and perform all operations on this shadow copy. After finishing, restore - * the updated state of the shadow copy into original drbg state. This way, - * only the read and write operations of the original drbg state must be - * locked - */ -static inline void drbg_copy_drbg(struct drbg_state *src, - struct drbg_state *dst) -{ - if (!src || !dst) - return; - memcpy(dst->V, src->V, drbg_statelen(src)); - memcpy(dst->C, src->C, drbg_statelen(src)); - dst->reseed_ctr = src->reseed_ctr; - dst->seeded = src->seeded; - dst->pr = src->pr; -#ifdef CONFIG_CRYPTO_FIPS - dst->fips_primed = src->fips_primed; - memcpy(dst->prev, src->prev, drbg_blocklen(src)); -#endif /* - * Not copied: - * scratchpad is initialized drbg_alloc_state; - * priv_data is initialized with call to crypto_init; - * d_ops and core are set outside, as these parameters are const; - * test_data is set outside to prevent it being copied back. + * Gather entropy equal to the security strength of the DRBG. + * With a derivation function, a nonce is required in addition + * to the entropy. A nonce must be at least 1/2 of the security + * strength of the DRBG in size. Thus, entropy * nonce is 3/2 + * of the strength. The consideration of a nonce is only + * applicable during initial seeding. */ -} - -static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow) -{ - int ret = -ENOMEM; - struct drbg_state *tmp = NULL; - - tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); - if (!tmp) - return -ENOMEM; + drbg->seed_buf_len = drbg_sec_strength(drbg->core->flags); + if (!drbg->seed_buf_len) { + ret = -EFAULT; + goto err; + } + /* + * Ensure we have sufficient buffer space for initial seed which + * consists of the seed from get_random_bytes and the Jitter RNG. 
+ */ + drbg->seed_buf_len = ((drbg->seed_buf_len + 1) / 2) * 3; + drbg->seed_buf = kzalloc(drbg->seed_buf_len * 2, GFP_KERNEL); + if (!drbg->seed_buf) + goto err; - /* read-only data as they are defined as const, no lock needed */ - tmp->core = drbg->core; - tmp->d_ops = drbg->d_ops; + INIT_WORK(&drbg->seed_work, drbg_async_seed); - ret = drbg_alloc_state(tmp); - if (ret) - goto err; + drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); + if(IS_ERR(drbg->jent)) + { + pr_info("DRBG: could not allocate Jitter RNG handle for seeding\n"); + /* + * As the Jitter RNG is a module that may not be present, we + * continue with the operation and do not fully tie the DRBG + * to the Jitter RNG. + */ + drbg->jent = NULL; + } - spin_lock_bh(&drbg->drbg_lock); - drbg_copy_drbg(drbg, tmp); - /* only make a link to the test buffer, as we only read that data */ - tmp->test_data = drbg->test_data; - spin_unlock_bh(&drbg->drbg_lock); - *shadow = tmp; return 0; err: - kzfree(tmp); + drbg_dealloc_state(drbg); return ret; } -static void drbg_restore_shadow(struct drbg_state *drbg, - struct drbg_state **shadow) -{ - struct drbg_state *tmp = *shadow; - - spin_lock_bh(&drbg->drbg_lock); - drbg_copy_drbg(tmp, drbg); - spin_unlock_bh(&drbg->drbg_lock); - drbg_dealloc_state(tmp); - kzfree(tmp); - *shadow = NULL; -} - /************************************************************************* * DRBG interface functions *************************************************************************/ @@ -1287,14 +1324,12 @@ static int drbg_generate(struct drbg_state *drbg, struct drbg_string *addtl) { int len = 0; - struct drbg_state *shadow = NULL; LIST_HEAD(addtllist); - struct drbg_string timestamp; - union { - cycles_t cycles; - unsigned char char_cycles[sizeof(cycles_t)]; - } now; + if (!drbg->core) { + pr_devel("DRBG: not yet seeded\n"); + return -EINVAL; + } if (0 == buflen || !buf) { pr_devel("DRBG: no output buffer provided\n"); return -EINVAL; @@ -1304,15 +1339,9 @@ static int drbg_generate(struct drbg_state *drbg, return -EINVAL; } - len = drbg_make_shadow(drbg, &shadow); - if (len) { - pr_devel("DRBG: shadow copy cannot be generated\n"); - return len; - } - /* 9.3.1 step 2 */ len = -EINVAL; - if (buflen > (drbg_max_request_bytes(shadow))) { + if (buflen > (drbg_max_request_bytes(drbg))) { pr_devel("DRBG: requested random numbers too large %u\n", buflen); goto err; @@ -1321,7 +1350,7 @@ static int drbg_generate(struct drbg_state *drbg, /* 9.3.1 step 3 is implicit with the chosen DRBG */ /* 9.3.1 step 4 */ - if (addtl && addtl->len > (drbg_max_addtl(shadow))) { + if (addtl && addtl->len > (drbg_max_addtl(drbg))) { pr_devel("DRBG: additional information string too long %zu\n", addtl->len); goto err; @@ -1332,46 +1361,29 @@ static int drbg_generate(struct drbg_state *drbg, * 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented * here. The spec is a bit convoluted here, we make it simpler. */ - if ((drbg_max_requests(shadow)) < shadow->reseed_ctr) - shadow->seeded = false; - - /* allocate cipher handle */ - len = shadow->d_ops->crypto_init(shadow); - if (len) - goto err; + if ((drbg_max_requests(drbg)) < drbg->reseed_ctr) + drbg->seeded = false; - if (shadow->pr || !shadow->seeded) { + if (drbg->pr || !drbg->seeded) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", drbg->pr ? "true" : "false", drbg->seeded ? 
"seeded" : "unseeded"); /* 9.3.1 steps 7.1 through 7.3 */ - len = drbg_seed(shadow, addtl, true); + len = drbg_seed(drbg, addtl, true); if (len) goto err; /* 9.3.1 step 7.4 */ addtl = NULL; } - /* - * Mix the time stamp into the DRBG state if the DRBG is not in - * test mode. If there are two callers invoking the DRBG at the same - * time, i.e. before the first caller merges its shadow state back, - * both callers would obtain the same random number stream without - * changing the state here. - */ - if (!drbg->test_data) { - now.cycles = random_get_entropy(); - drbg_string_fill(×tamp, now.char_cycles, sizeof(cycles_t)); - list_add_tail(×tamp.list, &addtllist); - } if (addtl && 0 < addtl->len) list_add_tail(&addtl->list, &addtllist); /* 9.3.1 step 8 and 10 */ - len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist); + len = drbg->d_ops->generate(drbg, buf, buflen, &addtllist); /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */ - shadow->reseed_ctr++; + drbg->reseed_ctr++; if (0 >= len) goto err; @@ -1391,7 +1403,7 @@ static int drbg_generate(struct drbg_state *drbg, * case somebody has a need to implement the test of 11.3.3. */ #if 0 - if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) { + if (drbg->reseed_ctr && !(drbg->reseed_ctr % 4096)) { int err = 0; pr_devel("DRBG: start to perform self test\n"); if (drbg->core->flags & DRBG_HMAC) @@ -1410,8 +1422,6 @@ static int drbg_generate(struct drbg_state *drbg, * are returned when reusing this DRBG cipher handle */ drbg_uninstantiate(drbg); - drbg_dealloc_state(shadow); - kzfree(shadow); return 0; } else { pr_devel("DRBG: self test successful\n"); @@ -1425,8 +1435,6 @@ static int drbg_generate(struct drbg_state *drbg, */ len = 0; err: - shadow->d_ops->crypto_fini(shadow); - drbg_restore_shadow(drbg, &shadow); return len; } @@ -1442,19 +1450,21 @@ static int drbg_generate_long(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct drbg_string *addtl) { - int len = 0; + unsigned int len = 0; unsigned int slice = 0; do { - int tmplen = 0; + int err = 0; unsigned int chunk = 0; slice = ((buflen - len) / drbg_max_request_bytes(drbg)); chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len); - tmplen = drbg_generate(drbg, buf + len, chunk, addtl); - if (0 >= tmplen) - return tmplen; - len += tmplen; + mutex_lock(&drbg->drbg_mutex); + err = drbg_generate(drbg, buf + len, chunk, addtl); + mutex_unlock(&drbg->drbg_mutex); + if (0 > err) + return err; + len += chunk; } while (slice > 0 && (len < buflen)); - return len; + return 0; } /* @@ -1477,32 +1487,12 @@ static int drbg_generate_long(struct drbg_state *drbg, static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, int coreref, bool pr) { - int ret = -ENOMEM; + int ret; + bool reseed = true; pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " "%s\n", coreref, pr ? 
"enabled" : "disabled"); - drbg->core = &drbg_cores[coreref]; - drbg->pr = pr; - drbg->seeded = false; - switch (drbg->core->flags & DRBG_TYPE_MASK) { -#ifdef CONFIG_CRYPTO_DRBG_HMAC - case DRBG_HMAC: - drbg->d_ops = &drbg_hmac_ops; - break; -#endif /* CONFIG_CRYPTO_DRBG_HMAC */ -#ifdef CONFIG_CRYPTO_DRBG_HASH - case DRBG_HASH: - drbg->d_ops = &drbg_hash_ops; - break; -#endif /* CONFIG_CRYPTO_DRBG_HASH */ -#ifdef CONFIG_CRYPTO_DRBG_CTR - case DRBG_CTR: - drbg->d_ops = &drbg_ctr_ops; - break; -#endif /* CONFIG_CRYPTO_DRBG_CTR */ - default: - return -EOPNOTSUPP; - } + mutex_lock(&drbg->drbg_mutex); /* 9.1 step 1 is implicit with the selected DRBG type */ @@ -1514,22 +1504,36 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, /* 9.1 step 4 is implicit in drbg_sec_strength */ - ret = drbg_alloc_state(drbg); - if (ret) - return ret; + if (!drbg->core) { + drbg->core = &drbg_cores[coreref]; + drbg->pr = pr; + drbg->seeded = false; - ret = -EFAULT; - if (drbg->d_ops->crypto_init(drbg)) - goto err; - ret = drbg_seed(drbg, pers, false); - drbg->d_ops->crypto_fini(drbg); - if (ret) + ret = drbg_alloc_state(drbg); + if (ret) + goto unlock; + + ret = -EFAULT; + if (drbg->d_ops->crypto_init(drbg)) + goto err; + + reseed = false; + } + + ret = drbg_seed(drbg, pers, reseed); + + if (ret && !reseed) { + drbg->d_ops->crypto_fini(drbg); goto err; + } - return 0; + mutex_unlock(&drbg->drbg_mutex); + return ret; err: drbg_dealloc_state(drbg); +unlock: + mutex_unlock(&drbg->drbg_mutex); return ret; } @@ -1544,10 +1548,11 @@ err: */ static int drbg_uninstantiate(struct drbg_state *drbg) { - spin_lock_bh(&drbg->drbg_lock); + cancel_work_sync(&drbg->seed_work); + if (drbg->d_ops) + drbg->d_ops->crypto_fini(drbg); drbg_dealloc_state(drbg); /* no scrubbing of test_data -- this shall survive an uninstantiate */ - spin_unlock_bh(&drbg->drbg_lock); return 0; } @@ -1555,16 +1560,17 @@ static int drbg_uninstantiate(struct drbg_state *drbg) * Helper function for setting the test data in the DRBG * * @drbg DRBG state handle - * @test_data test data to sets + * @data test data + * @len test data length */ -static inline void drbg_set_testdata(struct drbg_state *drbg, - struct drbg_test_data *test_data) +static void drbg_kcapi_set_entropy(struct crypto_rng *tfm, + const u8 *data, unsigned int len) { - if (!test_data || !test_data->testentropy) - return; - spin_lock_bh(&drbg->drbg_lock); - drbg->test_data = test_data; - spin_unlock_bh(&drbg->drbg_lock); + struct drbg_state *drbg = crypto_rng_ctx(tfm); + + mutex_lock(&drbg->drbg_mutex); + drbg_string_fill(&drbg->test_data, data, len); + mutex_unlock(&drbg->drbg_mutex); } /*************************************************************** @@ -1714,15 +1720,10 @@ static inline void drbg_convert_tfm_core(const char *cra_driver_name, static int drbg_kcapi_init(struct crypto_tfm *tfm) { struct drbg_state *drbg = crypto_tfm_ctx(tfm); - bool pr = false; - int coreref = 0; - drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr); - /* - * when personalization string is needed, the caller must call reset - * and provide the personalization string as seed information - */ - return drbg_instantiate(drbg, NULL, coreref, pr); + mutex_init(&drbg->drbg_mutex); + + return 0; } static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) @@ -1734,65 +1735,49 @@ static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) * Generate random numbers invoked by the kernel crypto API: * The API of the kernel crypto API is extended as follows: * - * If dlen is larger 
than zero, rdata is interpreted as the output buffer - * where random data is to be stored. - * - * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen - * which holds the additional information string that is used for the - * DRBG generation process. The output buffer that is to be used to store - * data is also pointed to by struct drbg_gen. + * src is additional input supplied to the RNG. + * slen is the length of src. + * dst is the output buffer where random data is to be stored. + * dlen is the length of dst. */ -static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen) +static int drbg_kcapi_random(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) { struct drbg_state *drbg = crypto_rng_ctx(tfm); - if (0 < dlen) { - return drbg_generate_long(drbg, rdata, dlen, NULL); - } else { - struct drbg_gen *data = (struct drbg_gen *)rdata; - struct drbg_string addtl; - /* catch NULL pointer */ - if (!data) - return 0; - drbg_set_testdata(drbg, data->test_data); + struct drbg_string *addtl = NULL; + struct drbg_string string; + + if (slen) { /* linked list variable is now local to allow modification */ - drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len); - return drbg_generate_long(drbg, data->outbuf, data->outlen, - &addtl); + drbg_string_fill(&string, src, slen); + addtl = &string; } + + return drbg_generate_long(drbg, dst, dlen, addtl); } /* - * Reset the DRBG invoked by the kernel crypto API - * The reset implies a full re-initialization of the DRBG. Similar to the - * generate function of drbg_kcapi_random, this function extends the - * kernel crypto API interface with struct drbg_gen + * Seed the DRBG invoked by the kernel crypto API */ -static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int drbg_kcapi_seed(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) { struct drbg_state *drbg = crypto_rng_ctx(tfm); struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm); bool pr = false; - struct drbg_string seed_string; + struct drbg_string string; + struct drbg_string *seed_string = NULL; int coreref = 0; - drbg_uninstantiate(drbg); drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref, &pr); if (0 < slen) { - drbg_string_fill(&seed_string, seed, slen); - return drbg_instantiate(drbg, &seed_string, coreref, pr); - } else { - struct drbg_gen *data = (struct drbg_gen *)seed; - /* allow invocation of API call with NULL, 0 */ - if (!data) - return drbg_instantiate(drbg, NULL, coreref, pr); - drbg_set_testdata(drbg, data->test_data); - /* linked list variable is now local to allow modification */ - drbg_string_fill(&seed_string, data->addtl->buf, - data->addtl->len); - return drbg_instantiate(drbg, &seed_string, coreref, pr); + drbg_string_fill(&string, seed, slen); + seed_string = &string; } + + return drbg_instantiate(drbg, seed_string, coreref, pr); } /*************************************************************** @@ -1811,7 +1796,6 @@ static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) */ static inline int __init drbg_healthcheck_sanity(void) { -#ifdef CONFIG_CRYPTO_FIPS int len = 0; #define OUTBUFLEN 16 unsigned char buf[OUTBUFLEN]; @@ -1839,6 +1823,8 @@ static inline int __init drbg_healthcheck_sanity(void) if (!drbg) return -ENOMEM; + mutex_init(&drbg->drbg_mutex); + /* * if the following tests fail, it is likely that there is a buffer * overflow as buf is much smaller than the requested or provided @@ -1877,37 
+1863,33 @@ static inline int __init drbg_healthcheck_sanity(void) outbuf: kzfree(drbg); return rc; -#else /* CONFIG_CRYPTO_FIPS */ - return 0; -#endif /* CONFIG_CRYPTO_FIPS */ } -static struct crypto_alg drbg_algs[22]; +static struct rng_alg drbg_algs[22]; /* * Fill the array drbg_algs used to register the different DRBGs * with the kernel crypto API. To fill the array, the information * from drbg_cores[] is used. */ -static inline void __init drbg_fill_array(struct crypto_alg *alg, +static inline void __init drbg_fill_array(struct rng_alg *alg, const struct drbg_core *core, int pr) { int pos = 0; static int priority = 100; - memset(alg, 0, sizeof(struct crypto_alg)); - memcpy(alg->cra_name, "stdrng", 6); + memcpy(alg->base.cra_name, "stdrng", 6); if (pr) { - memcpy(alg->cra_driver_name, "drbg_pr_", 8); + memcpy(alg->base.cra_driver_name, "drbg_pr_", 8); pos = 8; } else { - memcpy(alg->cra_driver_name, "drbg_nopr_", 10); + memcpy(alg->base.cra_driver_name, "drbg_nopr_", 10); pos = 10; } - memcpy(alg->cra_driver_name + pos, core->cra_name, + memcpy(alg->base.cra_driver_name + pos, core->cra_name, strlen(core->cra_name)); - alg->cra_priority = priority; + alg->base.cra_priority = priority; priority++; /* * If FIPS mode enabled, the selected DRBG shall have the @@ -1915,17 +1897,16 @@ static inline void __init drbg_fill_array(struct crypto_alg *alg, * it is selected. */ if (fips_enabled) - alg->cra_priority += 200; - - alg->cra_flags = CRYPTO_ALG_TYPE_RNG; - alg->cra_ctxsize = sizeof(struct drbg_state); - alg->cra_type = &crypto_rng_type; - alg->cra_module = THIS_MODULE; - alg->cra_init = drbg_kcapi_init; - alg->cra_exit = drbg_kcapi_cleanup; - alg->cra_u.rng.rng_make_random = drbg_kcapi_random; - alg->cra_u.rng.rng_reset = drbg_kcapi_reset; - alg->cra_u.rng.seedsize = 0; + alg->base.cra_priority += 200; + + alg->base.cra_ctxsize = sizeof(struct drbg_state); + alg->base.cra_module = THIS_MODULE; + alg->base.cra_init = drbg_kcapi_init; + alg->base.cra_exit = drbg_kcapi_cleanup; + alg->generate = drbg_kcapi_random; + alg->seed = drbg_kcapi_seed; + alg->set_ent = drbg_kcapi_set_entropy; + alg->seedsize = 0; } static int __init drbg_init(void) @@ -1958,12 +1939,12 @@ static int __init drbg_init(void) drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1); for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++) drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0); - return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); + return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); } static void __exit drbg_exit(void) { - crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); + crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); } module_init(drbg_init); diff --git a/crypto/echainiv.c b/crypto/echainiv.c new file mode 100644 index 000000000000..bd85dcc4fa3d --- /dev/null +++ b/crypto/echainiv.c @@ -0,0 +1,546 @@ +/* + * echainiv: Encrypted Chain IV Generator + * + * This generator generates an IV based on a sequence number by xoring it + * with a salt and then encrypting it with the same key as used to encrypt + * the plain text. This algorithm requires that the block size be equal + * to the IV size. It is mainly useful for CBC. + * + * This generator can only be used by algorithms where authentication + * is performed after encryption (i.e., authenc). 
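+ *
+ * Schematically, the construction described above is:
+ *
+ *	IV = E(K, seqno XOR salt)
+ *
+ * where K is the key also used for encrypting the plain text, salt is
+ * a random per-tfm value and seqno is the request sequence number, so
+ * each request yields a fresh, key-dependent IV.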
+ * + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include <crypto/internal/aead.h> +#include <crypto/null.h> +#include <crypto/rng.h> +#include <crypto/scatterwalk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/percpu.h> +#include <linux/spinlock.h> +#include <linux/string.h> + +#define MAX_IV_SIZE 16 + +struct echainiv_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + struct scatterlist ivbuf[2]; + struct scatterlist *ivsg; + struct aead_givcrypt_request subreq; +}; + +struct echainiv_ctx { + struct crypto_aead *child; + spinlock_t lock; + struct crypto_blkcipher *null; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv); + +static int echainiv_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen) +{ + struct echainiv_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setkey(ctx->child, key, keylen); +} + +static int echainiv_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct echainiv_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setauthsize(ctx->child, authsize); +} + +/* We don't care if we get preempted and read/write IVs from the next CPU. */ +static void echainiv_read_iv(u8 *dst, unsigned size) +{ + u32 *a = (u32 *)dst; + u32 __percpu *b = echainiv_iv; + + for (; size >= 4; size -= 4) { + *a++ = this_cpu_read(*b); + b++; + } +} + +static void echainiv_write_iv(const u8 *src, unsigned size) +{ + const u32 *a = (const u32 *)src; + u32 __percpu *b = echainiv_iv; + + for (; size >= 4; size -= 4) { + this_cpu_write(*b, *a); + a++; + b++; + } +} + +static void echainiv_encrypt_compat_complete2(struct aead_request *req, + int err) +{ + struct echainiv_request_ctx *rctx = aead_request_ctx(req); + struct aead_givcrypt_request *subreq = &rctx->subreq; + struct crypto_aead *geniv; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = crypto_aead_reqtfm(req); + scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0, + crypto_aead_ivsize(geniv), 1); + +out: + kzfree(subreq->giv); +} + +static void echainiv_encrypt_compat_complete( + struct crypto_async_request *base, int err) +{ + struct aead_request *req = base->data; + + echainiv_encrypt_compat_complete2(req, err); + aead_request_complete(req, err); +} + +static void echainiv_encrypt_complete2(struct aead_request *req, int err) +{ + struct aead_request *subreq = aead_request_ctx(req); + struct crypto_aead *geniv; + unsigned int ivsize; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = crypto_aead_reqtfm(req); + ivsize = crypto_aead_ivsize(geniv); + + echainiv_write_iv(subreq->iv, ivsize); + + if (req->iv != subreq->iv) + memcpy(req->iv, subreq->iv, ivsize); + +out: + if (req->iv != subreq->iv) + kzfree(subreq->iv); +} + +static void echainiv_encrypt_complete(struct crypto_async_request *base, + int err) +{ + struct aead_request *req = base->data; + + echainiv_encrypt_complete2(req, err); + aead_request_complete(req, err); +} + +static int echainiv_encrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct 
echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct echainiv_request_ctx *rctx = aead_request_ctx(req); + struct aead_givcrypt_request *subreq = &rctx->subreq; + unsigned int ivsize = crypto_aead_ivsize(geniv); + crypto_completion_t compl; + void *data; + u8 *info; + __be64 seq; + int err; + + if (req->cryptlen < ivsize) + return -EINVAL; + + compl = req->base.complete; + data = req->base.data; + + rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen); + info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg); + + if (!info) { + info = kmalloc(ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + compl = echainiv_encrypt_compat_complete; + data = req; + } + + memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq)); + + aead_givcrypt_set_tfm(subreq, ctx->child); + aead_givcrypt_set_callback(subreq, req->base.flags, + req->base.complete, req->base.data); + aead_givcrypt_set_crypt(subreq, + scatterwalk_ffwd(rctx->src, req->src, + req->assoclen + ivsize), + scatterwalk_ffwd(rctx->dst, rctx->ivsg, + ivsize), + req->cryptlen - ivsize, req->iv); + aead_givcrypt_set_assoc(subreq, req->src, req->assoclen); + aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq)); + + err = crypto_aead_givencrypt(subreq); + if (unlikely(PageHighMem(sg_page(rctx->ivsg)))) + echainiv_encrypt_compat_complete2(req, err); + return err; +} + +static int echainiv_encrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + u8 *info; + unsigned int ivsize = crypto_aead_ivsize(geniv); + int err; + + if (req->cryptlen < ivsize) + return -EINVAL; + + aead_request_set_tfm(subreq, ctx->child); + + compl = echainiv_encrypt_complete; + data = req; + info = req->iv; + + if (req->src != req->dst) { + struct scatterlist src[2]; + struct scatterlist dst[2]; + struct blkcipher_desc desc = { + .tfm = ctx->null, + }; + + err = crypto_blkcipher_encrypt( + &desc, + scatterwalk_ffwd(dst, req->dst, + req->assoclen + ivsize), + scatterwalk_ffwd(src, req->src, + req->assoclen + ivsize), + req->cryptlen - ivsize); + if (err) + return err; + } + + if (unlikely(!IS_ALIGNED((unsigned long)info, + crypto_aead_alignmask(geniv) + 1))) { + info = kmalloc(ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + memcpy(info, req->iv, ivsize); + } + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->dst, req->dst, + req->cryptlen - ivsize, info); + aead_request_set_ad(subreq, req->assoclen + ivsize); + + crypto_xor(info, ctx->salt, ivsize); + scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); + echainiv_read_iv(info, ivsize); + + err = crypto_aead_encrypt(subreq); + echainiv_encrypt_complete2(req, err); + return err; +} + +static int echainiv_decrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct echainiv_request_ctx *rctx = aead_request_ctx(req); + struct aead_request *subreq = &rctx->subreq.areq; + crypto_completion_t compl; + void *data; + unsigned int ivsize = crypto_aead_ivsize(geniv); + + if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) + return -EINVAL; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, + scatterwalk_ffwd(rctx->src, req->src, + req->assoclen + ivsize), + scatterwalk_ffwd(rctx->dst, req->dst, + req->assoclen + ivsize), + req->cryptlen - ivsize, req->iv); + aead_request_set_assoc(subreq, req->src, req->assoclen); + + scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); + + return crypto_aead_decrypt(subreq); +} + +static int echainiv_decrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + unsigned int ivsize = crypto_aead_ivsize(geniv); + + if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) + return -EINVAL; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen + ivsize); + + scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); + if (req->src != req->dst) + scatterwalk_map_and_copy(req->iv, req->dst, + req->assoclen, ivsize, 1); + + return crypto_aead_decrypt(subreq); +} + +static int echainiv_encrypt_compat_first(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err = 0; + + spin_lock_bh(&ctx->lock); + if (geniv->encrypt != echainiv_encrypt_compat_first) + goto unlock; + + geniv->encrypt = echainiv_encrypt_compat; + err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, + crypto_aead_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + if (err) + return err; + + return echainiv_encrypt_compat(req); +} + +static int echainiv_encrypt_first(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err = 0; + + spin_lock_bh(&ctx->lock); + if (geniv->encrypt != echainiv_encrypt_first) + goto unlock; + + geniv->encrypt = echainiv_encrypt; + err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, + crypto_aead_ivsize(geniv)); + +unlock: + spin_unlock_bh(&ctx->lock); + + if (err) + return err; + + return echainiv_encrypt(req); 
+} + +static int echainiv_compat_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *geniv = __crypto_aead_cast(tfm); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err; + + spin_lock_init(&ctx->lock); + + crypto_aead_set_reqsize(geniv, sizeof(struct echainiv_request_ctx)); + + err = aead_geniv_init(tfm); + + ctx->child = geniv->child; + geniv->child = geniv; + + return err; +} + +static int echainiv_init(struct crypto_tfm *tfm) +{ + struct crypto_aead *geniv = __crypto_aead_cast(tfm); + struct echainiv_ctx *ctx = crypto_aead_ctx(geniv); + int err; + + spin_lock_init(&ctx->lock); + + crypto_aead_set_reqsize(geniv, sizeof(struct aead_request)); + + ctx->null = crypto_get_default_null_skcipher(); + err = PTR_ERR(ctx->null); + if (IS_ERR(ctx->null)) + goto out; + + err = aead_geniv_init(tfm); + if (err) + goto drop_null; + + ctx->child = geniv->child; + geniv->child = geniv; + +out: + return err; + +drop_null: + crypto_put_default_null_skcipher(); + goto out; +} + +static void echainiv_compat_exit(struct crypto_tfm *tfm) +{ + struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); +} + +static void echainiv_exit(struct crypto_tfm *tfm) +{ + struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_aead(ctx->child); + crypto_put_default_null_skcipher(); +} + +static int echainiv_aead_create(struct crypto_template *tmpl, + struct rtattr **tb) +{ + struct aead_instance *inst; + struct crypto_aead_spawn *spawn; + struct aead_alg *alg; + int err; + + inst = aead_geniv_alloc(tmpl, tb, 0, 0); + + if (IS_ERR(inst)) + return PTR_ERR(inst); + + err = -EINVAL; + if (inst->alg.ivsize < sizeof(u64) || + inst->alg.ivsize & (sizeof(u32) - 1) || + inst->alg.ivsize > MAX_IV_SIZE) + goto free_inst; + + spawn = aead_instance_ctx(inst); + alg = crypto_spawn_aead_alg(spawn); + + inst->alg.setkey = echainiv_setkey; + inst->alg.setauthsize = echainiv_setauthsize; + inst->alg.encrypt = echainiv_encrypt_first; + inst->alg.decrypt = echainiv_decrypt; + + inst->alg.base.cra_init = echainiv_init; + inst->alg.base.cra_exit = echainiv_exit; + + inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx); + inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize; + + if (alg->base.cra_aead.encrypt) { + inst->alg.encrypt = echainiv_encrypt_compat_first; + inst->alg.decrypt = echainiv_decrypt_compat; + + inst->alg.base.cra_init = echainiv_compat_init; + inst->alg.base.cra_exit = echainiv_compat_exit; + } + + err = aead_register_instance(tmpl, inst); + if (err) + goto free_inst; + +out: + return err; + +free_inst: + aead_geniv_free(inst); + goto out; +} + +static int echainiv_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + int err; + + err = crypto_get_default_rng(); + if (err) + goto out; + + err = echainiv_aead_create(tmpl, tb); + if (err) + goto put_rng; + +out: + return err; + +put_rng: + crypto_put_default_rng(); + goto out; +} + +static void echainiv_free(struct crypto_instance *inst) +{ + aead_geniv_free(aead_instance(inst)); + crypto_put_default_rng(); +} + +static struct crypto_template echainiv_tmpl = { + .name = "echainiv", + .create = echainiv_create, + .free = echainiv_free, + .module = THIS_MODULE, +}; + +static int __init echainiv_module_init(void) +{ + return crypto_register_template(&echainiv_tmpl); +} + +static void __exit echainiv_module_exit(void) +{ + crypto_unregister_template(&echainiv_tmpl); +} + +module_init(echainiv_module_init); +module_exit(echainiv_module_exit); + 
+MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Encrypted Chain IV Generator"); +MODULE_ALIAS_CRYPTO("echainiv"); diff --git a/crypto/fips.c b/crypto/fips.c index 553970081c62..9d627c1cf8bc 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -10,7 +10,12 @@ * */ -#include "internal.h" +#include <linux/export.h> +#include <linux/fips.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sysctl.h> int fips_enabled; EXPORT_SYMBOL_GPL(fips_enabled); @@ -25,3 +30,49 @@ static int fips_enable(char *str) } __setup("fips=", fips_enable); + +static struct ctl_table crypto_sysctl_table[] = { + { + .procname = "fips_enabled", + .data = &fips_enabled, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec + }, + {} +}; + +static struct ctl_table crypto_dir_table[] = { + { + .procname = "crypto", + .mode = 0555, + .child = crypto_sysctl_table + }, + {} +}; + +static struct ctl_table_header *crypto_sysctls; + +static void crypto_proc_fips_init(void) +{ + crypto_sysctls = register_sysctl_table(crypto_dir_table); +} + +static void crypto_proc_fips_exit(void) +{ + unregister_sysctl_table(crypto_sysctls); +} + +static int __init fips_init(void) +{ + crypto_proc_fips_init(); + return 0; +} + +static void __exit fips_exit(void) +{ + crypto_proc_fips_exit(); +} + +module_init(fips_init); +module_exit(fips_exit); diff --git a/crypto/gcm.c b/crypto/gcm.c index 2e403f6138c1..fc2b55eaf1ed 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -12,6 +12,7 @@ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> +#include <crypto/null.h> #include <crypto/scatterwalk.h> #include <crypto/hash.h> #include "internal.h" @@ -39,7 +40,6 @@ struct crypto_rfc4106_ctx { struct crypto_rfc4543_instance_ctx { struct crypto_aead_spawn aead; - struct crypto_skcipher_spawn null; }; struct crypto_rfc4543_ctx { @@ -672,12 +672,12 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) align = crypto_tfm_alg_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); - tfm->crt_aead.reqsize = align + - offsetof(struct crypto_gcm_req_priv_ctx, u) + + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + align + offsetof(struct crypto_gcm_req_priv_ctx, u) + max(sizeof(struct ablkcipher_request) + crypto_ablkcipher_reqsize(ctr), sizeof(struct ahash_request) + - crypto_ahash_reqsize(ghash)); + crypto_ahash_reqsize(ghash))); return 0; @@ -946,10 +946,10 @@ static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm) align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); - tfm->crt_aead.reqsize = sizeof(struct aead_request) + - ALIGN(crypto_aead_reqsize(aead), - crypto_tfm_ctx_alignment()) + - align + 16; + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct aead_request) + + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + + align + 16); return 0; } @@ -1246,7 +1246,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm) if (IS_ERR(aead)) return PTR_ERR(aead); - null = crypto_spawn_blkcipher(&ictx->null.base); + null = crypto_get_default_null_skcipher(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_aead; @@ -1256,10 +1256,10 @@ static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm) align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); - tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) + - ALIGN(crypto_aead_reqsize(aead), - crypto_tfm_ctx_alignment()) + - align + 16; + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + 
sizeof(struct crypto_rfc4543_req_ctx) + + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + + align + 16); return 0; @@ -1273,7 +1273,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm) struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_aead(ctx->child); - crypto_free_blkcipher(ctx->null); + crypto_put_default_null_skcipher(); } static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) @@ -1311,23 +1311,15 @@ static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) alg = crypto_aead_spawn_alg(spawn); - crypto_set_skcipher_spawn(&ctx->null, inst); - err = crypto_grab_skcipher(&ctx->null, "ecb(cipher_null)", 0, - CRYPTO_ALG_ASYNC); - if (err) - goto out_drop_alg; - - crypto_skcipher_spawn_alg(&ctx->null); - err = -EINVAL; /* We only support 16-byte blocks. */ if (alg->cra_aead.ivsize != 16) - goto out_drop_ecbnull; + goto out_drop_alg; /* Not a stream cipher? */ if (alg->cra_blocksize != 1) - goto out_drop_ecbnull; + goto out_drop_alg; err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, @@ -1335,7 +1327,7 @@ static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4543(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_drop_ecbnull; + goto out_drop_alg; inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; @@ -1362,8 +1354,6 @@ static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) out: return inst; -out_drop_ecbnull: - crypto_drop_skcipher(&ctx->null); out_drop_alg: crypto_drop_aead(spawn); out_free_inst: @@ -1377,7 +1367,6 @@ static void crypto_rfc4543_free(struct crypto_instance *inst) struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst); crypto_drop_aead(&ctx->aead); - crypto_drop_skcipher(&ctx->null); kfree(inst); } diff --git a/crypto/internal.h b/crypto/internal.h index bd39bfc92eab..00e42a3ed814 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -25,7 +25,6 @@ #include <linux/notifier.h> #include <linux/rwsem.h> #include <linux/slab.h> -#include <linux/fips.h> /* Crypto notification events. */ enum { @@ -103,6 +102,8 @@ int crypto_register_notifier(struct notifier_block *nb); int crypto_unregister_notifier(struct notifier_block *nb); int crypto_probing_notify(unsigned long val, void *v); +unsigned int crypto_alg_extsize(struct crypto_alg *alg); + static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) { atomic_inc(&alg->cra_refcnt); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c new file mode 100644 index 000000000000..1ebe58a26619 --- /dev/null +++ b/crypto/jitterentropy.c @@ -0,0 +1,909 @@ +/* + * Non-physical true random number generator based on timing jitter. + * + * Copyright Stephan Mueller <smueller@chronox.de>, 2014 + * + * Design + * ====== + * + * See http://www.chronox.de/jent.html + * + * License + * ======= + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * ALTERNATIVELY, this product may be distributed under the terms of + * the GNU General Public License, in which case the provisions of the GPL2 are + * required INSTEAD OF the above restrictions. (This clause is + * necessary due to a potential bad interaction between the GPL and + * the restrictions contained in a BSD-style copyright.) + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +/* + * This Jitterentropy RNG is based on the jitterentropy library + * version 1.1.0 provided at http://www.chronox.de/jent.html + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fips.h> +#include <linux/time.h> +#include <linux/crypto.h> +#include <crypto/internal/rng.h> + +#ifdef __OPTIMIZE__ + #error "The CPU Jitter random number generator must not be compiled with optimizations. See documentation. Use the compiler switch -O0 for compiling jitterentropy.c." +#endif + +/* The entropy pool */ +struct rand_data { + /* all data values that are vital to maintain the security + * of the RNG are marked as SENSITIVE. A user must not + * access that information while the RNG executes its loops to + * calculate the next random value.
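+ *
+ * (For the arithmetic below: DATA_SIZE_BITS evaluates to
+ * sizeof(__u64) * 8 = 64, so the folding, stirring and LFSR logic
+ * all operate on 64 bit words.)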
*/ + __u64 data; /* SENSITIVE Actual random number */ + __u64 old_data; /* SENSITIVE Previous random number */ + __u64 prev_time; /* SENSITIVE Previous time stamp */ +#define DATA_SIZE_BITS ((sizeof(__u64)) * 8) + __u64 last_delta; /* SENSITIVE stuck test */ + __s64 last_delta2; /* SENSITIVE stuck test */ + unsigned int stuck:1; /* Time measurement stuck */ + unsigned int osr; /* Oversample rate */ + unsigned int stir:1; /* Post-processing stirring */ + unsigned int disable_unbias:1; /* Deactivate Von Neumann unbias */ +#define JENT_MEMORY_BLOCKS 64 +#define JENT_MEMORY_BLOCKSIZE 32 +#define JENT_MEMORY_ACCESSLOOPS 128 +#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE) + unsigned char *mem; /* Memory access location with size of + * memblocks * memblocksize */ + unsigned int memlocation; /* Pointer to byte in *mem */ + unsigned int memblocks; /* Number of memory blocks in *mem */ + unsigned int memblocksize; /* Size of one memory block in bytes */ + unsigned int memaccessloops; /* Number of memory accesses per random + * bit generation */ +}; + +/* Flags that can be used to initialize the RNG */ +#define JENT_DISABLE_STIR (1<<0) /* Disable stirring the entropy pool */ +#define JENT_DISABLE_UNBIAS (1<<1) /* Disable the Von Neumann unbiaser */ +#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more + * entropy, saves MEMORY_SIZE RAM for + * entropy collector */ + +#define DRIVER_NAME "jitterentropy" + +/* -- error codes for init function -- */ +#define JENT_ENOTIME 1 /* Timer service not available */ +#define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */ +#define JENT_ENOMONOTONIC 3 /* Timer is not monotonically increasing */ +#define JENT_EMINVARIATION 4 /* Timer variations too small for RNG */ +#define JENT_EVARVAR 5 /* Timer does not produce variations of + * variations (2nd derivative of time is + * zero). */ +#define JENT_EMINVARVAR 6 /* Timer variations of variations are too + * small. */ + +/*************************************************************************** + * Helper functions + ***************************************************************************/ + +static inline void jent_get_nstime(__u64 *out) +{ + struct timespec ts; + __u64 tmp = 0; + + tmp = random_get_entropy(); + + /* + * If random_get_entropy does not return a value (which is possible on, + * for example, MIPS), invoke __getnstimeofday + * hoping that there are timers we can work with. + * + * The list of available timers can be obtained from + * /sys/devices/system/clocksource/clocksource0/available_clocksource + * and are registered with clocksource_register() + */ + if ((0 == tmp) && +#ifndef MODULE + (0 == timekeeping_valid_for_hres()) && +#endif + (0 == __getnstimeofday(&ts))) { + tmp = ts.tv_sec; + tmp = tmp << 32; + tmp = tmp | ts.tv_nsec; + } + + *out = tmp; +} + + +/** + * Update the loop count used for the next round of + * entropy collection.
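+ *
+ * As a worked example: a call with bits = 4 and min = 0 uses
+ * mask = (1 << 4) - 1 = 0xf, XORs the 64 bit time stamp together in
+ * 64 / 4 = 16 nibble-sized chunks, and adds the lower boundary
+ * (1 << 0) = 1 to the result.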
+ * + * Input: + * @ec entropy collector struct -- may be NULL + * @bits is the number of low bits of the timer to consider + * @min is the number of bits we shift the timer value to the right at + * the end to make sure we have a guaranteed minimum value + * + * @return Newly calculated loop counter + */ +static __u64 jent_loop_shuffle(struct rand_data *ec, + unsigned int bits, unsigned int min) +{ + __u64 time = 0; + __u64 shuffle = 0; + unsigned int i = 0; + unsigned int mask = (1<<bits) - 1; + + jent_get_nstime(&time); + /* + * mix the current state of the random number into the shuffle + * calculation to balance that shuffle a bit more + */ + if (ec) + time ^= ec->data; + /* + * we fold the time value as much as possible to ensure that as many + * bits of the time stamp are included as possible + */ + for (i = 0; (DATA_SIZE_BITS / bits) > i; i++) { + shuffle ^= time & mask; + time = time >> bits; + } + + /* + * We add a lower boundary value to ensure we have a minimum + * RNG loop count. + */ + return (shuffle + (1<<min)); +} + +/*************************************************************************** + * Noise sources + ***************************************************************************/ + +/** + * CPU Jitter noise source -- this is the noise source based on the CPU + * execution time jitter + * + * This function folds the time into one bit units by iterating + * through the DATA_SIZE_BITS bit time value as follows: assume our time value + * is 0xabcd + * 1st loop, 1st shift generates 0xd000 + * 1st loop, 2nd shift generates 0x000d + * 2nd loop, 1st shift generates 0xcd00 + * 2nd loop, 2nd shift generates 0x000c + * 3rd loop, 1st shift generates 0xbcd0 + * 3rd loop, 2nd shift generates 0x000b + * 4th loop, 1st shift generates 0xabcd + * 4th loop, 2nd shift generates 0x000a + * Now, the values at the end of the 2nd shifts are XORed together. + * + * The code is deliberately inefficient and shall stay that way. This function + * is the root cause why the code shall be compiled without optimization. This + * function not only acts as folding operation, but this function's execution + * is used to measure the CPU execution time jitter. Any change to the loop in + * this function implies that careful retesting must be done. 
+ * + * Input: + * @ec entropy collector struct -- may be NULL + * @time time stamp to be folded + * @loop_cnt if a value not equal to 0 is set, use the given value as number of + * loops to perform the folding + * + * Output: + * @folded result of folding operation + * + * @return Number of loops the folding operation is performed + */ +static __u64 jent_fold_time(struct rand_data *ec, __u64 time, + __u64 *folded, __u64 loop_cnt) +{ + unsigned int i; + __u64 j = 0; + __u64 new = 0; +#define MAX_FOLD_LOOP_BIT 4 +#define MIN_FOLD_LOOP_BIT 0 + __u64 fold_loop_cnt = + jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT); + + /* + * testing purposes -- allow test app to set the counter, not + * needed during runtime + */ + if (loop_cnt) + fold_loop_cnt = loop_cnt; + for (j = 0; j < fold_loop_cnt; j++) { + new = 0; + for (i = 1; (DATA_SIZE_BITS) >= i; i++) { + __u64 tmp = time << (DATA_SIZE_BITS - i); + + tmp = tmp >> (DATA_SIZE_BITS - 1); + new ^= tmp; + } + } + *folded = new; + return fold_loop_cnt; +} + +/** + * Memory Access noise source -- this is a noise source based on variations in + * memory access times + * + * This function performs memory accesses which will add to the timing + * variations due to an unknown amount of CPU wait states that need to be + * added when accessing memory. The memory size should be larger than the L1 + * caches as outlined in the documentation and the associated testing. + * + * The L1 cache has a very high bandwidth, albeit its access rate is usually + * slower than accessing CPU registers. Therefore, L1 accesses only add minimal + * variations as the CPU hardly has to wait. Starting with L2, significant + * variations are added because L2 typically no longer belongs to the CPU core + * and therefore a wider range of CPU wait states occurs for accesses. + * L3 and real memory accesses have an even wider range of wait states. However, + * to reliably access either L3 or memory, the ec->mem memory must be quite + * large, which is usually not desirable.
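+ *
+ * With the default constants above, the accessed buffer is
+ * JENT_MEMORY_SIZE = 64 * 32 = 2048 bytes, small enough to fit into
+ * the L1 cache on common CPUs; per the reasoning above, the default
+ * therefore trades entropy per access for a small memory footprint.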
+ * + * Input: + * @ec Reference to the entropy collector with the memory access data -- if + * the reference to the memory block to be accessed is NULL, this noise + * source is disabled + * @loop_cnt if a value not equal to 0 is set, use the given value as number of + * loops to perform the memory accesses + * + * @return Number of memory access operations + */ +static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) +{ + unsigned char *tmpval = NULL; + unsigned int wrap = 0; + __u64 i = 0; +#define MAX_ACC_LOOP_BIT 7 +#define MIN_ACC_LOOP_BIT 0 + __u64 acc_loop_cnt = + jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); + + if (NULL == ec || NULL == ec->mem) + return 0; + wrap = ec->memblocksize * ec->memblocks; + + /* + * testing purposes -- allow test app to set the counter, not + * needed during runtime + */ + if (loop_cnt) + acc_loop_cnt = loop_cnt; + + for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) { + tmpval = ec->mem + ec->memlocation; + /* + * memory access: just add 1 to one byte, + * wrap at 255 -- memory access implies read + * from and write to memory location + */ + *tmpval = (*tmpval + 1) & 0xff; + /* + * Addition of memblocksize - 1 to pointer + * with wrap around logic to ensure that every + * memory location is hit evenly + */ + ec->memlocation = ec->memlocation + ec->memblocksize - 1; + ec->memlocation = ec->memlocation % wrap; + } + return i; +} + +/*************************************************************************** + * Start of entropy processing logic + ***************************************************************************/ + +/** + * Stuck test by checking the: + * 1st derivative of the jitter measurement (time delta) + * 2nd derivative of the jitter measurement (delta of time deltas) + * 3rd derivative of the jitter measurement (delta of delta of time deltas) + * + * All values must always be non-zero. + * + * Input: + * @ec Reference to entropy collector + * @current_delta Jitter time delta + * + * The function does not return a value; instead it sets ec->stuck to 1 + * when the jitter measurement is stuck (reject bit) and leaves it + * unchanged for a good bit. + */ +static void jent_stuck(struct rand_data *ec, __u64 current_delta) +{ + __s64 delta2 = ec->last_delta - current_delta; + __s64 delta3 = delta2 - ec->last_delta2; + + ec->last_delta = current_delta; + ec->last_delta2 = delta2; + + if (!current_delta || !delta2 || !delta3) + ec->stuck = 1; +} + +/** + * This is the heart of the entropy generation: calculate time deltas and + * use the CPU jitter in the time deltas. The jitter is folded into one + * bit. You can call this function the "random bit generator" as it + * produces one random bit per invocation. + * + * WARNING: ensure that ->prev_time is primed before using the output + * of this function! This can be done by calling this function + * and not using its result. + * + * Input: + * @entropy_collector Reference to entropy collector + * + * @return One random bit + */ +static __u64 jent_measure_jitter(struct rand_data *ec) +{ + __u64 time = 0; + __u64 data = 0; + __u64 current_delta = 0; + + /* Invoke one noise source before time measurement to add variations */ + jent_memaccess(ec, 0); + + /* + * Get time stamp and calculate time delta to previous + * invocation to measure the timing variations + */ + jent_get_nstime(&time); + current_delta = time - ec->prev_time; + ec->prev_time = time; + + /* Now call the next noise source which also folds the data */ + jent_fold_time(ec, current_delta, &data, 0); + + /* + * Check whether we have a stuck measurement. The enforcement + * is performed after the stuck value has been mixed into the + * entropy pool.
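+ *
+ * Condensed, the three derivatives checked by jent_stuck() are:
+ *
+ *	delta  = time - prev_time;
+ *	delta2 = last_delta - delta;
+ *	delta3 = delta2 - last_delta2;
+ *
+ * and the measurement counts as stuck when any of them is zero.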
+ */ + jent_stuck(ec, current_delta); + + return data; +} + +/** + * Von Neumann unbias as explained in RFC 4086 section 4.2. As shown in the + * documentation of that RNG, the bits from jent_measure_jitter are considered + * independent, which implies that the Von Neumann unbias operation is applicable. + * A proof of the Von Neumann unbias operation to remove skews is given in the + * document "A proposal for: Functionality classes for random number + * generators", version 2.0 by Werner Schindler, section 5.4.1. + * + * Input: + * @entropy_collector Reference to entropy collector + * + * @return One random bit + */ +static __u64 jent_unbiased_bit(struct rand_data *entropy_collector) +{ + do { + __u64 a = jent_measure_jitter(entropy_collector); + __u64 b = jent_measure_jitter(entropy_collector); + + if (a == b) + continue; + if (1 == a) + return 1; + else + return 0; + } while (1); +} + +/** + * Shuffle the pool a bit by mixing some value with a bijective function (XOR) + * into the pool. + * + * The function generates a mixer value that depends on the bits set and the + * location of the set bits in the random number generated by the entropy + * source. Therefore, based on the generated random number, this mixer value + * can have 2**64 different values. That mixer value is initialized with + * SHA-1 constants. After obtaining the mixer value, it is XORed into + * the random number. + * + * The mixer value is not assumed to contain any entropy. But due to the XOR + * operation, it can also not destroy any entropy present in the entropy pool. + * + * Input: + * @entropy_collector Reference to entropy collector + */ +static void jent_stir_pool(struct rand_data *entropy_collector) +{ + /* + * to shut up GCC on 32 bit, we have to initialize the 64 bit variable + * with two 32 bit variables + */ + union c { + __u64 u64; + __u32 u32[2]; + }; + /* + * This constant is derived from the first two 32 bit initialization + * vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 + */ + union c constant; + /* + * The start value of the mixer variable is derived from the third + * and fourth 32 bit initialization vector of SHA-1 as defined in + * FIPS 180-4 section 5.3.1 + */ + union c mixer; + unsigned int i = 0; + + /* + * Store the SHA-1 constants in reverse order to make up the 64 bit + * value -- this applies to a little endian system, on a big endian + * system, it reverses as expected. But this really does not matter + * as we do not rely on the specific numbers. We just pick the SHA-1 + * constants as they have a good mix of set and unset bits.
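+ *
+ * On a little endian machine the assignments below therefore read as
+ * constant.u64 = 0x67452301efcdab89 and
+ * mixer.u64 = 0x98badcfe10325476.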
+ */ + constant.u32[1] = 0x67452301; + constant.u32[0] = 0xefcdab89; + mixer.u32[1] = 0x98badcfe; + mixer.u32[0] = 0x10325476; + + for (i = 0; i < DATA_SIZE_BITS; i++) { + /* + * get the i-th bit of the input random number and only XOR + * the constant into the mixer value when that bit is set + */ + if ((entropy_collector->data >> i) & 1) + mixer.u64 ^= constant.u64; + mixer.u64 = rol64(mixer.u64, 1); + } + entropy_collector->data ^= mixer.u64; +} + +/** + * Generator of one 64 bit random number + * Function fills rand_data->data + * + * Input: + * @ec Reference to entropy collector + */ +static void jent_gen_entropy(struct rand_data *ec) +{ + unsigned int k = 0; + + /* priming of the ->prev_time value */ + jent_measure_jitter(ec); + + while (1) { + __u64 data = 0; + + if (ec->disable_unbias == 1) + data = jent_measure_jitter(ec); + else + data = jent_unbiased_bit(ec); + + /* enforcement of the jent_stuck test */ + if (ec->stuck) { + /* + * We only mix in the bit considered not appropriate + * without the LFSR. The reason is that if we apply + * the LFSR and we do not rotate, the 2nd bit with LFSR + * will cancel out the first LFSR application on the + * bad bit. + * + * And we do not rotate as we apply the next bit to the + * current bit location again. + */ + ec->data ^= data; + ec->stuck = 0; + continue; + } + + /* + * Fibonacci LFSR with the polynomial + * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is + * primitive according to + * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf + * (the shift values are the polynomial exponents minus one + * due to counting bits from 0 to 63). As the current + * position is always the LSB, the polynomial only needs + * to shift data in from the left without wrap. + */ + ec->data ^= data; + ec->data ^= ((ec->data >> 63) & 1); + ec->data ^= ((ec->data >> 60) & 1); + ec->data ^= ((ec->data >> 55) & 1); + ec->data ^= ((ec->data >> 30) & 1); + ec->data ^= ((ec->data >> 27) & 1); + ec->data ^= ((ec->data >> 22) & 1); + ec->data = rol64(ec->data, 1); + + /* + * We multiply the loop value with ->osr to obtain the + * oversampling rate requested by the caller + */ + if (++k >= (DATA_SIZE_BITS * ec->osr)) + break; + } + if (ec->stir) + jent_stir_pool(ec); +} + +/** + * The continuous test required by FIPS 140-2 -- the function automatically + * primes the test if needed. + * + * The function does not return a value; when the test fails, i.e. two + * consecutive 64 bit outputs are identical, the kernel panics. + */ +static void jent_fips_test(struct rand_data *ec) +{ + if (!fips_enabled) + return; + + /* prime the FIPS test */ + if (!ec->old_data) { + ec->old_data = ec->data; + jent_gen_entropy(ec); + } + + if (ec->data == ec->old_data) + panic(DRIVER_NAME ": Duplicate output detected\n"); + + ec->old_data = ec->data; +} + + +/** + * Entry function: Obtain entropy for the caller. + * + * This function invokes the entropy gathering logic as often as needed + * to generate as many bytes as requested by the caller. The entropy + * gathering logic creates 64 bits per invocation. + * + * This function truncates the last 64 bit entropy value output to the exact + * size specified by the caller.
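+ * For example, a request for len = 10 bytes runs two rounds of
+ * jent_gen_entropy(): the first copies all 64 / 8 = 8 bytes of
+ * ->data, the second copies only the remaining 2 bytes.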
+ * + * Input: + * @ec Reference to entropy collector + * @data pointer to buffer for storing random data -- buffer must already + * exist + * @len size of the buffer, specifying also the requested number of random + * bytes + * + * @return 0 when request is fulfilled or an error + * + * The following error codes can occur: + * -EINVAL entropy_collector is NULL + */ +static ssize_t jent_read_entropy(struct rand_data *ec, u8 *data, size_t len) +{ + u8 *p = data; + + if (!ec) + return -EINVAL; + + while (0 < len) { + size_t tocopy; + + jent_gen_entropy(ec); + jent_fips_test(ec); + if ((DATA_SIZE_BITS / 8) < len) + tocopy = (DATA_SIZE_BITS / 8); + else + tocopy = len; + memcpy(p, &ec->data, tocopy); + + len -= tocopy; + p += tocopy; + } + + return 0; +} + +/*************************************************************************** + * Initialization logic + ***************************************************************************/ + +static struct rand_data *jent_entropy_collector_alloc(unsigned int osr, + unsigned int flags) +{ + struct rand_data *entropy_collector; + + entropy_collector = kzalloc(sizeof(struct rand_data), GFP_KERNEL); + if (!entropy_collector) + return NULL; + + if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) { + /* Allocate memory for adding variations based on memory + * access + */ + entropy_collector->mem = kzalloc(JENT_MEMORY_SIZE, GFP_KERNEL); + if (!entropy_collector->mem) { + kfree(entropy_collector); + return NULL; + } + entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE; + entropy_collector->memblocks = JENT_MEMORY_BLOCKS; + entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS; + } + + /* verify and set the oversampling rate */ + if (0 == osr) + osr = 1; /* minimum sampling rate is 1 */ + entropy_collector->osr = osr; + + entropy_collector->stir = 1; + if (flags & JENT_DISABLE_STIR) + entropy_collector->stir = 0; + if (flags & JENT_DISABLE_UNBIAS) + entropy_collector->disable_unbias = 1; + + /* fill the data pad with non-zero values */ + jent_gen_entropy(entropy_collector); + + return entropy_collector; +} + +static void jent_entropy_collector_free(struct rand_data *entropy_collector) +{ + if (!entropy_collector) + return; + kzfree(entropy_collector->mem); + entropy_collector->mem = NULL; + kzfree(entropy_collector); +} + +static int jent_entropy_init(void) +{ + int i; + __u64 delta_sum = 0; + __u64 old_delta = 0; + int time_backwards = 0; + int count_var = 0; + int count_mod = 0; + + /* We could perform statistical tests here, but the problem is + * that we only have a few loop counts to do testing. These + * loop counts may show some slight skew and we produce + * false positives. + * + * Moreover, only old systems show potentially problematic + * jitter entropy that could potentially be caught here. But + * the RNG is intended for hardware that is available or widely + * used, but not old systems that are long out of favor. Thus, + * no statistical tests. + */ + + /* + * We could add a check for system capabilities such as clock_getres or + * check for CONFIG_X86_TSC, but it does not make much sense as the + * following sanity checks verify that we have a high-resolution + * timer. + */ + /* + * TESTLOOPCOUNT needs some loops to identify edge systems. 100 is + * definitely too little.
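+ * (With the values below, TESTLOOPCOUNT + CLEARCACHE = 300 + 100
+ * iterations run in total; the first CLEARCACHE = 100 of them only
+ * warm up caches and branch predictors and are skipped for the
+ * statistics.)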
+ */ +#define TESTLOOPCOUNT 300 +#define CLEARCACHE 100 + for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { + __u64 time = 0; + __u64 time2 = 0; + __u64 folded = 0; + __u64 delta = 0; + unsigned int lowdelta = 0; + + jent_get_nstime(&time); + jent_fold_time(NULL, time, &folded, 1<<MIN_FOLD_LOOP_BIT); + jent_get_nstime(&time2); + + /* test whether timer works */ + if (!time || !time2) + return JENT_ENOTIME; + delta = time2 - time; + /* + * test whether timer is fine grained enough to provide + * delta even when called shortly after each other -- this + * implies that we also have a high resolution timer + */ + if (!delta) + return JENT_ECOARSETIME; + + /* + * up to here we did not modify any variable that will be + * evaluated later, but we already performed some work. Thus we + * already have had an impact on the caches, branch prediction, + * etc. with the goal to clear it to get the worst case + * measurements. + */ + if (CLEARCACHE > i) + continue; + + /* test whether we have an increasing timer */ + if (!(time2 > time)) + time_backwards++; + + /* + * Avoid modulo of 64 bit integer to allow code to compile + * on 32 bit architectures. + */ + lowdelta = time2 - time; + if (!(lowdelta % 100)) + count_mod++; + + /* + * ensure that we have a varying delta timer which is necessary + * for the calculation of entropy -- perform this check + * only after the first loop is executed as we need to prime + * the old_data value + */ + if (i) { + if (delta != old_delta) + count_var++; + if (delta > old_delta) + delta_sum += (delta - old_delta); + else + delta_sum += (old_delta - delta); + } + old_delta = delta; + } + + /* + * we allow up to three times the time running backwards. + * CLOCK_REALTIME is affected by adjtime and NTP operations. Thus, + * if such an operation just happens to interfere with our test, it + * should not fail. The value of 3 should cover the NTP case being + * performed during our test run. 
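+ *
+ * In other words: out of the TESTLOOPCOUNT = 300 measured samples,
+ * at most three backward jumps of the timer are tolerated before the
+ * check below returns JENT_ENOMONOTONIC.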
+	if (3 < time_backwards)
+		return JENT_ENOMONOTONIC;
+	/* Error if the time deltas never vary */
+	if (!delta_sum)
+		return JENT_EVARVAR;
+
+	/*
+	 * The sum of the variations of the time deltas must be larger
+	 * than 1 to preserve the entropy estimate implied by a varying
+	 * delta.
+	 */
+	if (delta_sum <= 1)
+		return JENT_EMINVARVAR;
+
+	/*
+	 * Ensure that at least 10% of all deltas are not multiples of 100
+	 * -- on some platforms the counter increments in multiples of 100,
+	 * but a timer that does so for more than 90% of the samples is too
+	 * coarse.
+	 */
+	if ((TESTLOOPCOUNT/10 * 9) < count_mod)
+		return JENT_ECOARSETIME;
+
+	return 0;
+}
+
+/***************************************************************************
+ * Kernel crypto API interface
+ ***************************************************************************/
+
+struct jitterentropy {
+	spinlock_t jent_lock;
+	struct rand_data *entropy_collector;
+};
+
+static int jent_kcapi_init(struct crypto_tfm *tfm)
+{
+	struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+	int ret = 0;
+
+	rng->entropy_collector = jent_entropy_collector_alloc(1, 0);
+	if (!rng->entropy_collector)
+		ret = -ENOMEM;
+
+	spin_lock_init(&rng->jent_lock);
+	return ret;
+}
+
+static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
+{
+	struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+
+	spin_lock(&rng->jent_lock);
+	if (rng->entropy_collector)
+		jent_entropy_collector_free(rng->entropy_collector);
+	rng->entropy_collector = NULL;
+	spin_unlock(&rng->jent_lock);
+}
+
+static int jent_kcapi_random(struct crypto_rng *tfm,
+			     const u8 *src, unsigned int slen,
+			     u8 *rdata, unsigned int dlen)
+{
+	struct jitterentropy *rng = crypto_rng_ctx(tfm);
+	int ret = 0;
+
+	spin_lock(&rng->jent_lock);
+	ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
+	spin_unlock(&rng->jent_lock);
+
+	return ret;
+}
+
+static int jent_kcapi_reset(struct crypto_rng *tfm,
+			    const u8 *seed, unsigned int slen)
+{
+	return 0;
+}
+
+static struct rng_alg jent_alg = {
+	.generate		= jent_kcapi_random,
+	.seed			= jent_kcapi_reset,
+	.seedsize		= 0,
+	.base			= {
+		.cra_name		= "jitterentropy_rng",
+		.cra_driver_name	= "jitterentropy_rng",
+		.cra_priority		= 100,
+		.cra_ctxsize		= sizeof(struct jitterentropy),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= jent_kcapi_init,
+		.cra_exit		= jent_kcapi_cleanup,
+	}
+};
+
+static int __init jent_mod_init(void)
+{
+	int ret = 0;
+
+	ret = jent_entropy_init();
+	if (ret) {
+		pr_info(DRIVER_NAME ": Initialization failed: host does not meet the timing requirements: %d\n", ret);
+		return -EFAULT;
+	}
+	return crypto_register_rng(&jent_alg);
+}
+
+static void __exit jent_mod_exit(void)
+{
+	crypto_unregister_rng(&jent_alg);
+}
+
+module_init(jent_mod_init);
+module_exit(jent_mod_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Non-physical True Random Number Generator based on CPU Jitter");
+MODULE_ALIAS_CRYPTO("jitterentropy_rng");
diff --git a/crypto/krng.c b/crypto/krng.c
index 0224841b6579..40ed78e32fa5 100644
--- a/crypto/krng.c
+++ b/crypto/krng.c
@@ -16,31 +16,27 @@
 #include <linux/module.h>
 #include <linux/random.h>
 
-static int krng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen)
+static int krng_generate(struct crypto_rng *tfm,
+			 const u8 *src, unsigned int slen,
+			 u8 *rdata, unsigned int dlen)
 {
 	get_random_bytes(rdata, dlen);
 	return 0;
 }
 
-static int krng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+static int krng_seed(struct crypto_rng *tfm, const u8 *seed,
unsigned int slen) { return 0; } -static struct crypto_alg krng_alg = { - .cra_name = "stdrng", - .cra_driver_name = "krng", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = 0, - .cra_type = &crypto_rng_type, - .cra_module = THIS_MODULE, - .cra_u = { - .rng = { - .rng_make_random = krng_get_random, - .rng_reset = krng_reset, - .seedsize = 0, - } +static struct rng_alg krng_alg = { + .generate = krng_generate, + .seed = krng_seed, + .base = { + .cra_name = "stdrng", + .cra_driver_name = "krng", + .cra_priority = 200, + .cra_module = THIS_MODULE, } }; @@ -48,13 +44,12 @@ static struct crypto_alg krng_alg = { /* Module initalization */ static int __init krng_mod_init(void) { - return crypto_register_alg(&krng_alg); + return crypto_register_rng(&krng_alg); } static void __exit krng_mod_fini(void) { - crypto_unregister_alg(&krng_alg); - return; + crypto_unregister_rng(&krng_alg); } module_init(krng_mod_init); diff --git a/crypto/md5.c b/crypto/md5.c index 36f5e5b103f3..33d17e9a8702 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -51,10 +51,10 @@ static int md5_init(struct shash_desc *desc) { struct md5_state *mctx = shash_desc_ctx(desc); - mctx->hash[0] = 0x67452301; - mctx->hash[1] = 0xefcdab89; - mctx->hash[2] = 0x98badcfe; - mctx->hash[3] = 0x10325476; + mctx->hash[0] = MD5_H0; + mctx->hash[1] = MD5_H1; + mctx->hash[2] = MD5_H2; + mctx->hash[3] = MD5_H3; mctx->byte_count = 0; return 0; diff --git a/crypto/pcompress.c b/crypto/pcompress.c index 7140fe70c7af..7a13b4088857 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c @@ -38,11 +38,6 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) return 0; } -static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg) -{ - return alg->cra_ctxsize; -} - static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) { return 0; @@ -77,7 +72,7 @@ static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) } static const struct crypto_type crypto_pcomp_type = { - .extsize = crypto_pcomp_extsize, + .extsize = crypto_alg_extsize, .init = crypto_pcomp_init, .init_tfm = crypto_pcomp_init_tfm, #ifdef CONFIG_PROC_FS diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index c305d4112735..ff174b61d820 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -20,6 +20,7 @@ #include <crypto/algapi.h> #include <crypto/internal/aead.h> +#include <linux/atomic.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> @@ -60,8 +61,8 @@ static struct padata_pcrypt pdecrypt; static struct kset *pcrypt_kset; struct pcrypt_instance_ctx { - struct crypto_spawn spawn; - unsigned int tfm_count; + struct crypto_aead_spawn spawn; + atomic_t tfm_count; }; struct pcrypt_aead_ctx { @@ -278,9 +279,8 @@ static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm) struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_aead *cipher; - ictx->tfm_count++; - - cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask); + cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) % + cpumask_weight(cpu_online_mask); ctx->cb_cpu = cpumask_first(cpu_online_mask); for (cpu = 0; cpu < cpu_index; cpu++) @@ -292,9 +292,10 @@ static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm) return PTR_ERR(cipher); ctx->child = cipher; - tfm->crt_aead.reqsize = sizeof(struct pcrypt_request) - + sizeof(struct aead_givcrypt_request) - + crypto_aead_reqsize(cipher); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct pcrypt_request) + + sizeof(struct aead_givcrypt_request) + + 
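+	/*
+	 * Editorial note, not part of the original patch: the switch to an
+	 * atomic_t above keeps the round-robin CPU assignment race-free
+	 * without a lock. With, say, 4 online CPUs, successive transforms
+	 * get atomic_inc_return() values 1, 2, 3, 4, ... and thus cpu_index
+	 * 1, 2, 3, 0, ...; cb_cpu is then advanced that many steps from the
+	 * first online CPU.
+	 */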
crypto_aead_reqsize(cipher)); return 0; } @@ -306,57 +307,50 @@ static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm) crypto_free_aead(ctx->child); } -static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg) +static int pcrypt_init_instance(struct crypto_instance *inst, + struct crypto_alg *alg) { - struct crypto_instance *inst; - struct pcrypt_instance_ctx *ctx; - int err; - - inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); - if (!inst) { - inst = ERR_PTR(-ENOMEM); - goto out; - } - - err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) - goto out_free_inst; + return -ENAMETOOLONG; memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - ctx = crypto_instance_ctx(inst); - err = crypto_init_spawn(&ctx->spawn, alg, inst, - CRYPTO_ALG_TYPE_MASK); - if (err) - goto out_free_inst; - inst->alg.cra_priority = alg->cra_priority + 100; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; -out: - return inst; - -out_free_inst: - kfree(inst); - inst = ERR_PTR(err); - goto out; + return 0; } static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, u32 type, u32 mask) { + struct pcrypt_instance_ctx *ctx; struct crypto_instance *inst; struct crypto_alg *alg; + const char *name; + int err; + + name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(name)) + return ERR_CAST(name); + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + ctx = crypto_instance_ctx(inst); + crypto_set_aead_spawn(&ctx->spawn, inst); - alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK)); - if (IS_ERR(alg)) - return ERR_CAST(alg); + err = crypto_grab_aead(&ctx->spawn, name, 0, 0); + if (err) + goto out_free_inst; - inst = pcrypt_alloc_instance(alg); - if (IS_ERR(inst)) - goto out_put_alg; + alg = crypto_aead_spawn_alg(&ctx->spawn); + err = pcrypt_init_instance(inst, alg); + if (err) + goto out_drop_aead; inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; inst->alg.cra_type = &crypto_aead_type; @@ -376,9 +370,15 @@ static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb, inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt; inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt; -out_put_alg: - crypto_mod_put(alg); +out: return inst; + +out_drop_aead: + crypto_drop_aead(&ctx->spawn); +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; } static struct crypto_instance *pcrypt_alloc(struct rtattr **tb) @@ -401,7 +401,7 @@ static void pcrypt_free(struct crypto_instance *inst) { struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); - crypto_drop_spawn(&ctx->spawn); + crypto_drop_aead(&ctx->spawn); kfree(inst); } diff --git a/crypto/proc.c b/crypto/proc.c index 4ffe73b51612..2cc10c96d753 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -20,47 +20,8 @@ #include <linux/rwsem.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/sysctl.h> #include "internal.h" -#ifdef CONFIG_CRYPTO_FIPS -static struct ctl_table crypto_sysctl_table[] = { - { - .procname = "fips_enabled", - .data = &fips_enabled, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_dointvec - }, - {} -}; - -static struct ctl_table crypto_dir_table[] = { - { - .procname = "crypto", - .mode = 0555, - .child = crypto_sysctl_table - }, - {} -}; - -static struct ctl_table_header *crypto_sysctls; - -static void crypto_proc_fips_init(void) -{ - crypto_sysctls = 
register_sysctl_table(crypto_dir_table); -} - -static void crypto_proc_fips_exit(void) -{ - if (crypto_sysctls) - unregister_sysctl_table(crypto_sysctls); -} -#else -#define crypto_proc_fips_init() -#define crypto_proc_fips_exit() -#endif - static void *c_start(struct seq_file *m, loff_t *pos) { down_read(&crypto_alg_sem); @@ -148,11 +109,9 @@ static const struct file_operations proc_crypto_ops = { void __init crypto_init_proc(void) { proc_create("crypto", 0, NULL, &proc_crypto_ops); - crypto_proc_fips_init(); } void __exit crypto_exit_proc(void) { - crypto_proc_fips_exit(); remove_proc_entry("crypto", NULL); } diff --git a/crypto/rng.c b/crypto/rng.c index e0a25c2456de..13155058b193 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -4,6 +4,7 @@ * RNG operations. * * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com> + * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -24,12 +25,19 @@ #include <linux/cryptouser.h> #include <net/netlink.h> +#include "internal.h" + static DEFINE_MUTEX(crypto_default_rng_lock); struct crypto_rng *crypto_default_rng; EXPORT_SYMBOL_GPL(crypto_default_rng); static int crypto_default_rng_refcnt; -static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_rng, base); +} + +int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { u8 *buf = NULL; int err; @@ -43,21 +51,23 @@ static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) seed = buf; } - err = crypto_rng_alg(tfm)->rng_reset(tfm, seed, slen); + err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); - kfree(buf); + kzfree(buf); return err; } +EXPORT_SYMBOL_GPL(crypto_rng_reset); -static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +static int crypto_rng_init_tfm(struct crypto_tfm *tfm) { - struct rng_alg *alg = &tfm->__crt_alg->cra_rng; - struct rng_tfm *ops = &tfm->crt_rng; + return 0; +} - ops->rng_gen_random = alg->rng_make_random; - ops->rng_reset = rngapi_reset; +static unsigned int seedsize(struct crypto_alg *alg) +{ + struct rng_alg *ralg = container_of(alg, struct rng_alg, base); - return 0; + return ralg->seedsize; } #ifdef CONFIG_NET @@ -67,7 +77,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) strncpy(rrng.type, "rng", sizeof(rrng.type)); - rrng.seedsize = alg->cra_rng.seedsize; + rrng.seedsize = seedsize(alg); if (nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(struct crypto_report_rng), &rrng)) @@ -89,24 +99,27 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : rng\n"); - seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize); -} - -static unsigned int crypto_rng_ctxsize(struct crypto_alg *alg, u32 type, - u32 mask) -{ - return alg->cra_ctxsize; + seq_printf(m, "seedsize : %u\n", seedsize(alg)); } -const struct crypto_type crypto_rng_type = { - .ctxsize = crypto_rng_ctxsize, - .init = crypto_init_rng_ops, +static const struct crypto_type crypto_rng_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_rng_init_tfm, #ifdef CONFIG_PROC_FS .show = crypto_rng_show, #endif .report = crypto_rng_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = 
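+	/*
+	 * Editorial sketch, not part of the original patch: with this type
+	 * object in place, an RNG driver only fills in a struct rng_alg and
+	 * calls crypto_register_rng(). A hypothetical driver:
+	 *
+	 *	static struct rng_alg my_rng_alg = {
+	 *		.generate	= my_generate,
+	 *		.seed		= my_seed,
+	 *		.seedsize	= 16,
+	 *		.base		= {
+	 *			.cra_name	 = "stdrng",
+	 *			.cra_driver_name = "my_rng",
+	 *			.cra_priority	 = 300,
+	 *			.cra_module	 = THIS_MODULE,
+	 *		},
+	 *	};
+	 *
+	 * crypto_register_rng() then forces the algorithm flags to
+	 * CRYPTO_ALG_TYPE_RNG and rejects seed sizes above PAGE_SIZE / 8,
+	 * as below.
+	 */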
CRYPTO_ALG_TYPE_RNG, + .tfmsize = offsetof(struct crypto_rng, base), }; -EXPORT_SYMBOL_GPL(crypto_rng_type); + +struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_rng_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_rng); int crypto_get_default_rng(void) { @@ -150,5 +163,55 @@ void crypto_put_default_rng(void) } EXPORT_SYMBOL_GPL(crypto_put_default_rng); +int crypto_register_rng(struct rng_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (alg->seedsize > PAGE_SIZE / 8) + return -EINVAL; + + base->cra_type = &crypto_rng_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_RNG; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_rng); + +void crypto_unregister_rng(struct rng_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_rng); + +int crypto_register_rngs(struct rng_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_rng(algs + i); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_rng(algs + i); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_rngs); + +void crypto_unregister_rngs(struct rng_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_rng(algs + i); +} +EXPORT_SYMBOL_GPL(crypto_unregister_rngs); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Random Number Generator"); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 3bd749c7bb70..8690324d3aef 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -104,22 +104,18 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out) { struct scatter_walk walk; - unsigned int offset = 0; + struct scatterlist tmp[2]; if (!nbytes) return; - for (;;) { - scatterwalk_start(&walk, sg); - - if (start < offset + sg->length) - break; + sg = scatterwalk_ffwd(tmp, sg, start); - offset += sg->length; - sg = sg_next(sg); - } + if (sg_page(sg) == virt_to_page(buf) && + sg->offset == offset_in_page(buf)) + return; - scatterwalk_advance(&walk, start - offset); + scatterwalk_start(&walk, sg); scatterwalk_copychunks(buf, &walk, nbytes, out); scatterwalk_done(&walk, out, 0); } @@ -146,3 +142,25 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes) return n; } EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen); + +struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], + struct scatterlist *src, + unsigned int len) +{ + for (;;) { + if (!len) + return src; + + if (src->length > len) + break; + + len -= src->length; + src = sg_next(src); + } + + sg_set_page(dst, sg_page(src), src->length - len, src->offset + len); + scatterwalk_crypto_chain(dst, sg_next(src), 0, 2); + + return dst; +} +EXPORT_SYMBOL_GPL(scatterwalk_ffwd); diff --git a/crypto/seqiv.c b/crypto/seqiv.c index b7bb9a2f4a31..127970a69ecf 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -15,7 +15,9 @@ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> +#include <crypto/null.h> #include <crypto/rng.h> +#include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -24,11 +26,41 @@ #include <linux/spinlock.h> #include <linux/string.h> +struct seqniv_request_ctx { + struct scatterlist dst[2]; + struct aead_request subreq; +}; + struct seqiv_ctx { spinlock_t lock; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; +struct 
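+/*
+ * Editorial sketch, not part of the original patch: scatterwalk_ffwd()
+ * added above fast-forwards a scatterlist by 'len' bytes. It returns a
+ * pointer into the original list when the offset lands exactly on an
+ * entry boundary, and otherwise fills the caller-provided two-entry
+ * stub with the partial entry plus a chain to the rest. Typical use,
+ * as in the seqiv code below, is skipping the associated data:
+ *
+ *	struct scatterlist tmp[2];
+ *	struct scatterlist *payload;
+ *
+ *	payload = scatterwalk_ffwd(tmp, req->src, req->assoclen);
+ *	(tmp is caller-owned scratch storage; 'payload' now points at
+ *	 the data past the associated bytes)
+ */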
seqiv_aead_ctx { + struct crypto_aead *child; + spinlock_t lock; + struct crypto_blkcipher *null; + u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); +}; + +static void seqiv_free(struct crypto_instance *inst); + +static int seqiv_aead_setkey(struct crypto_aead *tfm, + const u8 *key, unsigned int keylen) +{ + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setkey(ctx->child, key, keylen); +} + +static int seqiv_aead_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm); + + return crypto_aead_setauthsize(ctx->child, authsize); +} + static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) { struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); @@ -81,6 +113,77 @@ static void seqiv_aead_complete(struct crypto_async_request *base, int err) aead_givcrypt_complete(req, err); } +static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) +{ + struct aead_request *subreq = aead_request_ctx(req); + struct crypto_aead *geniv; + + if (err == -EINPROGRESS) + return; + + if (err) + goto out; + + geniv = crypto_aead_reqtfm(req); + memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv)); + +out: + kzfree(subreq->iv); +} + +static void seqiv_aead_encrypt_complete(struct crypto_async_request *base, + int err) +{ + struct aead_request *req = base->data; + + seqiv_aead_encrypt_complete2(req, err); + aead_request_complete(req, err); +} + +static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err) +{ + unsigned int ivsize = 8; + u8 data[20]; + + if (err == -EINPROGRESS) + return; + + /* Swap IV and ESP header back to correct order. */ + scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0); + scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1); + scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1); +} + +static void seqniv_aead_encrypt_complete(struct crypto_async_request *base, + int err) +{ + struct aead_request *req = base->data; + + seqniv_aead_encrypt_complete2(req, err); + aead_request_complete(req, err); +} + +static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err) +{ + u8 data[4]; + + if (err == -EINPROGRESS) + return; + + /* Move ESP header back to correct location. */ + scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0); + scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1); +} + +static void seqniv_aead_decrypt_complete(struct crypto_async_request *base, + int err) +{ + struct aead_request *req = base->data; + + seqniv_aead_decrypt_complete2(req, err); + aead_request_complete(req, err); +} + static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq, unsigned int ivsize) { @@ -186,6 +289,228 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) return err; } +static int seqiv_aead_encrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct seqniv_request_ctx *rctx = aead_request_ctx(req); + struct aead_request *subreq = &rctx->subreq; + struct scatterlist *dst; + crypto_completion_t compl; + void *data; + unsigned int ivsize = 8; + u8 buf[20] __attribute__ ((aligned(__alignof__(u32)))); + int err; + + if (req->cryptlen < ivsize) + return -EINVAL; + + /* ESP AD is at most 12 bytes (ESN). 
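+	 * Editorial note, not part of the original patch: this is why the
+	 * 20-byte stack buffers in these compat paths suffice -- at most
+	 * 12 bytes of associated data plus the 8-byte sequence-number IV.
+	 * The seqniv wire format keeps the IV between the AD and the
+	 * payload, so the scatterwalk_map_and_copy() shuffles in the
+	 * completion handlers above and in the code below temporarily
+	 * reorder AD and IV for the child algorithm and restore the wire
+	 * order afterwards.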
*/ + if (req->assoclen > 12) + return -EINVAL; + + aead_request_set_tfm(subreq, ctx->child); + + compl = seqniv_aead_encrypt_complete; + data = req; + + if (req->src != req->dst) { + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct blkcipher_desc desc = { + .tfm = ctx->null, + }; + + err = crypto_blkcipher_encrypt( + &desc, + scatterwalk_ffwd(dstbuf, req->dst, + req->assoclen + ivsize), + scatterwalk_ffwd(srcbuf, req->src, + req->assoclen + ivsize), + req->cryptlen - ivsize); + if (err) + return err; + } + + dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize); + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, dst, dst, + req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + memcpy(buf, req->iv, ivsize); + crypto_xor(buf, ctx->salt, ivsize); + memcpy(req->iv, buf, ivsize); + + /* Swap order of IV and ESP AD for ICV generation. */ + scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0); + scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1); + + err = crypto_aead_encrypt(subreq); + seqniv_aead_encrypt_complete2(req, err); + return err; +} + +static int seqiv_aead_encrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + u8 *info; + unsigned int ivsize = 8; + int err; + + if (req->cryptlen < ivsize) + return -EINVAL; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + info = req->iv; + + if (req->src != req->dst) { + struct scatterlist src[2]; + struct scatterlist dst[2]; + struct blkcipher_desc desc = { + .tfm = ctx->null, + }; + + err = crypto_blkcipher_encrypt( + &desc, + scatterwalk_ffwd(dst, req->dst, + req->assoclen + ivsize), + scatterwalk_ffwd(src, req->src, + req->assoclen + ivsize), + req->cryptlen - ivsize); + if (err) + return err; + } + + if (unlikely(!IS_ALIGNED((unsigned long)info, + crypto_aead_alignmask(geniv) + 1))) { + info = kmalloc(ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
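+	/*
+	 * Editorial note, not part of the original patch: the kmalloc()
+	 * fallback here only triggers when req->iv is not aligned for the
+	 * child algorithm; the aligned copy is XORed with the per-transform
+	 * salt and written into the destination at offset assoclen, and the
+	 * subrequest AD length of assoclen + ivsize ensures the generated
+	 * IV is covered by the authentication tag.
+	 */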
GFP_KERNEL: + GFP_ATOMIC); + if (!info) + return -ENOMEM; + + memcpy(info, req->iv, ivsize); + compl = seqiv_aead_encrypt_complete; + data = req; + } + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->dst, req->dst, + req->cryptlen - ivsize, info); + aead_request_set_ad(subreq, req->assoclen + ivsize); + + crypto_xor(info, ctx->salt, ivsize); + scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); + + err = crypto_aead_encrypt(subreq); + if (unlikely(info != req->iv)) + seqiv_aead_encrypt_complete2(req, err); + return err; +} + +static int seqiv_aead_decrypt_compat(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct seqniv_request_ctx *rctx = aead_request_ctx(req); + struct aead_request *subreq = &rctx->subreq; + struct scatterlist *dst; + crypto_completion_t compl; + void *data; + unsigned int ivsize = 8; + u8 buf[20]; + int err; + + if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) + return -EINVAL; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + if (req->assoclen > 12) + return -EINVAL; + else if (req->assoclen > 8) { + compl = seqniv_aead_decrypt_complete; + data = req; + } + + if (req->src != req->dst) { + struct scatterlist srcbuf[2]; + struct scatterlist dstbuf[2]; + struct blkcipher_desc desc = { + .tfm = ctx->null, + }; + + err = crypto_blkcipher_encrypt( + &desc, + scatterwalk_ffwd(dstbuf, req->dst, + req->assoclen + ivsize), + scatterwalk_ffwd(srcbuf, req->src, + req->assoclen + ivsize), + req->cryptlen - ivsize); + if (err) + return err; + } + + /* Move ESP AD forward for ICV generation. */ + scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0); + memcpy(req->iv, buf + req->assoclen, ivsize); + scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1); + + dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize); + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, dst, dst, + req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen); + + err = crypto_aead_decrypt(subreq); + if (req->assoclen > 8) + seqniv_aead_decrypt_complete2(req, err); + return err; +} + +static int seqiv_aead_decrypt(struct aead_request *req) +{ + struct crypto_aead *geniv = crypto_aead_reqtfm(req); + struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv); + struct aead_request *subreq = aead_request_ctx(req); + crypto_completion_t compl; + void *data; + unsigned int ivsize = 8; + + if (req->cryptlen < ivsize + crypto_aead_authsize(geniv)) + return -EINVAL; + + aead_request_set_tfm(subreq, ctx->child); + + compl = req->base.complete; + data = req->base.data; + + aead_request_set_callback(subreq, req->base.flags, compl, data); + aead_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen - ivsize, req->iv); + aead_request_set_ad(subreq, req->assoclen + ivsize); + + scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0); + if (req->src != req->dst) + scatterwalk_map_and_copy(req->iv, req->dst, + req->assoclen, ivsize, 1); + + return crypto_aead_decrypt(subreq); +} + static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) { struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); @@ -232,6 +557,52 @@ unlock: return seqiv_aead_givencrypt(req); } +static int seqiv_aead_encrypt_compat_first(struct aead_request *req) +{ + struct crypto_aead *geniv = 
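+	/*
+	 * Editorial note, not part of the original patch: the decrypt paths
+	 * above never touch ctx->salt -- the 8 bytes carried in the packet
+	 * are already the salted IV produced at encrypt time, so they are
+	 * copied straight out of the buffer at offset assoclen and handed
+	 * to the child transform as the request IV.
+	 */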
crypto_aead_reqtfm(req);
+	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+	int err = 0;
+
+	spin_lock_bh(&ctx->lock);
+	if (geniv->encrypt != seqiv_aead_encrypt_compat_first)
+		goto unlock;
+
+	geniv->encrypt = seqiv_aead_encrypt_compat;
+	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+				   crypto_aead_ivsize(geniv));
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+
+	if (err)
+		return err;
+
+	return seqiv_aead_encrypt_compat(req);
+}
+
+static int seqiv_aead_encrypt_first(struct aead_request *req)
+{
+	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+	int err = 0;
+
+	spin_lock_bh(&ctx->lock);
+	if (geniv->encrypt != seqiv_aead_encrypt_first)
+		goto unlock;
+
+	geniv->encrypt = seqiv_aead_encrypt;
+	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+				   crypto_aead_ivsize(geniv));
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+
+	if (err)
+		return err;
+
+	return seqiv_aead_encrypt(req);
+}
+
 static int seqiv_init(struct crypto_tfm *tfm)
 {
 	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
@@ -244,34 +615,81 @@ static int seqiv_init(struct crypto_tfm *tfm)
 	return skcipher_geniv_init(tfm);
 }
 
-static int seqiv_aead_init(struct crypto_tfm *tfm)
+static int seqiv_old_aead_init(struct crypto_tfm *tfm)
 {
 	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
 	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
 
 	spin_lock_init(&ctx->lock);
 
-	tfm->crt_aead.reqsize = sizeof(struct aead_request);
+	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+				sizeof(struct aead_request));
 
 	return aead_geniv_init(tfm);
 }
 
-static struct crypto_template seqiv_tmpl;
+static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
+{
+	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
+	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+	int err;
+
+	spin_lock_init(&ctx->lock);
+
+	/* use the caller-supplied size so that seqniv requests get room
+	 * for their larger seqniv_request_ctx
+	 */
+	crypto_aead_set_reqsize(geniv, reqsize);
+
+	ctx->null = crypto_get_default_null_skcipher();
+	err = PTR_ERR(ctx->null);
+	if (IS_ERR(ctx->null))
+		goto out;
+
+	err = aead_geniv_init(tfm);
+	if (err)
+		goto drop_null;
+
+	ctx->child = geniv->child;
+	geniv->child = geniv;
+
+out:
+	return err;
 
-static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
+drop_null:
+	crypto_put_default_null_skcipher();
+	goto out;
+}
+
+static int seqiv_aead_init(struct crypto_tfm *tfm)
+{
+	return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
+}
+
+static int seqniv_aead_init(struct crypto_tfm *tfm)
+{
+	return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
+}
+
+static void seqiv_aead_exit(struct crypto_tfm *tfm)
+{
+	struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_aead(ctx->child);
+	crypto_put_default_null_skcipher();
+}
+
+static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
+				   struct rtattr **tb)
 {
 	struct crypto_instance *inst;
+	int err;
 
-	inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
+	inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
 	if (IS_ERR(inst))
-		goto out;
+		return PTR_ERR(inst);
 
-	if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) {
-		skcipher_geniv_free(inst);
-		inst = ERR_PTR(-EINVAL);
-		goto out;
-	}
+	err = -EINVAL;
+	if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
+		goto free_inst;
 
 	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
 
@@ -279,65 +697,174 @@ static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
 	inst->alg.cra_exit = skcipher_geniv_exit;
 
 	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
+
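+	/*
+	 * Editorial note, not part of the original patch: the *_first()
+	 * handlers above implement one-shot lazy seeding. The first encrypt
+	 * call draws ivsize bytes of salt from the default RNG under
+	 * ctx->lock, swaps geniv->encrypt over to the real handler so the
+	 * check never fires again, and falls through to that handler;
+	 * racing callers serialize on the lock and see the updated pointer.
+	 */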
inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); + + inst->alg.cra_alignmask |= __alignof__(u32) - 1; + + err = crypto_register_instance(tmpl, inst); + if (err) + goto free_inst; out: - return inst; + return err; + +free_inst: + skcipher_geniv_free(inst); + goto out; } -static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) +static int seqiv_old_aead_create(struct crypto_template *tmpl, + struct aead_instance *aead) { - struct crypto_instance *inst; - - inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0); - - if (IS_ERR(inst)) - goto out; + struct crypto_instance *inst = aead_crypto_instance(aead); + int err = -EINVAL; - if (inst->alg.cra_aead.ivsize < sizeof(u64)) { - aead_geniv_free(inst); - inst = ERR_PTR(-EINVAL); - goto out; - } + if (inst->alg.cra_aead.ivsize < sizeof(u64)) + goto free_inst; inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first; - inst->alg.cra_init = seqiv_aead_init; + inst->alg.cra_init = seqiv_old_aead_init; inst->alg.cra_exit = aead_geniv_exit; inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize; + inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); + + err = crypto_register_instance(tmpl, inst); + if (err) + goto free_inst; out: - return inst; + return err; + +free_inst: + aead_geniv_free(aead); + goto out; } -static struct crypto_instance *seqiv_alloc(struct rtattr **tb) +static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct aead_instance *inst; + struct crypto_aead_spawn *spawn; + struct aead_alg *alg; + int err; + + inst = aead_geniv_alloc(tmpl, tb, 0, 0); + + if (IS_ERR(inst)) + return PTR_ERR(inst); + + inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; + + if (inst->alg.base.cra_aead.encrypt) + return seqiv_old_aead_create(tmpl, inst); + + err = -EINVAL; + if (inst->alg.ivsize != sizeof(u64)) + goto free_inst; + + spawn = aead_instance_ctx(inst); + alg = crypto_spawn_aead_alg(spawn); + + inst->alg.setkey = seqiv_aead_setkey; + inst->alg.setauthsize = seqiv_aead_setauthsize; + inst->alg.encrypt = seqiv_aead_encrypt_first; + inst->alg.decrypt = seqiv_aead_decrypt; + + inst->alg.base.cra_init = seqiv_aead_init; + inst->alg.base.cra_exit = seqiv_aead_exit; + + inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx); + inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize; + + if (alg->base.cra_aead.encrypt) { + inst->alg.encrypt = seqiv_aead_encrypt_compat_first; + inst->alg.decrypt = seqiv_aead_decrypt_compat; + + inst->alg.base.cra_init = seqniv_aead_init; + inst->alg.base.cra_exit = seqiv_aead_exit; + } + + err = aead_register_instance(tmpl, inst); + if (err) + goto free_inst; + +out: + return err; + +free_inst: + aead_geniv_free(inst); + goto out; +} + +static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; - struct crypto_instance *inst; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) - return ERR_CAST(algt); + return PTR_ERR(algt); err = crypto_get_default_rng(); if (err) - return ERR_PTR(err); + return err; if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) - inst = seqiv_ablkcipher_alloc(tb); + err = seqiv_ablkcipher_create(tmpl, tb); else - inst = seqiv_aead_alloc(tb); + err = seqiv_aead_create(tmpl, tb); + + if (err) + crypto_put_default_rng(); + return err; +} + +static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct aead_instance *inst; + struct crypto_aead_spawn *spawn; + struct aead_alg *alg; + int err; + + err = crypto_get_default_rng(); + if (err) + return err; + + inst = 
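+	/*
+	 * Editorial note, not part of the original patch: "seqniv" is seqiv
+	 * hard-wired to the compat handlers, i.e. to the wire format that
+	 * keeps the IV between the associated data and the payload, while
+	 * plain "seqiv" treats the IV as trailing associated data. Both
+	 * create paths take a default-RNG reference first and drop it again
+	 * on any failure.
+	 */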
aead_geniv_alloc(tmpl, tb, 0, 0); + err = PTR_ERR(inst); if (IS_ERR(inst)) goto put_rng; - inst->alg.cra_alignmask |= __alignof__(u32) - 1; - inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); + err = -EINVAL; + if (inst->alg.ivsize != sizeof(u64)) + goto free_inst; + + spawn = aead_instance_ctx(inst); + alg = crypto_spawn_aead_alg(spawn); + + inst->alg.setkey = seqiv_aead_setkey; + inst->alg.setauthsize = seqiv_aead_setauthsize; + inst->alg.encrypt = seqiv_aead_encrypt_compat_first; + inst->alg.decrypt = seqiv_aead_decrypt_compat; + + inst->alg.base.cra_init = seqniv_aead_init; + inst->alg.base.cra_exit = seqiv_aead_exit; + + inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; + inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx); + inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize; + + err = aead_register_instance(tmpl, inst); + if (err) + goto free_inst; out: - return inst; + return err; +free_inst: + aead_geniv_free(inst); put_rng: crypto_put_default_rng(); goto out; @@ -348,20 +875,42 @@ static void seqiv_free(struct crypto_instance *inst) if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) skcipher_geniv_free(inst); else - aead_geniv_free(inst); + aead_geniv_free(aead_instance(inst)); crypto_put_default_rng(); } static struct crypto_template seqiv_tmpl = { .name = "seqiv", - .alloc = seqiv_alloc, + .create = seqiv_create, + .free = seqiv_free, + .module = THIS_MODULE, +}; + +static struct crypto_template seqniv_tmpl = { + .name = "seqniv", + .create = seqniv_create, .free = seqiv_free, .module = THIS_MODULE, }; static int __init seqiv_module_init(void) { - return crypto_register_template(&seqiv_tmpl); + int err; + + err = crypto_register_template(&seqiv_tmpl); + if (err) + goto out; + + err = crypto_register_template(&seqniv_tmpl); + if (err) + goto out_undo_niv; + +out: + return err; + +out_undo_niv: + crypto_unregister_template(&seqiv_tmpl); + goto out; } static void __exit seqiv_module_exit(void) @@ -375,3 +924,4 @@ module_exit(seqiv_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Sequence Number IV Generator"); MODULE_ALIAS_CRYPTO("seqiv"); +MODULE_ALIAS_CRYPTO("seqniv"); diff --git a/crypto/shash.c b/crypto/shash.c index 47c713954bf3..ecb1e3d39bf0 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -520,11 +520,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) return 0; } -static unsigned int crypto_shash_extsize(struct crypto_alg *alg) -{ - return alg->cra_ctxsize; -} - #ifdef CONFIG_NET static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) { @@ -564,7 +559,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) static const struct crypto_type crypto_shash_type = { .ctxsize = crypto_shash_ctxsize, - .extsize = crypto_shash_extsize, + .extsize = crypto_alg_extsize, .init = crypto_init_shash_ops, .init_tfm = crypto_shash_init_tfm, #ifdef CONFIG_PROC_FS diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 1a2800107fc8..2bff6130d806 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -22,8 +22,10 @@ * */ +#include <crypto/aead.h> #include <crypto/hash.h> #include <linux/err.h> +#include <linux/fips.h> #include <linux/init.h> #include <linux/gfp.h> #include <linux/module.h> @@ -34,7 +36,6 @@ #include <linux/timex.h> #include <linux/interrupt.h> #include "tcrypt.h" -#include "internal.h" /* * Need slab memory for testing (size in number of pages). 
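The three tcrypt hunks below all make the same fix: crypto_ahash_init() can legitimately return -EINPROGRESS or -EBUSY on an asynchronous transform, so its return value must be funneled through the same completion-waiting helper that the update/final steps already use. A minimal sketch of such a helper, assuming the completion-carrying tcrypt_result that tcrypt.c already attaches to req->base.data (the in-tree helper may differ in detail):

	static inline int do_one_ahash_op(struct ahash_request *req, int ret)
	{
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			struct tcrypt_result *tr = req->base.data;

			/* wait for the async operation, then collect its status */
			wait_for_completion(&tr->completion);
			reinit_completion(&tr->completion);
			ret = tr->err;
		}
		return ret;
	}
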
@@ -808,7 +809,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen, for (start = jiffies, end = start + secs * HZ, bcount = 0; time_before(jiffies, end); bcount++) { - ret = crypto_ahash_init(req); + ret = do_one_ahash_op(req, crypto_ahash_init(req)); if (ret) return ret; for (pcount = 0; pcount < blen; pcount += plen) { @@ -877,7 +878,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen, /* Warm-up run. */ for (i = 0; i < 4; i++) { - ret = crypto_ahash_init(req); + ret = do_one_ahash_op(req, crypto_ahash_init(req)); if (ret) goto out; for (pcount = 0; pcount < blen; pcount += plen) { @@ -896,7 +897,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen, start = get_cycles(); - ret = crypto_ahash_init(req); + ret = do_one_ahash_op(req, crypto_ahash_init(req)); if (ret) goto out; for (pcount = 0; pcount < blen; pcount += plen) { diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f9bce3d7ee7f..277b3ac0ca1a 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -20,8 +20,10 @@ * */ +#include <crypto/aead.h> #include <crypto/hash.h> #include <linux/err.h> +#include <linux/fips.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> @@ -2318,6 +2320,15 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "compress_null", .test = alg_test_null, }, { + .alg = "crc32", + .test = alg_test_hash, + .suite = { + .hash = { + .vecs = crc32_tv_template, + .count = CRC32_TEST_VECTORS + } + } + }, { .alg = "crc32c", .test = alg_test_crc32c, .fips_allowed = 1, @@ -3095,6 +3106,10 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = "jitterentropy_rng", + .fips_allowed = 1, + .test = alg_test_null, + }, { .alg = "lrw(aes)", .test = alg_test_skcipher, .suite = { diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 62e2485bb428..60031439f8d3 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -54,7 +54,7 @@ struct cipher_testvec { unsigned short tap[MAX_TAP]; int np; unsigned char also_non_np; - unsigned char fail; + bool fail; unsigned char wk; /* weak key flag */ unsigned char klen; unsigned short ilen; @@ -71,7 +71,7 @@ struct aead_testvec { unsigned char atap[MAX_TAP]; int np; int anp; - unsigned char fail; + bool fail; unsigned char novrfy; /* ccm dec verification failure expected */ unsigned char wk; /* weak key flag */ unsigned char klen; @@ -1822,7 +1822,7 @@ static struct hash_testvec tgr128_tv_template[] = { }, }; -#define GHASH_TEST_VECTORS 5 +#define GHASH_TEST_VECTORS 6 static struct hash_testvec ghash_tv_template[] = { @@ -1875,6 +1875,63 @@ static struct hash_testvec ghash_tv_template[] = .psize = 20, .digest = "\xf8\x94\x87\x2a\x4b\x63\x99\x28" "\x23\xf7\x93\xf7\x19\xf5\x96\xd9", + }, { + .key = "\x0a\x1b\x2c\x3d\x4e\x5f\x64\x71" + "\x82\x93\xa4\xb5\xc6\xd7\xe8\xf9", + .ksize = 16, + .plaintext = "\x56\x6f\x72\x20\x6c\x61\x75\x74" + "\x65\x72\x20\x4c\x61\x75\x73\x63" + "\x68\x65\x6e\x20\x75\x6e\x64\x20" + "\x53\x74\x61\x75\x6e\x65\x6e\x20" + "\x73\x65\x69\x20\x73\x74\x69\x6c" + "\x6c\x2c\x0a\x64\x75\x20\x6d\x65" + "\x69\x6e\x20\x74\x69\x65\x66\x74" + "\x69\x65\x66\x65\x73\x20\x4c\x65" + "\x62\x65\x6e\x3b\x0a\x64\x61\x73" + "\x73\x20\x64\x75\x20\x77\x65\x69" + "\xc3\x9f\x74\x20\x77\x61\x73\x20" + "\x64\x65\x72\x20\x57\x69\x6e\x64" + "\x20\x64\x69\x72\x20\x77\x69\x6c" + "\x6c\x2c\x0a\x65\x68\x20\x6e\x6f" + "\x63\x68\x20\x64\x69\x65\x20\x42" + "\x69\x72\x6b\x65\x6e\x20\x62\x65" + "\x62\x65\x6e\x2e\x0a\x0a\x55\x6e" + "\x64\x20\x77\x65\x6e\x6e\x20\x64" + 
"\x69\x72\x20\x65\x69\x6e\x6d\x61" + "\x6c\x20\x64\x61\x73\x20\x53\x63" + "\x68\x77\x65\x69\x67\x65\x6e\x20" + "\x73\x70\x72\x61\x63\x68\x2c\x0a" + "\x6c\x61\x73\x73\x20\x64\x65\x69" + "\x6e\x65\x20\x53\x69\x6e\x6e\x65" + "\x20\x62\x65\x73\x69\x65\x67\x65" + "\x6e\x2e\x0a\x4a\x65\x64\x65\x6d" + "\x20\x48\x61\x75\x63\x68\x65\x20" + "\x67\x69\x62\x74\x20\x64\x69\x63" + "\x68\x2c\x20\x67\x69\x62\x20\x6e" + "\x61\x63\x68\x2c\x0a\x65\x72\x20" + "\x77\x69\x72\x64\x20\x64\x69\x63" + "\x68\x20\x6c\x69\x65\x62\x65\x6e" + "\x20\x75\x6e\x64\x20\x77\x69\x65" + "\x67\x65\x6e\x2e\x0a\x0a\x55\x6e" + "\x64\x20\x64\x61\x6e\x6e\x20\x6d" + "\x65\x69\x6e\x65\x20\x53\x65\x65" + "\x6c\x65\x20\x73\x65\x69\x74\x20" + "\x77\x65\x69\x74\x2c\x20\x73\x65" + "\x69\x20\x77\x65\x69\x74\x2c\x0a" + "\x64\x61\x73\x73\x20\x64\x69\x72" + "\x20\x64\x61\x73\x20\x4c\x65\x62" + "\x65\x6e\x20\x67\x65\x6c\x69\x6e" + "\x67\x65\x2c\x0a\x62\x72\x65\x69" + "\x74\x65\x20\x64\x69\x63\x68\x20" + "\x77\x69\x65\x20\x65\x69\x6e\x20" + "\x46\x65\x69\x65\x72\x6b\x6c\x65" + "\x69\x64\x0a\xc3\xbc\x62\x65\x72" + "\x20\x64\x69\x65\x20\x73\x69\x6e" + "\x6e\x65\x6e\x64\x65\x6e\x20\x44" + "\x69\x6e\x67\x65\x2e\x2e\x2e\x0a", + .psize = 400, + .digest = "\xad\xb1\xc1\xe9\x56\x70\x31\x1d" + "\xbb\x5b\xdf\x5e\x70\x72\x1a\x57", }, }; @@ -3018,7 +3075,7 @@ static struct cipher_testvec des_enc_tv_template[] = { "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90", .rlen = 24, }, { /* Weak key */ - .fail = 1, + .fail = true, .wk = 1, .key = "\x01\x01\x01\x01\x01\x01\x01\x01", .klen = 8, @@ -28591,7 +28648,7 @@ struct comp_testvec { }; struct pcomp_testvec { - void *params; + const void *params; unsigned int paramsize; int inlen, outlen; char input[COMP_BUF_SIZE]; @@ -28946,6 +29003,440 @@ static struct hash_testvec michael_mic_tv_template[] = { }; /* + * CRC32 test vectors + */ +#define CRC32_TEST_VECTORS 14 + +static struct hash_testvec crc32_tv_template[] = { + { + .key = "\x87\xa9\xcb\xed", + .ksize = 4, + .psize = 0, + .digest = "\x87\xa9\xcb\xed", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" + "\x11\x12\x13\x14\x15\x16\x17\x18" + "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" + "\x21\x22\x23\x24\x25\x26\x27\x28", + .psize = 40, + .digest = "\x3a\xdf\x4b\xb0", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" + "\x31\x32\x33\x34\x35\x36\x37\x38" + "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" + "\x41\x42\x43\x44\x45\x46\x47\x48" + "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50", + .psize = 40, + .digest = "\xa9\x7a\x7f\x7b", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58" + "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" + "\x61\x62\x63\x64\x65\x66\x67\x68" + "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" + "\x71\x72\x73\x74\x75\x76\x77\x78", + .psize = 40, + .digest = "\xba\xd3\xf8\x1c", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" + "\x81\x82\x83\x84\x85\x86\x87\x88" + "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" + "\x91\x92\x93\x94\x95\x96\x97\x98" + "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0", + .psize = 40, + .digest = "\xa8\xa9\xc2\x02", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" + "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" + "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" + "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" + "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8", + .psize = 40, + .digest = "\x27\xf0\x57\xe2", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + 
.plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" + "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" + "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" + "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" + "\xe9\xea\xeb\xec\xed\xee\xef\xf0", + .psize = 40, + .digest = "\x49\x78\x10\x08", + }, + { + .key = "\x80\xea\xd3\xf1", + .ksize = 4, + .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" + "\x31\x32\x33\x34\x35\x36\x37\x38" + "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" + "\x41\x42\x43\x44\x45\x46\x47\x48" + "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50", + .psize = 40, + .digest = "\x9a\xb1\xdc\xf0", + }, + { + .key = "\xf3\x4a\x1d\x5d", + .ksize = 4, + .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58" + "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" + "\x61\x62\x63\x64\x65\x66\x67\x68" + "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" + "\x71\x72\x73\x74\x75\x76\x77\x78", + .psize = 40, + .digest = "\xb4\x97\xcc\xd4", + }, + { + .key = "\x2e\x80\x04\x59", + .ksize = 4, + .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" + "\x81\x82\x83\x84\x85\x86\x87\x88" + "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" + "\x91\x92\x93\x94\x95\x96\x97\x98" + "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0", + .psize = 40, + .digest = "\x67\x9b\xfa\x79", + }, + { + .key = "\xa6\xcc\x19\x85", + .ksize = 4, + .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" + "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" + "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" + "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" + "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8", + .psize = 40, + .digest = "\x24\xb5\x16\xef", + }, + { + .key = "\x41\xfc\xfe\x2d", + .ksize = 4, + .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" + "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" + "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" + "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" + "\xe9\xea\xeb\xec\xed\xee\xef\xf0", + .psize = 40, + .digest = "\x15\x94\x80\x39", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" + "\x11\x12\x13\x14\x15\x16\x17\x18" + "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" + "\x21\x22\x23\x24\x25\x26\x27\x28" + "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" + "\x31\x32\x33\x34\x35\x36\x37\x38" + "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" + "\x41\x42\x43\x44\x45\x46\x47\x48" + "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50" + "\x51\x52\x53\x54\x55\x56\x57\x58" + "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" + "\x61\x62\x63\x64\x65\x66\x67\x68" + "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" + "\x71\x72\x73\x74\x75\x76\x77\x78" + "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" + "\x81\x82\x83\x84\x85\x86\x87\x88" + "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" + "\x91\x92\x93\x94\x95\x96\x97\x98" + "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0" + "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" + "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" + "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" + "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" + "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8" + "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" + "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" + "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" + "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" + "\xe9\xea\xeb\xec\xed\xee\xef\xf0", + .psize = 240, + .digest = "\x6c\xc6\x56\xde", + .np = 2, + .tap = { 31, 209 } + }, { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49" + "\xe0\x54\xeb\x82\x19\x8d\x24\xbb" + "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a" + "\xa1\x38\xcf\x43\xda\x71\x08\x7c" + "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee" + "\x85\x1c\x90\x27\xbe\x32\xc9\x60" + "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2" + "\x46\xdd\x74\x0b\x7f\x16\xad\x21" + "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93" + "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05" + "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77" + "\x0e\x82\x19\xb0\x24\xbb\x52\xe9" + "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38" + 
"\xcf\x66\xfd\x71\x08\x9f\x13\xaa" + "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c" + "\xb3\x27\xbe\x55\xec\x60\xf7\x8e" + "\x02\x99\x30\xc7\x3b\xd2\x69\x00" + "\x74\x0b\xa2\x16\xad\x44\xdb\x4f" + "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1" + "\x58\xef\x63\xfa\x91\x05\x9c\x33" + "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5" + "\x19\xb0\x47\xde\x52\xe9\x80\x17" + "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66" + "\xfd\x94\x08\x9f\x36\xcd\x41\xd8" + "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a" + "\xe1\x55\xec\x83\x1a\x8e\x25\xbc" + "\x30\xc7\x5e\xf5\x69\x00\x97\x0b" + "\xa2\x39\xd0\x44\xdb\x72\x09\x7d" + "\x14\xab\x1f\xb6\x4d\xe4\x58\xef" + "\x86\x1d\x91\x28\xbf\x33\xca\x61" + "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3" + "\x47\xde\x75\x0c\x80\x17\xae\x22" + "\xb9\x50\xe7\x5b\xf2\x89\x20\x94" + "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06" + "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78" + "\x0f\x83\x1a\xb1\x25\xbc\x53\xea" + "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39" + "\xd0\x67\xfe\x72\x09\xa0\x14\xab" + "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d" + "\xb4\x28\xbf\x56\xed\x61\xf8\x8f" + "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01" + "\x75\x0c\xa3\x17\xae\x45\xdc\x50" + "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2" + "\x59\xf0\x64\xfb\x92\x06\x9d\x34" + "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6" + "\x1a\xb1\x48\xdf\x53\xea\x81\x18" + "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67" + "\xfe\x95\x09\xa0\x37\xce\x42\xd9" + "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b" + "\xe2\x56\xed\x84\x1b\x8f\x26\xbd" + "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c" + "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e" + "\x15\xac\x20\xb7\x4e\xe5\x59\xf0" + "\x87\x1e\x92\x29\xc0\x34\xcb\x62" + "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4" + "\x48\xdf\x76\x0d\x81\x18\xaf\x23" + "\xba\x51\xe8\x5c\xf3\x8a\x21\x95" + "\x2c\xc3\x37\xce\x65\xfc\x70\x07" + "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79" + "\x10\x84\x1b\xb2\x26\xbd\x54\xeb" + "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a" + "\xd1\x68\xff\x73\x0a\xa1\x15\xac" + "\x43\xda\x4e\xe5\x7c\x13\x87\x1e" + "\xb5\x29\xc0\x57\xee\x62\xf9\x90" + "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02" + "\x76\x0d\xa4\x18\xaf\x46\xdd\x51" + "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3" + "\x5a\xf1\x65\xfc\x93\x07\x9e\x35" + "\xcc\x40\xd7\x6e\x05\x79\x10\xa7" + "\x1b\xb2\x49\xe0\x54\xeb\x82\x19" + "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68" + "\xff\x96\x0a\xa1\x38\xcf\x43\xda" + "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c" + "\xe3\x57\xee\x85\x1c\x90\x27\xbe" + "\x32\xc9\x60\xf7\x6b\x02\x99\x0d" + "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f" + "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1" + "\x88\x1f\x93\x2a\xc1\x35\xcc\x63" + "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5" + "\x49\xe0\x77\x0e\x82\x19\xb0\x24" + "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96" + "\x2d\xc4\x38\xcf\x66\xfd\x71\x08" + "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a" + "\x11\x85\x1c\xb3\x27\xbe\x55\xec" + "\x60\xf7\x8e\x02\x99\x30\xc7\x3b" + "\xd2\x69\x00\x74\x0b\xa2\x16\xad" + "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f" + "\xb6\x2a\xc1\x58\xef\x63\xfa\x91" + "\x05\x9c\x33\xca\x3e\xd5\x6c\x03" + "\x77\x0e\xa5\x19\xb0\x47\xde\x52" + "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4" + "\x5b\xf2\x66\xfd\x94\x08\x9f\x36" + "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8" + "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a" + "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69" + "\x00\x97\x0b\xa2\x39\xd0\x44\xdb" + "\x72\x09\x7d\x14\xab\x1f\xb6\x4d" + "\xe4\x58\xef\x86\x1d\x91\x28\xbf" + "\x33\xca\x61\xf8\x6c\x03\x9a\x0e" + "\xa5\x3c\xd3\x47\xde\x75\x0c\x80" + "\x17\xae\x22\xb9\x50\xe7\x5b\xf2" + "\x89\x20\x94\x2b\xc2\x36\xcd\x64" + "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6" + "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25" + "\xbc\x53\xea\x5e\xf5\x8c\x00\x97" + "\x2e\xc5\x39\xd0\x67\xfe\x72\x09" + "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b" + "\x12\x86\x1d\xb4\x28\xbf\x56\xed" + "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c" + 
"\xd3\x6a\x01\x75\x0c\xa3\x17\xae" + "\x45\xdc\x50\xe7\x7e\x15\x89\x20" + "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92" + "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04" + "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53" + "\xea\x81\x18\x8c\x23\xba\x2e\xc5" + "\x5c\xf3\x67\xfe\x95\x09\xa0\x37" + "\xce\x42\xd9\x70\x07\x7b\x12\xa9" + "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b" + "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a" + "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc" + "\x73\x0a\x7e\x15\xac\x20\xb7\x4e" + "\xe5\x59\xf0\x87\x1e\x92\x29\xc0" + "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f" + "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81" + "\x18\xaf\x23\xba\x51\xe8\x5c\xf3" + "\x8a\x21\x95\x2c\xc3\x37\xce\x65" + "\xfc\x70\x07\x9e\x12\xa9\x40\xd7" + "\x4b\xe2\x79\x10\x84\x1b\xb2\x26" + "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98" + "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a" + "\xa1\x15\xac\x43\xda\x4e\xe5\x7c" + "\x13\x87\x1e\xb5\x29\xc0\x57\xee" + "\x62\xf9\x90\x04\x9b\x32\xc9\x3d" + "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf" + "\x46\xdd\x51\xe8\x7f\x16\x8a\x21" + "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93" + "\x07\x9e\x35\xcc\x40\xd7\x6e\x05" + "\x79\x10\xa7\x1b\xb2\x49\xe0\x54" + "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6" + "\x5d\xf4\x68\xff\x96\x0a\xa1\x38" + "\xcf\x43\xda\x71\x08\x7c\x13\xaa" + "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c" + "\x90\x27\xbe\x32\xc9\x60\xf7\x6b" + "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd" + "\x74\x0b\x7f\x16\xad\x21\xb8\x4f" + "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1" + "\x35\xcc\x63\xfa\x6e\x05\x9c\x10" + "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82" + "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4" + "\x8b\x22\x96\x2d\xc4\x38\xcf\x66" + "\xfd\x71\x08\x9f\x13\xaa\x41\xd8" + "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27" + "\xbe\x55\xec\x60\xf7\x8e\x02\x99" + "\x30\xc7\x3b\xd2\x69\x00\x74\x0b" + "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d" + "\x14\x88\x1f\xb6\x2a\xc1\x58\xef" + "\x63\xfa\x91\x05\x9c\x33\xca\x3e" + "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0" + "\x47\xde\x52\xe9\x80\x17\x8b\x22" + "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94" + "\x08\x9f\x36\xcd\x41\xd8\x6f\x06" + "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55" + "\xec\x83\x1a\x8e\x25\xbc\x30\xc7" + "\x5e\xf5\x69\x00\x97\x0b\xa2\x39" + "\xd0\x44\xdb\x72\x09\x7d\x14\xab" + "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d" + "\x91\x28\xbf\x33\xca\x61\xf8\x6c" + "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde" + "\x75\x0c\x80\x17\xae\x22\xb9\x50" + "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2" + "\x36\xcd\x64\xfb\x6f\x06\x9d\x11" + "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83" + "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5" + "\x8c\x00\x97\x2e\xc5\x39\xd0\x67" + "\xfe\x72\x09\xa0\x14\xab\x42\xd9" + "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28" + "\xbf\x56\xed\x61\xf8\x8f\x03\x9a" + "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c" + "\xa3\x17\xae\x45\xdc\x50\xe7\x7e" + "\x15\x89\x20\xb7\x2b\xc2\x59\xf0" + "\x64\xfb\x92\x06\x9d\x34\xcb\x3f" + "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1" + "\x48\xdf\x53\xea\x81\x18\x8c\x23" + "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95" + "\x09\xa0\x37\xce\x42\xd9\x70\x07" + "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56" + "\xed\x84\x1b\x8f\x26\xbd\x31\xc8" + "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a" + "\xd1\x45\xdc\x73\x0a\x7e\x15\xac" + "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e" + "\x92\x29\xc0\x34\xcb\x62\xf9\x6d" + "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf" + "\x76\x0d\x81\x18\xaf\x23\xba\x51" + "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3" + "\x37\xce\x65\xfc\x70\x07\x9e\x12" + "\xa9\x40\xd7\x4b\xe2\x79\x10\x84" + "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6" + "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68" + "\xff\x73\x0a\xa1\x15\xac\x43\xda" + "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29" + "\xc0\x57\xee\x62\xf9\x90\x04\x9b" + "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d" + "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f" + "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1" + "\x65\xfc\x93\x07\x9e\x35\xcc\x40" + 
"\xd7\x6e\x05\x79\x10\xa7\x1b\xb2" + "\x49\xe0\x54\xeb\x82\x19\x8d\x24" + "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96" + "\x0a\xa1\x38\xcf\x43\xda\x71\x08" + "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57" + "\xee\x85\x1c\x90\x27\xbe\x32\xc9" + "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b" + "\xd2\x46\xdd\x74\x0b\x7f\x16\xad" + "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f" + "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e" + "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0" + "\x77\x0e\x82\x19\xb0\x24\xbb\x52" + "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4" + "\x38\xcf\x66\xfd\x71\x08\x9f\x13" + "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85" + "\x1c\xb3\x27\xbe\x55\xec\x60\xf7" + "\x8e\x02\x99\x30\xc7\x3b\xd2\x69" + "\x00\x74\x0b\xa2\x16\xad\x44\xdb" + "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a" + "\xc1\x58\xef\x63\xfa\x91\x05\x9c" + "\x33\xca\x3e\xd5\x6c\x03\x77\x0e" + "\xa5\x19\xb0\x47\xde\x52\xe9\x80" + "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2" + "\x66\xfd\x94\x08\x9f\x36\xcd\x41" + "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3" + "\x4a\xe1\x55\xec\x83\x1a\x8e\x25" + "\xbc\x30\xc7\x5e\xf5\x69\x00\x97" + "\x0b\xa2\x39\xd0\x44\xdb\x72\x09" + "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58" + "\xef\x86\x1d\x91\x28\xbf\x33\xca" + "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c" + "\xd3\x47\xde\x75\x0c\x80\x17\xae" + "\x22\xb9\x50\xe7\x5b\xf2\x89\x20" + "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f" + "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1" + "\x78\x0f\x83\x1a\xb1\x25\xbc\x53" + "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5" + "\x39\xd0\x67\xfe\x72\x09\xa0\x14" + "\xab\x42\xd9\x4d\xe4\x7b\x12\x86" + "\x1d\xb4\x28\xbf\x56\xed\x61\xf8" + "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a" + "\x01\x75\x0c\xa3\x17\xae\x45\xdc" + "\x50\xe7\x7e\x15\x89\x20\xb7\x2b" + "\xc2\x59\xf0\x64\xfb\x92\x06\x9d" + "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f" + "\xa6\x1a\xb1\x48\xdf\x53\xea\x81" + "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3" + "\x67\xfe\x95\x09\xa0\x37\xce\x42" + "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4" + "\x4b\xe2\x56\xed\x84\x1b\x8f\x26" + "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98", + .psize = 2048, + .digest = "\xfb\x3a\x7a\xda", + } +}; + +/* * CRC32C test vectors */ #define CRC32C_TEST_VECTORS 15 diff --git a/crypto/zlib.c b/crypto/zlib.c index 0eefa9d237ac..d51a30a29e42 100644 --- a/crypto/zlib.c +++ b/crypto/zlib.c @@ -78,7 +78,7 @@ static void zlib_exit(struct crypto_tfm *tfm) } -static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, +static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params, unsigned int len) { struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); @@ -209,7 +209,7 @@ static int zlib_compress_final(struct crypto_pcomp *tfm, } -static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params, +static int zlib_decompress_setup(struct crypto_pcomp *tfm, const void *params, unsigned int len) { struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); |