| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-22 03:10:50 +0100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-22 03:10:50 +0100 |
| commit | 36289a03bcd3aabdf66de75cb6d1b4ee15726438 (patch) | |
| tree | 1230c6391678f9255f74d7a4f65e95ea8a39d452 /drivers | |
| parent | Merge tag 'platform-drivers-x86-v6.3-1' of git://git.kernel.org/pub/scm/linux... (diff) | |
| parent | crypto: x86/aria-avx - Do not use avx2 instructions (diff) | |
| download | linux-36289a03bcd3aabdf66de75cb6d1b4ee15726438.tar.xz linux-36289a03bcd3aabdf66de75cb6d1b4ee15726438.zip | |
Merge tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"API:
- Use kmap_local instead of kmap_atomic
- Change request callback to take void pointer
- Print FIPS status in /proc/crypto (when enabled)
Algorithms:
- Add rfc4106/gcm support on arm64
- Add ARIA AVX2/512 support on x86
Drivers:
- Add TRNG driver for StarFive SoC
- Delete ux500/hash driver (subsumed by stm32/hash)
- Add zlib support in qat
- Add RSA support in aspeed"
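The first API item in the message ("Use kmap_local instead of kmap_atomic") is a tree-wide conversion of temporary page mappings. A minimal sketch of the pattern against the stock <linux/highmem.h> API; the copy_from_page_*() helper names are hypothetical and not from this merge:

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Old pattern: kmap_atomic() disables preemption (and page faults) for
 * the lifetime of the mapping, so nothing in between may sleep. */
static void copy_from_page_old(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(src);
}

/* New pattern: kmap_local_page() creates a CPU-local mapping that stays
 * valid in the current context without disabling preemption. */
static void copy_from_page_new(struct page *page, void *dst, size_t len)
{
	void *src = kmap_local_page(page);

	memcpy(dst, src, len);
	kunmap_local(src);
}
```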
* tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (156 commits)
crypto: x86/aria-avx - Do not use avx2 instructions
crypto: aspeed - Fix modular aspeed-acry
crypto: hisilicon/qm - fix coding style issues
crypto: hisilicon/qm - update comments to match function
crypto: hisilicon/qm - change function names
crypto: hisilicon/qm - use min() instead of min_t()
crypto: hisilicon/qm - remove some unused defines
crypto: proc - Print fips status
crypto: crypto4xx - Call dma_unmap_page when done
crypto: octeontx2 - Fix objects shared between several modules
crypto: nx - Fix sparse warnings
crypto: ecc - Silence sparse warning
tls: Pass rec instead of aead_req into tls_encrypt_done
crypto: api - Remove completion function scaffolding
tls: Remove completion function scaffolding
tipc: Remove completion function scaffolding
net: ipv6: Remove completion function scaffolding
net: ipv4: Remove completion function scaffolding
net: macsec: Remove completion function scaffolding
dm: Remove completion function scaffolding
...
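The run of "Remove completion function scaffolding" commits above finishes the API change called out in the pull message: request completion callbacks now receive a plain void pointer rather than a struct crypto_async_request, and drivers report completion through typed helpers such as crypto_request_complete() and ahash_request_complete() (both visible in the atmel, axis and caam hunks below) instead of calling req->base.complete() by hand. A minimal before/after sketch; the my_done_*() callback names are hypothetical:

```c
#include <crypto/hash.h>
#include <linux/crypto.h>

/* Callback side, before: the generic async request is passed in and the
 * driver recovers its own request from areq->data. */
static void my_done_old(struct crypto_async_request *areq, int err)
{
	struct ahash_request *req = areq->data;

	/* ... post-process req ... */
}

/* Callback side, after: the context pointer arrives directly as void *. */
static void my_done_new(void *data, int err)
{
	struct ahash_request *req = data;

	/* ... post-process req ... */
}

/* Completion side: drivers stop reaching into base.complete and use the
 * typed helpers instead, e.g.
 *
 *	- req->base.complete(&req->base, err);
 *	+ ahash_request_complete(req, err);
 *
 * crypto_request_complete() is the generic form, used for backlog
 * notifications in the hunks below.
 */
```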
Diffstat (limited to 'drivers')
101 files changed, 2163 insertions, 3015 deletions
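Much of the churn in the drivers diff below comes from dropping GFP_DMA from kmalloc-family allocations (sun8i-ss, caam) and instead rounding DMA-bound buffer sizes up to dma_get_cache_alignment(), with an explicit overflow check. A minimal sketch of that pattern, assuming a hypothetical helper name:

```c
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Hypothetical helper for a buffer that will later be handed to
 * dma_map_single(). GFP_DMA is no longer requested; instead the length
 * is padded to the DMA cache alignment so the buffer does not share a
 * cache line with unrelated data.
 */
static void *dma_safe_zalloc(size_t len, gfp_t gfp)
{
	size_t padded = ALIGN(len, dma_get_cache_alignment());

	if (padded < len)		/* ALIGN() wrapped around */
		return NULL;

	return kzalloc(padded, gfp);	/* was: kzalloc(len, gfp | GFP_DMA) */
}
```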
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 3da8e85f8aae..4fdf07ae3c54 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -549,6 +549,16 @@ config HW_RANDOM_CN10K To compile this driver as a module, choose M here. The module will be called cn10k_rng. If unsure, say Y. +config HW_RANDOM_JH7110 + tristate "StarFive JH7110 Random Number Generator support" + depends on SOC_STARFIVE || COMPILE_TEST + help + This driver provides support for the True Random Number + Generator in StarFive JH7110 SoCs. + + To compile this driver as a module, choose M here. + The module will be called jh7110-trng. + endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 3e948cf04476..09bde4a0f971 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -47,3 +47,4 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o +obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c new file mode 100644 index 000000000000..38474d48a25e --- /dev/null +++ b/drivers/char/hw_random/jh7110-trng.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TRNG driver for the StarFive JH7110 SoC + * + * Copyright (C) 2022 StarFive Technology Co. + */ + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/hw_random.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/random.h> +#include <linux/reset.h> + +/* trng register offset */ +#define STARFIVE_CTRL 0x00 +#define STARFIVE_STAT 0x04 +#define STARFIVE_MODE 0x08 +#define STARFIVE_SMODE 0x0C +#define STARFIVE_IE 0x10 +#define STARFIVE_ISTAT 0x14 +#define STARFIVE_RAND0 0x20 +#define STARFIVE_RAND1 0x24 +#define STARFIVE_RAND2 0x28 +#define STARFIVE_RAND3 0x2C +#define STARFIVE_RAND4 0x30 +#define STARFIVE_RAND5 0x34 +#define STARFIVE_RAND6 0x38 +#define STARFIVE_RAND7 0x3C +#define STARFIVE_AUTO_RQSTS 0x60 +#define STARFIVE_AUTO_AGE 0x64 + +/* CTRL CMD */ +#define STARFIVE_CTRL_EXEC_NOP 0x0 +#define STARFIVE_CTRL_GENE_RANDNUM 0x1 +#define STARFIVE_CTRL_EXEC_RANDRESEED 0x2 + +/* STAT */ +#define STARFIVE_STAT_NONCE_MODE BIT(2) +#define STARFIVE_STAT_R256 BIT(3) +#define STARFIVE_STAT_MISSION_MODE BIT(8) +#define STARFIVE_STAT_SEEDED BIT(9) +#define STARFIVE_STAT_LAST_RESEED(x) ((x) << 16) +#define STARFIVE_STAT_SRVC_RQST BIT(27) +#define STARFIVE_STAT_RAND_GENERATING BIT(30) +#define STARFIVE_STAT_RAND_SEEDING BIT(31) + +/* MODE */ +#define STARFIVE_MODE_R256 BIT(3) + +/* SMODE */ +#define STARFIVE_SMODE_NONCE_MODE BIT(2) +#define STARFIVE_SMODE_MISSION_MODE BIT(8) +#define STARFIVE_SMODE_MAX_REJECTS(x) ((x) << 16) + +/* IE */ +#define STARFIVE_IE_RAND_RDY_EN BIT(0) +#define STARFIVE_IE_SEED_DONE_EN BIT(1) +#define STARFIVE_IE_LFSR_LOCKUP_EN BIT(4) +#define STARFIVE_IE_GLBL_EN BIT(31) + +#define STARFIVE_IE_ALL (STARFIVE_IE_GLBL_EN | \ + STARFIVE_IE_RAND_RDY_EN | \ + STARFIVE_IE_SEED_DONE_EN | \ + STARFIVE_IE_LFSR_LOCKUP_EN) + +/* ISTAT */ +#define STARFIVE_ISTAT_RAND_RDY BIT(0) +#define STARFIVE_ISTAT_SEED_DONE BIT(1) +#define 
STARFIVE_ISTAT_LFSR_LOCKUP BIT(4) + +#define STARFIVE_RAND_LEN sizeof(u32) + +#define to_trng(p) container_of(p, struct starfive_trng, rng) + +enum reseed { + RANDOM_RESEED, + NONCE_RESEED, +}; + +enum mode { + PRNG_128BIT, + PRNG_256BIT, +}; + +struct starfive_trng { + struct device *dev; + void __iomem *base; + struct clk *hclk; + struct clk *ahb; + struct reset_control *rst; + struct hwrng rng; + struct completion random_done; + struct completion reseed_done; + u32 mode; + u32 mission; + u32 reseed; + /* protects against concurrent write to ctrl register */ + spinlock_t write_lock; +}; + +static u16 autoreq; +module_param(autoreq, ushort, 0); +MODULE_PARM_DESC(autoreq, "Auto-reseeding after random number requests by host reaches specified counter:\n" + " 0 - disable counter\n" + " other - reload value for internal counter"); + +static u16 autoage; +module_param(autoage, ushort, 0); +MODULE_PARM_DESC(autoage, "Auto-reseeding after specified timer countdowns to 0:\n" + " 0 - disable timer\n" + " other - reload value for internal timer"); + +static inline int starfive_trng_wait_idle(struct starfive_trng *trng) +{ + u32 stat; + + return readl_relaxed_poll_timeout(trng->base + STARFIVE_STAT, stat, + !(stat & (STARFIVE_STAT_RAND_GENERATING | + STARFIVE_STAT_RAND_SEEDING)), + 10, 100000); +} + +static inline void starfive_trng_irq_mask_clear(struct starfive_trng *trng) +{ + /* clear register: ISTAT */ + u32 data = readl(trng->base + STARFIVE_ISTAT); + + writel(data, trng->base + STARFIVE_ISTAT); +} + +static int starfive_trng_cmd(struct starfive_trng *trng, u32 cmd, bool wait) +{ + int wait_time = 1000; + + /* allow up to 40 us for wait == 0 */ + if (!wait) + wait_time = 40; + + switch (cmd) { + case STARFIVE_CTRL_GENE_RANDNUM: + reinit_completion(&trng->random_done); + spin_lock_irq(&trng->write_lock); + writel(cmd, trng->base + STARFIVE_CTRL); + spin_unlock_irq(&trng->write_lock); + if (!wait_for_completion_timeout(&trng->random_done, usecs_to_jiffies(wait_time))) + return -ETIMEDOUT; + break; + case STARFIVE_CTRL_EXEC_RANDRESEED: + reinit_completion(&trng->reseed_done); + spin_lock_irq(&trng->write_lock); + writel(cmd, trng->base + STARFIVE_CTRL); + spin_unlock_irq(&trng->write_lock); + if (!wait_for_completion_timeout(&trng->reseed_done, usecs_to_jiffies(wait_time))) + return -ETIMEDOUT; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int starfive_trng_init(struct hwrng *rng) +{ + struct starfive_trng *trng = to_trng(rng); + u32 mode, intr = 0; + + /* setup Auto Request/Age register */ + writel(autoage, trng->base + STARFIVE_AUTO_AGE); + writel(autoreq, trng->base + STARFIVE_AUTO_RQSTS); + + /* clear register: ISTAT */ + starfive_trng_irq_mask_clear(trng); + + intr |= STARFIVE_IE_ALL; + writel(intr, trng->base + STARFIVE_IE); + + mode = readl(trng->base + STARFIVE_MODE); + + switch (trng->mode) { + case PRNG_128BIT: + mode &= ~STARFIVE_MODE_R256; + break; + case PRNG_256BIT: + mode |= STARFIVE_MODE_R256; + break; + default: + mode |= STARFIVE_MODE_R256; + break; + } + + writel(mode, trng->base + STARFIVE_MODE); + + return starfive_trng_cmd(trng, STARFIVE_CTRL_EXEC_RANDRESEED, 1); +} + +static irqreturn_t starfive_trng_irq(int irq, void *priv) +{ + u32 status; + struct starfive_trng *trng = (struct starfive_trng *)priv; + + status = readl(trng->base + STARFIVE_ISTAT); + if (status & STARFIVE_ISTAT_RAND_RDY) { + writel(STARFIVE_ISTAT_RAND_RDY, trng->base + STARFIVE_ISTAT); + complete(&trng->random_done); + } + + if (status & STARFIVE_ISTAT_SEED_DONE) { + 
writel(STARFIVE_ISTAT_SEED_DONE, trng->base + STARFIVE_ISTAT); + complete(&trng->reseed_done); + } + + if (status & STARFIVE_ISTAT_LFSR_LOCKUP) { + writel(STARFIVE_ISTAT_LFSR_LOCKUP, trng->base + STARFIVE_ISTAT); + /* SEU occurred, reseeding required*/ + spin_lock(&trng->write_lock); + writel(STARFIVE_CTRL_EXEC_RANDRESEED, trng->base + STARFIVE_CTRL); + spin_unlock(&trng->write_lock); + } + + return IRQ_HANDLED; +} + +static void starfive_trng_cleanup(struct hwrng *rng) +{ + struct starfive_trng *trng = to_trng(rng); + + writel(0, trng->base + STARFIVE_CTRL); + + reset_control_assert(trng->rst); + clk_disable_unprepare(trng->hclk); + clk_disable_unprepare(trng->ahb); +} + +static int starfive_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct starfive_trng *trng = to_trng(rng); + int ret; + + pm_runtime_get_sync(trng->dev); + + if (trng->mode == PRNG_256BIT) + max = min_t(size_t, max, (STARFIVE_RAND_LEN * 8)); + else + max = min_t(size_t, max, (STARFIVE_RAND_LEN * 4)); + + if (wait) { + ret = starfive_trng_wait_idle(trng); + if (ret) + return -ETIMEDOUT; + } + + ret = starfive_trng_cmd(trng, STARFIVE_CTRL_GENE_RANDNUM, wait); + if (ret) + return ret; + + memcpy_fromio(buf, trng->base + STARFIVE_RAND0, max); + + pm_runtime_put_sync_autosuspend(trng->dev); + + return max; +} + +static int starfive_trng_probe(struct platform_device *pdev) +{ + int ret; + int irq; + struct starfive_trng *trng; + + trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); + if (!trng) + return -ENOMEM; + + platform_set_drvdata(pdev, trng); + trng->dev = &pdev->dev; + + trng->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(trng->base)) + return dev_err_probe(&pdev->dev, PTR_ERR(trng->base), + "Error remapping memory for platform device.\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + init_completion(&trng->random_done); + init_completion(&trng->reseed_done); + spin_lock_init(&trng->write_lock); + + ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name, + (void *)trng); + if (ret) + return dev_err_probe(&pdev->dev, irq, + "Failed to register interrupt handler\n"); + + trng->hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(trng->hclk)) + return dev_err_probe(&pdev->dev, PTR_ERR(trng->hclk), + "Error getting hardware reference clock\n"); + + trng->ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(trng->ahb)) + return dev_err_probe(&pdev->dev, PTR_ERR(trng->ahb), + "Error getting ahb reference clock\n"); + + trng->rst = devm_reset_control_get_shared(&pdev->dev, NULL); + if (IS_ERR(trng->rst)) + return dev_err_probe(&pdev->dev, PTR_ERR(trng->rst), + "Error getting hardware reset line\n"); + + clk_prepare_enable(trng->hclk); + clk_prepare_enable(trng->ahb); + reset_control_deassert(trng->rst); + + trng->rng.name = dev_driver_string(&pdev->dev); + trng->rng.init = starfive_trng_init; + trng->rng.cleanup = starfive_trng_cleanup; + trng->rng.read = starfive_trng_read; + + trng->mode = PRNG_256BIT; + trng->mission = 1; + trng->reseed = RANDOM_RESEED; + + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 100); + pm_runtime_enable(&pdev->dev); + + ret = devm_hwrng_register(&pdev->dev, &trng->rng); + if (ret) { + pm_runtime_disable(&pdev->dev); + + reset_control_assert(trng->rst); + clk_disable_unprepare(trng->ahb); + clk_disable_unprepare(trng->hclk); + + return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n"); + } + + return 0; +} + +static int __maybe_unused 
starfive_trng_suspend(struct device *dev) +{ + struct starfive_trng *trng = dev_get_drvdata(dev); + + clk_disable_unprepare(trng->hclk); + clk_disable_unprepare(trng->ahb); + + return 0; +} + +static int __maybe_unused starfive_trng_resume(struct device *dev) +{ + struct starfive_trng *trng = dev_get_drvdata(dev); + + clk_prepare_enable(trng->hclk); + clk_prepare_enable(trng->ahb); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend, + starfive_trng_resume); + +static const struct of_device_id trng_dt_ids[] __maybe_unused = { + { .compatible = "starfive,jh7110-trng" }, + { } +}; +MODULE_DEVICE_TABLE(of, trng_dt_ids); + +static struct platform_driver starfive_trng_driver = { + .probe = starfive_trng_probe, + .driver = { + .name = "jh7110-trng", + .pm = &starfive_trng_pm_ops, + .of_match_table = of_match_ptr(trng_dt_ids), + }, +}; + +module_platform_driver(starfive_trng_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("StarFive True Random Number Generator"); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index dfb103f81a64..3b2516d1433f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -390,16 +390,6 @@ if CRYPTO_DEV_NX source "drivers/crypto/nx/Kconfig" endif -config CRYPTO_DEV_UX500 - tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" - depends on ARCH_U8500 - help - Driver for ST-Ericsson UX500 crypto engine. - -if CRYPTO_DEV_UX500 - source "drivers/crypto/ux500/Kconfig" -endif # if CRYPTO_DEV_UX500 - config CRYPTO_DEV_ATMEL_AUTHENC bool "Support for Atmel IPSEC/SSL hw accelerator" depends on ARCH_AT91 || COMPILE_TEST diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index fa8bf1be1a8c..476f1a25ca32 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -43,7 +43,6 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/ obj-y += stm32/ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o -obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index 9f6594699835..a6865ff4d400 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -118,6 +118,7 @@ static const struct ce_variant ce_d1_variant = { { "bus", 0, 200000000 }, { "mod", 300000000, 0 }, { "ram", 0, 400000000 }, + { "trng", 0, 0 }, }, .esr = ESR_D1, .prng = CE_ALG_PRNG, diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 8177aaba4434..27029fb77e29 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -105,7 +105,7 @@ #define MAX_SG 8 -#define CE_MAX_CLOCKS 3 +#define CE_MAX_CLOCKS 4 #define MAXFLOW 4 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 902f6be057ec..83c6dfad77e1 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -452,7 +452,7 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, } kfree_sensitive(op->key); op->keylen = keylen; - op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + op->key = kmemdup(key, keylen, GFP_KERNEL); if (!op->key) return -ENOMEM; @@ -475,7 +475,7 @@ int 
sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, kfree_sensitive(op->key); op->keylen = keylen; - op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + op->key = kmemdup(key, keylen, GFP_KERNEL); if (!op->key) return -ENOMEM; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index ac2329e2b0e5..c9dc06f97857 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> @@ -527,7 +528,7 @@ static int allocate_flows(struct sun8i_ss_dev *ss) init_completion(&ss->flows[i].complete); ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, - GFP_KERNEL | GFP_DMA); + GFP_KERNEL); if (!ss->flows[i].biv) { err = -ENOMEM; goto error_engine; @@ -535,7 +536,7 @@ static int allocate_flows(struct sun8i_ss_dev *ss) for (j = 0; j < MAX_SG; j++) { ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, - GFP_KERNEL | GFP_DMA); + GFP_KERNEL); if (!ss->flows[i].iv[j]) { err = -ENOMEM; goto error_engine; @@ -544,13 +545,15 @@ static int allocate_flows(struct sun8i_ss_dev *ss) /* the padding could be up to two block. */ ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE, - GFP_KERNEL | GFP_DMA); + GFP_KERNEL); if (!ss->flows[i].pad) { err = -ENOMEM; goto error_engine; } - ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE, - GFP_KERNEL | GFP_DMA); + ss->flows[i].result = + devm_kmalloc(ss->dev, max(SHA256_DIGEST_SIZE, + dma_get_cache_alignment()), + GFP_KERNEL); if (!ss->flows[i].result) { err = -ENOMEM; goto error_engine; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 36a82b22953c..577bf636f7fb 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -79,10 +79,10 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, memcpy(tfmctx->key, key, keylen); } - tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA); + tfmctx->ipad = kzalloc(bs, GFP_KERNEL); if (!tfmctx->ipad) return -ENOMEM; - tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA); + tfmctx->opad = kzalloc(bs, GFP_KERNEL); if (!tfmctx->opad) { ret = -ENOMEM; goto err_opad; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c index dd677e9ed06f..70c7b5d571b8 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c @@ -11,6 +11,8 @@ */ #include "sun8i-ss.h" #include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/mm.h> #include <linux/pm_runtime.h> #include <crypto/internal/rng.h> @@ -25,7 +27,7 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, ctx->seed = NULL; } if (!ctx->seed) - ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA); + ctx->seed = kmalloc(slen, GFP_KERNEL); if (!ctx->seed) return -ENOMEM; @@ -58,6 +60,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); struct rng_alg *alg = crypto_rng_alg(tfm); struct sun8i_ss_alg_template *algt; + unsigned int todo_with_padding; struct sun8i_ss_dev *ss; dma_addr_t dma_iv, dma_dst; unsigned int todo; @@ -81,7 +84,11 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, todo 
= dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE; todo -= todo % PRNG_DATA_SIZE; - d = kzalloc(todo, GFP_KERNEL | GFP_DMA); + todo_with_padding = ALIGN(todo, dma_get_cache_alignment()); + if (todo_with_padding < todo || todo < dlen) + return -EOVERFLOW; + + d = kzalloc(todo_with_padding, GFP_KERNEL); if (!d) return -ENOMEM; diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 280f4b0e7133..50dc783821b6 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -522,7 +522,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, { struct skcipher_request *req; struct scatterlist *dst; - dma_addr_t addr; req = skcipher_request_cast(pd_uinfo->async_req); @@ -531,8 +530,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev, req->cryptlen, req->dst); } else { dst = pd_uinfo->dest_va; - addr = dma_map_page(dev->core_dev->device, sg_page(dst), - dst->offset, dst->length, DMA_FROM_DEVICE); + dma_unmap_page(dev->core_dev->device, pd->dest, dst->length, + DMA_FROM_DEVICE); } if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) { @@ -557,10 +556,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev, struct ahash_request *ahash_req; ahash_req = ahash_request_cast(pd_uinfo->async_req); - ctx = crypto_tfm_ctx(ahash_req->base.tfm); + ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req)); - crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, - crypto_tfm_ctx(ahash_req->base.tfm)); + crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx); crypto4xx_ret_sg_desc(dev, pd_uinfo); if (pd_uinfo->state & PD_ENTRY_BUSY) diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig index ae2710ae8d8f..db6c5b4cdc40 100644 --- a/drivers/crypto/aspeed/Kconfig +++ b/drivers/crypto/aspeed/Kconfig @@ -46,3 +46,14 @@ config CRYPTO_DEV_ASPEED_HACE_CRYPTO crypto driver. Supports AES/DES symmetric-key encryption and decryption with ECB/CBC/CFB/OFB/CTR options. + +config CRYPTO_DEV_ASPEED_ACRY + bool "Enable Aspeed ACRY RSA Engine" + depends on CRYPTO_DEV_ASPEED + select CRYPTO_ENGINE + select CRYPTO_RSA + help + Select here to enable Aspeed ECC/RSA Engine (ACRY) + RSA driver. + Supports 256 bits to 4096 bits RSA encryption/decryption + and signature/verification. diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile index a0ed40ddaad1..15862752c053 100644 --- a/drivers/crypto/aspeed/Makefile +++ b/drivers/crypto/aspeed/Makefile @@ -5,3 +5,7 @@ obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o aspeed_crypto-objs := aspeed-hace.o \ $(hace-hash-y) \ $(hace-crypto-y) + +aspeed_acry-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o + +obj-$(CONFIG_CRYPTO_DEV_ASPEED) += $(aspeed_acry-y) diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c new file mode 100644 index 000000000000..1f77ebd73489 --- /dev/null +++ b/drivers/crypto/aspeed/aspeed-acry.c @@ -0,0 +1,828 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2021 Aspeed Technology Inc. 
+ */ +#include <crypto/akcipher.h> +#include <crypto/algapi.h> +#include <crypto/engine.h> +#include <crypto/internal/akcipher.h> +#include <crypto/internal/rsa.h> +#include <crypto/scatterwalk.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/mfd/syscon.h> +#include <linux/interrupt.h> +#include <linux/count_zeros.h> +#include <linux/err.h> +#include <linux/dma-mapping.h> +#include <linux/regmap.h> + +#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG +#define ACRY_DBG(d, fmt, ...) \ + dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#else +#define ACRY_DBG(d, fmt, ...) \ + dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#endif + +/***************************** + * * + * ACRY register definitions * + * * + * ***************************/ +#define ASPEED_ACRY_TRIGGER 0x000 /* ACRY Engine Control: trigger */ +#define ASPEED_ACRY_DMA_CMD 0x048 /* ACRY Engine Control: Command */ +#define ASPEED_ACRY_DMA_SRC_BASE 0x04C /* ACRY DRAM base address for DMA */ +#define ASPEED_ACRY_DMA_LEN 0x050 /* ACRY Data Length of DMA */ +#define ASPEED_ACRY_RSA_KEY_LEN 0x058 /* ACRY RSA Exp/Mod Key Length (Bits) */ +#define ASPEED_ACRY_INT_MASK 0x3F8 /* ACRY Interrupt Mask */ +#define ASPEED_ACRY_STATUS 0x3FC /* ACRY Interrupt Status */ + +/* rsa trigger */ +#define ACRY_CMD_RSA_TRIGGER BIT(0) +#define ACRY_CMD_DMA_RSA_TRIGGER BIT(1) + +/* rsa dma cmd */ +#define ACRY_CMD_DMA_SRAM_MODE_RSA (0x3 << 4) +#define ACRY_CMD_DMEM_AHB BIT(8) +#define ACRY_CMD_DMA_SRAM_AHB_ENGINE 0 + +/* rsa key len */ +#define RSA_E_BITS_LEN(x) ((x) << 16) +#define RSA_M_BITS_LEN(x) (x) + +/* acry isr */ +#define ACRY_RSA_ISR BIT(1) + +#define ASPEED_ACRY_BUFF_SIZE 0x1800 /* DMA buffer size */ +#define ASPEED_ACRY_SRAM_MAX_LEN 2048 /* ACRY SRAM maximum length (Bytes) */ +#define ASPEED_ACRY_RSA_MAX_KEY_LEN 512 /* ACRY RSA maximum key length (Bytes) */ + +#define CRYPTO_FLAGS_BUSY BIT(1) +#define BYTES_PER_DWORD 4 + +/***************************** + * * + * AHBC register definitions * + * * + * ***************************/ +#define AHBC_REGION_PROT 0x240 +#define REGION_ACRYM BIT(23) + +#define ast_acry_write(acry, val, offset) \ + writel((val), (acry)->regs + (offset)) + +#define ast_acry_read(acry, offset) \ + readl((acry)->regs + (offset)) + +struct aspeed_acry_dev; + +typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *); + +struct aspeed_acry_dev { + void __iomem *regs; + struct device *dev; + int irq; + struct clk *clk; + struct regmap *ahbc; + + struct akcipher_request *req; + struct tasklet_struct done_task; + aspeed_acry_fn_t resume; + unsigned long flags; + + /* ACRY output SRAM buffer */ + void __iomem *acry_sram; + + /* ACRY input DMA buffer */ + void *buf_addr; + dma_addr_t buf_dma_addr; + + struct crypto_engine *crypt_engine_rsa; + + /* ACRY SRAM memory mapped */ + int exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN]; + int mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN]; + int data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN]; +}; + +struct aspeed_acry_ctx { + struct crypto_engine_ctx enginectx; + struct aspeed_acry_dev *acry_dev; + + struct rsa_key key; + int enc; + u8 *n; + u8 *e; + u8 *d; + size_t n_sz; + size_t e_sz; + size_t d_sz; + + aspeed_acry_fn_t trigger; + + struct crypto_akcipher *fallback_tfm; +}; + +struct aspeed_acry_alg { + struct aspeed_acry_dev *acry_dev; + struct akcipher_alg akcipher; +}; + +enum aspeed_rsa_key_mode { + ASPEED_RSA_EXP_MODE = 0, 
+ ASPEED_RSA_MOD_MODE, + ASPEED_RSA_DATA_MODE, +}; + +static inline struct akcipher_request * + akcipher_request_cast(struct crypto_async_request *req) +{ + return container_of(req, struct akcipher_request, base); +} + +static int aspeed_acry_do_fallback(struct akcipher_request *req) +{ + struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); + int err; + + akcipher_request_set_tfm(req, ctx->fallback_tfm); + + if (ctx->enc) + err = crypto_akcipher_encrypt(req); + else + err = crypto_akcipher_decrypt(req); + + akcipher_request_set_tfm(req, cipher); + + return err; +} + +static bool aspeed_acry_need_fallback(struct akcipher_request *req) +{ + struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); + + return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN; +} + +static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev, + struct akcipher_request *req) +{ + if (aspeed_acry_need_fallback(req)) { + ACRY_DBG(acry_dev, "SW fallback\n"); + return aspeed_acry_do_fallback(req); + } + + return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req); +} + +static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq) +{ + struct akcipher_request *req = akcipher_request_cast(areq); + struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); + struct aspeed_acry_dev *acry_dev = ctx->acry_dev; + + acry_dev->req = req; + acry_dev->flags |= CRYPTO_FLAGS_BUSY; + + return ctx->trigger(acry_dev); +} + +static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err) +{ + struct akcipher_request *req = acry_dev->req; + + acry_dev->flags &= ~CRYPTO_FLAGS_BUSY; + + crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err); + + return err; +} + +/* + * Copy Data to DMA buffer for engine used. + */ +static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev, + u8 *buf, struct scatterlist *src, + size_t nbytes) +{ + static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN]; + int i = 0, j; + int data_idx; + + ACRY_DBG(acry_dev, "\n"); + + scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0); + + for (j = nbytes - 1; j >= 0; j--) { + data_idx = acry_dev->data_byte_mapping[i]; + buf[data_idx] = dram_buffer[j]; + i++; + } + + for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) { + data_idx = acry_dev->data_byte_mapping[i]; + buf[data_idx] = 0; + } +} + +/* + * Copy Exp/Mod to DMA buffer for engine used. 
+ * + * Params: + * - mode 0 : Exponential + * - mode 1 : Modulus + * + * Example: + * - DRAM memory layout: + * D[0], D[4], D[8], D[12] + * - ACRY SRAM memory layout should reverse the order of source data: + * D[12], D[8], D[4], D[0] + */ +static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf, + const void *xbuf, size_t nbytes, + enum aspeed_rsa_key_mode mode) +{ + const u8 *src = xbuf; + __le32 *dw_buf = buf; + int nbits, ndw; + int i, j, idx; + u32 data = 0; + + ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode); + + if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN) + return -ENOMEM; + + /* Remove the leading zeros */ + while (nbytes > 0 && src[0] == 0) { + src++; + nbytes--; + } + + nbits = nbytes * 8; + if (nbytes > 0) + nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8); + + /* double-world alignment */ + ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD); + + if (nbytes > 0) { + i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD; + i %= BYTES_PER_DWORD; + + for (j = ndw; j > 0; j--) { + for (; i < BYTES_PER_DWORD; i++) { + data <<= 8; + data |= *src++; + } + + i = 0; + + if (mode == ASPEED_RSA_EXP_MODE) + idx = acry_dev->exp_dw_mapping[j - 1]; + else if (mode == ASPEED_RSA_MOD_MODE) + idx = acry_dev->mod_dw_mapping[j - 1]; + + dw_buf[idx] = cpu_to_le32(data); + } + } + + return nbits; +} + +static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev) +{ + struct akcipher_request *req = acry_dev->req; + u8 __iomem *sram_buffer = acry_dev->acry_sram; + struct scatterlist *out_sg = req->dst; + static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN]; + int leading_zero = 1; + int result_nbytes; + int i = 0, j; + int data_idx; + + /* Set Data Memory to AHB(CPU) Access Mode */ + ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD); + + /* Disable ACRY SRAM protection */ + regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT, + REGION_ACRYM, 0); + + result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN; + + for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) { + data_idx = acry_dev->data_byte_mapping[j]; + if (readb(sram_buffer + data_idx) == 0 && leading_zero) { + result_nbytes--; + } else { + leading_zero = 0; + dram_buffer[i] = readb(sram_buffer + data_idx); + i++; + } + } + + ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n", + result_nbytes, req->dst_len); + + if (result_nbytes <= req->dst_len) { + scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes, + 1); + req->dst_len = result_nbytes; + + } else { + dev_err(acry_dev->dev, "RSA engine error!\n"); + } + + memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE); + + return aspeed_acry_complete(acry_dev, 0); +} + +static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev) +{ + struct akcipher_request *req = acry_dev->req; + struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); + int ne, nm; + + if (!ctx->n || !ctx->n_sz) { + dev_err(acry_dev->dev, "%s: key n is not set\n", __func__); + return -EINVAL; + } + + memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE); + + /* Copy source data to DMA buffer */ + aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr, + req->src, req->src_len); + + nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n, + ctx->n_sz, ASPEED_RSA_MOD_MODE); + if (ctx->enc) { + if (!ctx->e || !ctx->e_sz) { + dev_err(acry_dev->dev, "%s: key e is not set\n", + __func__); + return -EINVAL; + } + /* Copy key e to DMA buffer */ + ne = aspeed_acry_rsa_ctx_copy(acry_dev, 
acry_dev->buf_addr, + ctx->e, ctx->e_sz, + ASPEED_RSA_EXP_MODE); + } else { + if (!ctx->d || !ctx->d_sz) { + dev_err(acry_dev->dev, "%s: key d is not set\n", + __func__); + return -EINVAL; + } + /* Copy key d to DMA buffer */ + ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, + ctx->key.d, ctx->key.d_sz, + ASPEED_RSA_EXP_MODE); + } + + ast_acry_write(acry_dev, acry_dev->buf_dma_addr, + ASPEED_ACRY_DMA_SRC_BASE); + ast_acry_write(acry_dev, (ne << 16) + nm, + ASPEED_ACRY_RSA_KEY_LEN); + ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE, + ASPEED_ACRY_DMA_LEN); + + acry_dev->resume = aspeed_acry_rsa_transfer; + + /* Enable ACRY SRAM protection */ + regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT, + REGION_ACRYM, REGION_ACRYM); + + ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK); + ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA | + ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD); + + /* Trigger RSA engines */ + ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER | + ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER); + + return 0; +} + +static int aspeed_acry_rsa_enc(struct akcipher_request *req) +{ + struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); + struct aspeed_acry_dev *acry_dev = ctx->acry_dev; + + ctx->trigger = aspeed_acry_rsa_trigger; + ctx->enc = 1; + + return aspeed_acry_handle_queue(acry_dev, req); +} + +static int aspeed_acry_rsa_dec(struct akcipher_request *req) +{ + struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); + struct aspeed_acry_dev *acry_dev = ctx->acry_dev; + + ctx->trigger = aspeed_acry_rsa_trigger; + ctx->enc = 0; + + return aspeed_acry_handle_queue(acry_dev, req); +} + +static u8 *aspeed_rsa_key_copy(u8 *src, size_t len) +{ + return kmemdup(src, len, GFP_KERNEL); +} + +static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value, + size_t len) +{ + ctx->n_sz = len; + ctx->n = aspeed_rsa_key_copy(value, len); + if (!ctx->n) + return -ENOMEM; + + return 0; +} + +static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value, + size_t len) +{ + ctx->e_sz = len; + ctx->e = aspeed_rsa_key_copy(value, len); + if (!ctx->e) + return -ENOMEM; + + return 0; +} + +static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value, + size_t len) +{ + ctx->d_sz = len; + ctx->d = aspeed_rsa_key_copy(value, len); + if (!ctx->d) + return -ENOMEM; + + return 0; +} + +static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx) +{ + kfree_sensitive(ctx->n); + kfree_sensitive(ctx->e); + kfree_sensitive(ctx->d); + ctx->n_sz = 0; + ctx->e_sz = 0; + ctx->d_sz = 0; +} + +static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen, int priv) +{ + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); + struct aspeed_acry_dev *acry_dev = ctx->acry_dev; + int ret; + + if (priv) + ret = rsa_parse_priv_key(&ctx->key, key, keylen); + else + ret = rsa_parse_pub_key(&ctx->key, key, keylen); + + if (ret) { + dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n", + ret); + return ret; + } + + /* Aspeed engine supports up to 4096 bits, + * Use software fallback instead. 
+ */ + if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN) + return 0; + + ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz); + if (ret) + goto err; + + ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz); + if (ret) + goto err; + + if (priv) { + ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz); + if (ret) + goto err; + } + + return 0; + +err: + dev_err(acry_dev->dev, "rsa set key failed\n"); + aspeed_rsa_key_free(ctx); + + return ret; +} + +static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen); + if (ret) + return ret; + + return aspeed_acry_rsa_setkey(tfm, key, keylen, 0); +} + +static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret; + + ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen); + if (ret) + return ret; + + return aspeed_acry_rsa_setkey(tfm, key, keylen, 1); +} + +static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm) +{ + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); + + if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN) + return crypto_akcipher_maxsize(ctx->fallback_tfm); + + return ctx->n_sz; +} + +static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm) +{ + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); + struct aspeed_acry_alg *acry_alg; + + acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher); + + ctx->acry_dev = acry_alg->acry_dev; + + ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { + dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(ctx->fallback_tfm)); + return PTR_ERR(ctx->fallback_tfm); + } + + ctx->enginectx.op.do_one_request = aspeed_acry_do_request; + ctx->enginectx.op.prepare_request = NULL; + ctx->enginectx.op.unprepare_request = NULL; + + return 0; +} + +static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); + + crypto_free_akcipher(ctx->fallback_tfm); +} + +static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = { + { + .akcipher = { + .encrypt = aspeed_acry_rsa_enc, + .decrypt = aspeed_acry_rsa_dec, + .sign = aspeed_acry_rsa_dec, + .verify = aspeed_acry_rsa_enc, + .set_pub_key = aspeed_acry_rsa_set_pub_key, + .set_priv_key = aspeed_acry_rsa_set_priv_key, + .max_size = aspeed_acry_rsa_max_size, + .init = aspeed_acry_rsa_init_tfm, + .exit = aspeed_acry_rsa_exit_tfm, + .base = { + .cra_name = "rsa", + .cra_driver_name = "aspeed-rsa", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct aspeed_acry_ctx), + }, + }, + }, +}; + +static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev) +{ + int i, rc; + + for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) { + aspeed_acry_akcipher_algs[i].acry_dev = acry_dev; + rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); + if (rc) { + ACRY_DBG(acry_dev, "Failed to register %s\n", + 
aspeed_acry_akcipher_algs[i].akcipher.base.cra_name); + } + } +} + +static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) + crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); +} + +/* ACRY interrupt service routine. */ +static irqreturn_t aspeed_acry_irq(int irq, void *dev) +{ + struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev; + u32 sts; + + sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS); + ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS); + + ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts); + + if (sts & ACRY_RSA_ISR) { + /* Stop RSA engine */ + ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER); + + if (acry_dev->flags & CRYPTO_FLAGS_BUSY) + tasklet_schedule(&acry_dev->done_task); + else + dev_err(acry_dev->dev, "RSA no active requests.\n"); + } + + return IRQ_HANDLED; +} + +/* + * ACRY SRAM has its own memory layout. + * Set the DRAM to SRAM indexing for future used. + */ +static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev) +{ + int i, j = 0; + + for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) { + acry_dev->exp_dw_mapping[i] = j; + acry_dev->mod_dw_mapping[i] = j + 4; + acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4; + acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1; + acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2; + acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3; + j++; + j = j % 4 ? j : j + 8; + } +} + +static void aspeed_acry_done_task(unsigned long data) +{ + struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data; + + (void)acry_dev->resume(acry_dev); +} + +static const struct of_device_id aspeed_acry_of_matches[] = { + { .compatible = "aspeed,ast2600-acry", }, + {}, +}; + +static int aspeed_acry_probe(struct platform_device *pdev) +{ + struct aspeed_acry_dev *acry_dev; + struct device *dev = &pdev->dev; + struct resource *res; + int rc; + + acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev), + GFP_KERNEL); + if (!acry_dev) + return -ENOMEM; + + acry_dev->dev = dev; + + platform_set_drvdata(pdev, acry_dev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + acry_dev->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(acry_dev->regs)) + return PTR_ERR(acry_dev->regs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + acry_dev->acry_sram = devm_ioremap_resource(dev, res); + if (IS_ERR(acry_dev->acry_sram)) + return PTR_ERR(acry_dev->acry_sram); + + /* Get irq number and register it */ + acry_dev->irq = platform_get_irq(pdev, 0); + if (acry_dev->irq < 0) + return -ENXIO; + + rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0, + dev_name(dev), acry_dev); + if (rc) { + dev_err(dev, "Failed to request irq.\n"); + return rc; + } + + acry_dev->clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(acry_dev->clk)) { + dev_err(dev, "Failed to get acry clk\n"); + return PTR_ERR(acry_dev->clk); + } + + acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node, + "aspeed,ahbc"); + if (IS_ERR(acry_dev->ahbc)) { + dev_err(dev, "Failed to get AHBC regmap\n"); + return -ENODEV; + } + + /* Initialize crypto hardware engine structure for RSA */ + acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true); + if (!acry_dev->crypt_engine_rsa) { + rc = -ENOMEM; + goto clk_exit; + } + + rc = crypto_engine_start(acry_dev->crypt_engine_rsa); + if (rc) + goto err_engine_rsa_start; + + tasklet_init(&acry_dev->done_task, aspeed_acry_done_task, + (unsigned 
long)acry_dev); + + /* Set Data Memory to AHB(CPU) Access Mode */ + ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD); + + /* Initialize ACRY SRAM index */ + aspeed_acry_sram_mapping(acry_dev); + + acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE, + &acry_dev->buf_dma_addr, + GFP_KERNEL); + memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE); + + aspeed_acry_register(acry_dev); + + dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n"); + + return 0; + +err_engine_rsa_start: + crypto_engine_exit(acry_dev->crypt_engine_rsa); +clk_exit: + clk_disable_unprepare(acry_dev->clk); + + return rc; +} + +static int aspeed_acry_remove(struct platform_device *pdev) +{ + struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev); + + aspeed_acry_unregister(acry_dev); + crypto_engine_exit(acry_dev->crypt_engine_rsa); + tasklet_kill(&acry_dev->done_task); + clk_disable_unprepare(acry_dev->clk); + + return 0; +} + +MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches); + +static struct platform_driver aspeed_acry_driver = { + .probe = aspeed_acry_probe, + .remove = aspeed_acry_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = aspeed_acry_of_matches, + }, +}; + +module_platform_driver(aspeed_acry_driver); + +MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>"); +MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c index 656cb92c8bb6..d2871e1de9c2 100644 --- a/drivers/crypto/aspeed/aspeed-hace.c +++ b/drivers/crypto/aspeed/aspeed-hace.c @@ -99,7 +99,6 @@ static int aspeed_hace_probe(struct platform_device *pdev) const struct of_device_id *hace_dev_id; struct aspeed_engine_hash *hash_engine; struct aspeed_hace_dev *hace_dev; - struct resource *res; int rc; hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev), @@ -118,11 +117,9 @@ static int aspeed_hace_probe(struct platform_device *pdev) hash_engine = &hace_dev->hash_engine; crypto_engine = &hace_dev->crypto_engine; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - platform_set_drvdata(pdev, hace_dev); - hace_dev->regs = devm_ioremap_resource(&pdev->dev, res); + hace_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(hace_dev->regs)) return PTR_ERR(hace_dev->regs); diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h index f2cde23b56ae..05d0a15d546d 100644 --- a/drivers/crypto/aspeed/aspeed-hace.h +++ b/drivers/crypto/aspeed/aspeed-hace.h @@ -183,7 +183,7 @@ struct aspeed_sham_ctx { struct aspeed_hace_dev *hace_dev; unsigned long flags; /* hmac flag */ - struct aspeed_sha_hmac_ctx base[0]; + struct aspeed_sha_hmac_ctx base[]; }; struct aspeed_sham_reqctx { diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 886bf258544c..ed10f2ae4523 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -554,7 +554,7 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) } if (dd->is_async) - dd->areq->complete(dd->areq, err); + crypto_request_complete(dd->areq, err); tasklet_schedule(&dd->queue_task); @@ -955,7 +955,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, return ret; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); ctx = crypto_tfm_ctx(areq->tfm); @@ -1879,7 +1879,7 @@ static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, struct 
atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int err; - err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen); + err = xts_verify_key(tfm, key, keylen); if (err) return err; @@ -2510,6 +2510,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) /* keep only major version number */ switch (dd->hw_version & 0xff0) { case 0x700: + case 0x600: case 0x500: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 12205e2b53b4..aac64b555204 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -313,11 +313,10 @@ static struct kpp_alg atmel_ecdh_nist_p256 = { static int atmel_ecc_probe(struct i2c_client *client) { - const struct i2c_device_id *id = i2c_client_get_device_id(client); struct atmel_i2c_client_priv *i2c_priv; int ret; - ret = atmel_i2c_probe(client, id); + ret = atmel_i2c_probe(client); if (ret) return ret; diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index 55bff1e13142..83a9093eff25 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -59,7 +59,7 @@ void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd) * Read the word from Configuration zone that contains the lock bytes * (UserExtra, Selector, LockValue, LockConfig). */ - cmd->param1 = CONFIG_ZONE; + cmd->param1 = CONFIGURATION_ZONE; cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR); cmd->count = READ_COUNT; @@ -324,7 +324,7 @@ free_cmd: return ret; } -int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) +int atmel_i2c_probe(struct i2c_client *client) { struct atmel_i2c_client_priv *i2c_priv; struct device *dev = &client->dev; diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h index 35f7857a7f7c..c0bd429ee2c7 100644 --- a/drivers/crypto/atmel-i2c.h +++ b/drivers/crypto/atmel-i2c.h @@ -63,7 +63,7 @@ struct atmel_i2c_cmd { #define STATUS_WAKE_SUCCESSFUL 0x11 /* Definitions for eeprom organization */ -#define CONFIG_ZONE 0 +#define CONFIGURATION_ZONE 0 /* Definitions for Indexes common to all commands */ #define RSP_DATA_IDX 1 /* buffer index of data in response */ @@ -167,7 +167,7 @@ struct atmel_i2c_work_data { struct atmel_i2c_cmd cmd; }; -int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id); +int atmel_i2c_probe(struct i2c_client *client); void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data, void (*cbk)(struct atmel_i2c_work_data *work_data, diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index ca4b01926d1b..e7c1db2739ec 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -292,7 +292,7 @@ static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err) clk_disable(dd->iclk); if ((dd->is_async || dd->force_complete) && req->base.complete) - req->base.complete(&req->base, err); + ahash_request_complete(req, err); /* handle new request */ tasklet_schedule(&dd->queue_task); @@ -1080,7 +1080,7 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, return ret; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); ctx = crypto_tfm_ctx(async_req->tfm); @@ -2099,10 +2099,9 @@ struct atmel_sha_authenc_reqctx { unsigned int digestlen; }; -static void atmel_sha_authenc_complete(struct crypto_async_request *areq, - int err) +static void atmel_sha_authenc_complete(void *data, int err) { - struct ahash_request *req = areq->data; + struct ahash_request *req = data; struct atmel_sha_authenc_reqctx *authctx = 
ahash_request_ctx(req); authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); @@ -2509,6 +2508,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd) /* keep only major version number */ switch (dd->hw_version & 0xff0) { case 0x700: + case 0x600: case 0x510: dd->caps.has_dma = 1; dd->caps.has_dualbuff = 1; diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 272a06f0b588..4403dbb0f0b1 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -93,11 +93,10 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max, static int atmel_sha204a_probe(struct i2c_client *client) { - const struct i2c_device_id *id = i2c_client_get_device_id(client); struct atmel_i2c_client_priv *i2c_priv; int ret; - ret = atmel_i2c_probe(client, id); + ret = atmel_i2c_probe(client); if (ret) return ret; diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 8b7bc1076e0d..b2d48c1649b9 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -590,7 +590,7 @@ static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err) if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB) atmel_tdes_set_iv_as_last_ciphertext_block(dd); - req->base.complete(&req->base, err); + skcipher_request_complete(req, err); } static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, @@ -619,7 +619,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd, return ret; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); req = skcipher_request_cast(async_req); diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 51c66afbe677..8493a45e1bd4 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -1621,7 +1621,7 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key, crypto_skcipher_ctx(cipher); int ret; - ret = xts_check_key(&cipher->base, key, keylen); + ret = xts_verify_key(cipher, key, keylen); if (ret) return ret; @@ -2143,13 +2143,13 @@ static void artpec6_crypto_task(unsigned long data) list_for_each_entry_safe(req, n, &complete_in_progress, complete_in_progress) { - req->req->complete(req->req, -EINPROGRESS); + crypto_request_complete(req->req, -EINPROGRESS); } } static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) { - req->complete(req, 0); + crypto_request_complete(req, 0); } static void @@ -2161,7 +2161,7 @@ artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req) scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src, cipher_req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); - req->complete(req, 0); + skcipher_request_complete(cipher_req, 0); } static void @@ -2173,7 +2173,7 @@ artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req) scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst, cipher_req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); - req->complete(req, 0); + skcipher_request_complete(cipher_req, 0); } static void artpec6_crypto_complete_aead(struct crypto_async_request *req) @@ -2211,12 +2211,12 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req) } } - req->complete(req, result); + aead_request_complete(areq, result); } static void artpec6_crypto_complete_hash(struct crypto_async_request *req) { - req->complete(req, 0); + crypto_request_complete(req, 0); } diff --git a/drivers/crypto/bcm/cipher.c 
b/drivers/crypto/bcm/cipher.c index c8c799428fe0..70b911baab26 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -1614,7 +1614,7 @@ static void finish_req(struct iproc_reqctx_s *rctx, int err) spu_chunk_cleanup(rctx); if (areq) - areq->complete(areq, err); + crypto_request_complete(areq, err); } /** @@ -2570,66 +2570,29 @@ static int aead_need_fallback(struct aead_request *req) return payload_len > ctx->max_payload; } -static void aead_complete(struct crypto_async_request *areq, int err) -{ - struct aead_request *req = - container_of(areq, struct aead_request, base); - struct iproc_reqctx_s *rctx = aead_request_ctx(req); - struct crypto_aead *aead = crypto_aead_reqtfm(req); - - flow_log("%s() err:%d\n", __func__, err); - - areq->tfm = crypto_aead_tfm(aead); - - areq->complete = rctx->old_complete; - areq->data = rctx->old_data; - - areq->complete(areq, err); -} - static int aead_do_fallback(struct aead_request *req, bool is_encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct iproc_reqctx_s *rctx = aead_request_ctx(req); struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); - int err; - u32 req_flags; + struct aead_request *subreq; flow_log("%s() enc:%u\n", __func__, is_encrypt); - if (ctx->fallback_cipher) { - /* Store the cipher tfm and then use the fallback tfm */ - rctx->old_tfm = tfm; - aead_request_set_tfm(req, ctx->fallback_cipher); - /* - * Save the callback and chain ourselves in, so we can restore - * the tfm - */ - rctx->old_complete = req->base.complete; - rctx->old_data = req->base.data; - req_flags = aead_request_flags(req); - aead_request_set_callback(req, req_flags, aead_complete, req); - err = is_encrypt ? crypto_aead_encrypt(req) : - crypto_aead_decrypt(req); - - if (err == 0) { - /* - * fallback was synchronous (did not return - * -EINPROGRESS). So restore request state here. - */ - aead_request_set_callback(req, req_flags, - rctx->old_complete, req); - req->base.data = rctx->old_data; - aead_request_set_tfm(req, aead); - flow_log("%s() fallback completed successfully\n\n", - __func__); - } - } else { - err = -EINVAL; - } + if (!ctx->fallback_cipher) + return -EINVAL; - return err; + subreq = &rctx->req; + aead_request_set_tfm(subreq, ctx->fallback_cipher); + aead_request_set_callback(subreq, aead_request_flags(req), + req->base.complete, req->base.data); + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + req->iv); + aead_request_set_ad(subreq, req->assoclen); + + return is_encrypt ? 
crypto_aead_encrypt(req) : + crypto_aead_decrypt(req); } static int aead_enqueue(struct aead_request *req, bool is_encrypt) @@ -4243,6 +4206,7 @@ static int ahash_cra_init(struct crypto_tfm *tfm) static int aead_cra_init(struct crypto_aead *aead) { + unsigned int reqsize = sizeof(struct iproc_reqctx_s); struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); struct crypto_alg *alg = tfm->__crt_alg; @@ -4254,7 +4218,6 @@ static int aead_cra_init(struct crypto_aead *aead) flow_log("%s()\n", __func__); - crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s)); ctx->is_esp = false; ctx->salt_len = 0; ctx->salt_offset = 0; @@ -4263,22 +4226,29 @@ static int aead_cra_init(struct crypto_aead *aead) get_random_bytes(ctx->iv, MAX_IV_SIZE); flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE); - if (!err) { - if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { - flow_log("%s() creating fallback cipher\n", __func__); - - ctx->fallback_cipher = - crypto_alloc_aead(alg->cra_name, 0, - CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->fallback_cipher)) { - pr_err("%s() Error: failed to allocate fallback for %s\n", - __func__, alg->cra_name); - return PTR_ERR(ctx->fallback_cipher); - } - } + if (err) + goto out; + + if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK)) + goto reqsize; + + flow_log("%s() creating fallback cipher\n", __func__); + + ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_cipher)) { + pr_err("%s() Error: failed to allocate fallback for %s\n", + __func__, alg->cra_name); + return PTR_ERR(ctx->fallback_cipher); } + reqsize += crypto_aead_reqsize(ctx->fallback_cipher); + +reqsize: + crypto_aead_set_reqsize(aead, reqsize); + +out: return err; } diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index d6d87332140a..e36881c983cf 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -339,15 +339,12 @@ struct iproc_reqctx_s { /* hmac context */ bool is_sw_hmac; - /* aead context */ - struct crypto_tfm *old_tfm; - crypto_completion_t old_complete; - void *old_data; - gfp_t gfp; /* Buffers used to build SPU request and response messages */ struct spu_msg_buf msg_buf; + + struct aead_request req; }; /* diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c index f46b161d2cda..87781c1534ee 100644 --- a/drivers/crypto/caam/blob_gen.c +++ b/drivers/crypto/caam/blob_gen.c @@ -83,7 +83,7 @@ int caam_process_blob(struct caam_blob_priv *priv, output_len = info->input_len - CAAM_BLOB_OVERHEAD; } - desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL | GFP_DMA); + desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL); if (!desc) return -ENOMEM; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index ecc15bc521db..4a9b998a8d26 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -59,6 +59,8 @@ #include <crypto/engine.h> #include <crypto/xts.h> #include <asm/unaligned.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> /* * crypto alg @@ -1379,8 +1381,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, - GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags); if (!edesc) { caam_unmap(jrdev, req->src, 
req->dst, src_nents, dst_nents, 0, 0, 0, 0); @@ -1608,6 +1609,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, u8 *iv; int ivsize = crypto_skcipher_ivsize(skcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; + unsigned int aligned_size; src_nents = sg_nents_for_len(req->src, req->cryptlen); if (unlikely(src_nents < 0)) { @@ -1681,15 +1683,18 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, /* * allocate space for base edesc and hw desc commands, link tables, IV */ - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, - GFP_DMA | flags); - if (!edesc) { + aligned_size = ALIGN(ivsize, __alignof__(*edesc)); + aligned_size += sizeof(*edesc) + desc_bytes + sec4_sg_bytes; + aligned_size = ALIGN(aligned_size, dma_get_cache_alignment()); + iv = kzalloc(aligned_size, flags); + if (!iv) { dev_err(jrdev, "could not allocate extended descriptor\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } + edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc))); edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->mapped_src_nents = mapped_src_nents; @@ -1701,7 +1706,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, /* Make sure IV is located in a DMAable area */ if (ivsize) { - iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes; memcpy(iv, req->iv, ivsize); iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL); diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index c37b67be0492..5e218bf20d5b 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -20,6 +20,8 @@ #include "caamalg_desc.h" #include <crypto/xts.h> #include <asm/unaligned.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> /* * crypto alg @@ -959,7 +961,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, return (struct aead_edesc *)drv_ctx; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = qi_cache_alloc(GFP_DMA | flags); + edesc = qi_cache_alloc(flags); if (unlikely(!edesc)) { dev_err(qidev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); @@ -1317,8 +1319,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); - if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + - ivsize > CAAM_QI_MEMCACHE_SIZE)) { + if (unlikely(ALIGN(ivsize, __alignof__(*edesc)) + + offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes > + CAAM_QI_MEMCACHE_SIZE)) { dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", qm_sg_ents, ivsize); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, @@ -1327,17 +1330,18 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, } /* allocate space for base edesc, link tables and IV */ - edesc = qi_cache_alloc(GFP_DMA | flags); - if (unlikely(!edesc)) { + iv = qi_cache_alloc(flags); + if (unlikely(!iv)) { dev_err(qidev, "could not allocate extended descriptor\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } + edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc))); + /* Make sure IV is located in a DMAable area */ sg_table = &edesc->sgt[0]; - iv = (u8 *)(sg_table + qm_sg_ents); memcpy(iv, req->iv, ivsize); iv_dma = dma_map_single(qidev, iv, ivsize, 
DMA_BIDIRECTIONAL); diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 1b0dd742c53f..5c8d35edaa1c 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -16,7 +16,9 @@ #include "caamalg_desc.h" #include "caamhash_desc.h" #include "dpseci-debugfs.h" +#include <linux/dma-mapping.h> #include <linux/fsl/mc.h> +#include <linux/kernel.h> #include <soc/fsl/dpaa2-io.h> #include <soc/fsl/dpaa2-fd.h> #include <crypto/xts.h> @@ -370,7 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, struct dpaa2_sg_entry *sg_table; /* allocate space for base edesc, link tables and IV */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (unlikely(!edesc)) { dev_err(dev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); @@ -1189,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) } /* allocate space for base edesc, link tables and IV */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (unlikely(!edesc)) { dev_err(dev, "could not allocate extended descriptor\n"); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, @@ -3220,14 +3222,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, int ret = -ENOMEM; struct dpaa2_fl_entry *in_fle, *out_fle; - req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA); + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL); if (!req_ctx) return -ENOMEM; in_fle = &req_ctx->fd_flt[1]; out_fle = &req_ctx->fd_flt[0]; - flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA); + flc = kzalloc(sizeof(*flc), GFP_KERNEL); if (!flc) goto err_flc; @@ -3316,7 +3318,13 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); if (keylen > blocksize) { - hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + unsigned int aligned_len = + ALIGN(keylen, dma_get_cache_alignment()); + + if (aligned_len < keylen) + return -EOVERFLOW; + + hashed_key = kmemdup(key, aligned_len, GFP_KERNEL); if (!hashed_key) return -ENOMEM; ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); @@ -3411,7 +3419,7 @@ static void ahash_done(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static void ahash_done_bi(void *cbk_ctx, u32 status) @@ -3449,7 +3457,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, req->result, crypto_ahash_digestsize(ahash), 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static void ahash_done_ctx_src(void *cbk_ctx, u32 status) @@ -3476,7 +3484,7 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) @@ -3514,7 +3522,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) DUMP_PREFIX_ADDRESS, 16, 4, req->result, crypto_ahash_digestsize(ahash), 1); - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); } static int ahash_update_ctx(struct ahash_request *req) @@ -3560,7 +3568,7 @@ static int ahash_update_ctx(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = 
qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); @@ -3654,7 +3662,7 @@ static int ahash_final_ctx(struct ahash_request *req) int ret; /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) return -ENOMEM; @@ -3743,7 +3751,7 @@ static int ahash_finup_ctx(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; @@ -3836,7 +3844,7 @@ static int ahash_digest(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return ret; @@ -3913,7 +3921,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) int ret = -ENOMEM; /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) return ret; @@ -4012,7 +4020,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); @@ -4125,7 +4133,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return ret; @@ -4230,7 +4238,7 @@ static int ahash_update_first(struct ahash_request *req) } /* allocate space for base edesc and link tables */ - edesc = qi_cache_zalloc(GFP_DMA | flags); + edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); @@ -4926,6 +4934,7 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, { struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 }; struct device *dev = priv->dev; + unsigned int alignmask; int err; /* @@ -4936,13 +4945,14 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) return 0; - priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, - GFP_KERNEL | GFP_DMA); + alignmask = DPAA2_CSCN_ALIGN - 1; + alignmask |= dma_get_cache_alignment() - 1; + priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1), + GFP_KERNEL); if (!priv->cscn_mem) return -ENOMEM; - priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN); - priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned, + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dev, priv->cscn_dma)) { dev_err(dev, "Error mapping CSCN memory area\n"); @@ -5174,7 +5184,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) priv->domain = iommu_get_domain_for_dev(dev); qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE, - 0, SLAB_CACHE_DMA, NULL); + 0, 0, NULL); if (!qi_cache) { dev_err(dev, "Can't allocate SEC cache\n"); return -ENOMEM; @@ -5451,7 +5461,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); - if 
(unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) { + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { dev_dbg_ratelimited(dev, "Dropping request\n"); return -EBUSY; } diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index d35253407ade..abb502bb675c 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -7,13 +7,14 @@ #ifndef _CAAMALG_QI2_H_ #define _CAAMALG_QI2_H_ +#include <crypto/internal/skcipher.h> +#include <linux/compiler_attributes.h> #include <soc/fsl/dpaa2-io.h> #include <soc/fsl/dpaa2-fd.h> #include <linux/threads.h> #include <linux/netdevice.h> #include "dpseci.h" #include "desc_constr.h" -#include <crypto/skcipher.h> #define DPAA2_CAAM_STORE_SIZE 16 /* NAPI weight *must* be a multiple of the store size. */ @@ -36,8 +37,6 @@ * @tx_queue_attr: array of Tx queue attributes * @cscn_mem: pointer to memory region containing the congestion SCN * it's size is larger than to accommodate alignment - * @cscn_mem_aligned: pointer to congestion SCN; it is computed as - * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN) * @cscn_dma: dma address used by the QMAN to write CSCN messages * @dev: device associated with the DPSECI object * @mc_io: pointer to MC portal's I/O object @@ -58,7 +57,6 @@ struct dpaa2_caam_priv { /* congestion */ void *cscn_mem; - void *cscn_mem_aligned; dma_addr_t cscn_dma; struct device *dev; @@ -158,7 +156,7 @@ struct ahash_edesc { struct caam_flc { u32 flc[16]; u32 sh_desc[MAX_SDLEN]; -} ____cacheline_aligned; +} __aligned(CRYPTO_DMA_ALIGN); enum optype { ENCRYPT = 0, @@ -180,7 +178,7 @@ enum optype { * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc */ struct caam_request { - struct dpaa2_fl_entry fd_flt[2]; + struct dpaa2_fl_entry fd_flt[2] __aligned(CRYPTO_DMA_ALIGN); dma_addr_t fd_flt_dma; struct caam_flc *flc; dma_addr_t flc_dma; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 1050e965a438..82d3c730a502 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -66,6 +66,8 @@ #include "key_gen.h" #include "caamhash_desc.h" #include <crypto/engine.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> #define CAAM_CRA_PRIORITY 3000 @@ -365,7 +367,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, dma_addr_t key_dma; int ret; - desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); + desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL); if (!desc) { dev_err(jrdev, "unable to allocate key input memory\n"); return -ENOMEM; @@ -432,7 +434,13 @@ static int ahash_setkey(struct crypto_ahash *ahash, dev_dbg(jrdev, "keylen %d\n", keylen); if (keylen > blocksize) { - hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); + unsigned int aligned_len = + ALIGN(keylen, dma_get_cache_alignment()); + + if (aligned_len < keylen) + return -EOVERFLOW; + + hashed_key = kmemdup(key, keylen, GFP_KERNEL); if (!hashed_key) return -ENOMEM; ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); @@ -606,7 +614,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, * by CAAM, not crypto engine. */ if (!has_bklog) - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); else crypto_finalize_hash_request(jrp->engine, req, ecode); } @@ -668,7 +676,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, * by CAAM, not crypto engine. 
*/ if (!has_bklog) - req->base.complete(&req->base, ecode); + ahash_request_complete(req, ecode); else crypto_finalize_hash_request(jrp->engine, req, ecode); @@ -702,7 +710,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, struct ahash_edesc *edesc; unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); - edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sg_size, flags); if (!edesc) { dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); return NULL; diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index aef031946f33..e40614fef39d 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -16,6 +16,8 @@ #include "desc_constr.h" #include "sg_sw_sec4.h" #include "caampkc.h" +#include <linux/dma-mapping.h> +#include <linux/kernel.h> #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ @@ -310,8 +312,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc, hw desc commands and link tables */ - edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, - GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags); if (!edesc) goto dst_fail; @@ -898,7 +899,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen) if (!nbytes) return NULL; - dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL); + dst = kzalloc(dstlen, GFP_KERNEL); if (!dst) return NULL; @@ -910,7 +911,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen) /** * caam_read_raw_data - Read a raw byte stream as a positive integer. * The function skips buffer's leading zeros, copies the remained data - * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns + * to a buffer allocated in the GFP_KERNEL zone and returns * the address of the new buffer. * * @buf : The data to read @@ -923,7 +924,7 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes) if (!*nbytes) return NULL; - return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL); + return kmemdup(buf, *nbytes, GFP_KERNEL); } static int caam_rsa_check_key_length(unsigned int len) @@ -949,13 +950,13 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, return ret; /* Copy key in DMA zone */ - rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL); if (!rsa_key->e) goto err; /* * Skip leading zeros and copy the positive integer to a buffer - * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor + * allocated in the GFP_KERNEL zone. The decryption descriptor * expects a positive integer for the RSA modulus and uses its length as * decryption output length. 
*/ @@ -983,6 +984,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, struct caam_rsa_key *rsa_key = &ctx->key; size_t p_sz = raw_key->p_sz; size_t q_sz = raw_key->q_sz; + unsigned aligned_size; rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz); if (!rsa_key->p) @@ -994,11 +996,13 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, goto free_p; rsa_key->q_sz = q_sz; - rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL); + aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment()); + rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL); if (!rsa_key->tmp1) goto free_q; - rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL); + aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment()); + rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL); if (!rsa_key->tmp2) goto free_tmp1; @@ -1051,17 +1055,17 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, return ret; /* Copy key in DMA zone */ - rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL); + rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL); if (!rsa_key->d) goto err; - rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL); + rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL); if (!rsa_key->e) goto err; /* * Skip leading zeros and copy the positive integer to a buffer - * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor + * allocated in the GFP_KERNEL zone. The decryption descriptor * expects a positive integer for the RSA modulus and uses its length as * decryption output length. */ @@ -1185,8 +1189,7 @@ int caam_pkc_init(struct device *ctrldev) return 0; /* allocate zero buffer, used for padding input */ - zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | - GFP_KERNEL); + zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL); if (!zero_buffer) return -ENOMEM; diff --git a/drivers/crypto/caam/caamprng.c b/drivers/crypto/caam/caamprng.c index 4839e66300a2..6e4c1191cb28 100644 --- a/drivers/crypto/caam/caamprng.c +++ b/drivers/crypto/caam/caamprng.c @@ -8,6 +8,8 @@ #include <linux/completion.h> #include <crypto/internal/rng.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> #include "compat.h" #include "regs.h" #include "intern.h" @@ -75,6 +77,7 @@ static int caam_prng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { + unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment()); struct caam_prng_ctx ctx; struct device *jrdev; dma_addr_t dst_dma; @@ -82,7 +85,10 @@ static int caam_prng_generate(struct crypto_rng *tfm, u8 *buf; int ret; - buf = kzalloc(dlen, GFP_KERNEL); + if (aligned_dlen < dlen) + return -EOVERFLOW; + + buf = kzalloc(aligned_dlen, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -94,7 +100,7 @@ static int caam_prng_generate(struct crypto_rng *tfm, return ret; } - desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA); + desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL); if (!desc) { ret = -ENOMEM; goto out1; @@ -156,7 +162,7 @@ static int caam_prng_seed(struct crypto_rng *tfm, return ret; } - desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA); + desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL); if (!desc) { caam_jr_free(jrdev); return -ENOMEM; diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 1f0e82050976..1fd8ff965006 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -12,6 +12,8 @@ #include <linux/hw_random.h> #include 
<linux/completion.h> #include <linux/atomic.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> #include <linux/kfifo.h> #include "compat.h" @@ -176,17 +178,18 @@ static int caam_init(struct hwrng *rng) int err; ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, - GFP_DMA | GFP_KERNEL); + GFP_KERNEL); if (!ctx->desc_sync) return -ENOMEM; ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, - GFP_DMA | GFP_KERNEL); + GFP_KERNEL); if (!ctx->desc_async) return -ENOMEM; - if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE, - GFP_DMA | GFP_KERNEL)) + if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE, + dma_get_cache_alignment()), + GFP_KERNEL)) return -ENOMEM; INIT_WORK(&ctx->worker, caam_rng_worker); diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 32253a064d0f..6278afb951c3 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -199,7 +199,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) u32 *desc, status; int sh_idx, ret = 0; - desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA); + desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); if (!desc) return -ENOMEM; @@ -276,7 +276,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, int ret = 0, sh_idx; ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; - desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA); + desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); if (!desc) return -ENOMEM; diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 62ce6421bb3f..824c94d44f94 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -163,7 +163,8 @@ static inline void append_data(u32 * const desc, const void *data, int len) { u32 *offset = desc_end(desc); - if (len) /* avoid sparse warning: memcpy with byte count of 0 */ + /* Avoid gcc warning: memcpy with data == NULL */ + if (!IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG) || data) memcpy(offset, data, len); (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index b0e8a4939b4f..88cc4fe2a585 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -64,7 +64,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, if (local_max > max_keylen) return -EINVAL; - desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); + desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL); if (!desc) { dev_err(jrdev, "unable to allocate key input memory\n"); return ret; diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index c36f27376d7e..4c52c9365558 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -614,7 +614,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu) struct qman_fq *fq; int ret; - fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA); + fq = kzalloc(sizeof(*fq), GFP_KERNEL); if (!fq) return -ENOMEM; @@ -756,7 +756,7 @@ int caam_qi_init(struct platform_device *caam_pdev) } qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, - SLAB_CACHE_DMA, NULL); + 0, NULL); if (!qi_cache) { dev_err(qidev, "Can't allocate CAAM cache\n"); free_rsp_fqs(); diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h index 5894f16f8fe3..a96e3d213c06 100644 --- a/drivers/crypto/caam/qi.h +++ b/drivers/crypto/caam/qi.h @@ -9,6 +9,8 @@ #ifndef __QI_H__ #define __QI_H__ +#include <crypto/algapi.h> +#include <linux/compiler_attributes.h> 
#include <soc/fsl/qman.h> #include "compat.h" #include "desc.h" @@ -58,8 +60,10 @@ enum optype { * @qidev: device pointer for CAAM/QI backend */ struct caam_drv_ctx { - u32 prehdr[2]; - u32 sh_desc[MAX_SDLEN]; + struct { + u32 prehdr[2]; + u32 sh_desc[MAX_SDLEN]; + } __aligned(CRYPTO_DMA_ALIGN); dma_addr_t context_a; struct qman_fq *req_fq; struct qman_fq *rsp_fq; @@ -67,7 +71,7 @@ struct caam_drv_ctx { int cpu; enum optype op_type; struct device *qidev; -} ____cacheline_aligned; +}; /** * caam_drv_req - The request structure the driver application should fill while @@ -88,7 +92,7 @@ struct caam_drv_req { struct caam_drv_ctx *drv_ctx; caam_qi_cbk cbk; void *app_ctx; -} ____cacheline_aligned; +} __aligned(CRYPTO_DMA_ALIGN); /** * caam_drv_ctx_init - Initialise a CAAM/QI driver context diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 9eca0c302186..ee476c6c7f82 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -28,7 +28,7 @@ static void cvm_callback(u32 status, void *arg) { struct crypto_async_request *req = (struct crypto_async_request *)arg; - req->complete(req, !status); + crypto_request_complete(req, !status); } static inline void update_input_iv(struct cpt_request_info *req_info, @@ -232,13 +232,12 @@ static int cvm_decrypt(struct skcipher_request *req) static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm); + struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher); int err; const u8 *key1 = key; const u8 *key2 = key + (keylen / 2); - err = xts_check_key(tfm, key, keylen); + err = xts_verify_key(cipher, key, keylen); if (err) return err; ctx->key_len = keylen; @@ -289,8 +288,7 @@ static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen) static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen, u8 cipher_type) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm); + struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher); ctx->cipher_type = cipher_type; if (!cvm_validate_keylen(ctx, keylen)) { diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c index 0653484df23f..b0e53034164a 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_aead.c +++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c @@ -199,7 +199,7 @@ static void nitrox_aead_callback(void *arg, int err) err = -EINVAL; } - areq->base.complete(&areq->base, err); + aead_request_complete(areq, err); } static inline bool nitrox_aes_gcm_assoclen_supported(unsigned int assoclen) @@ -434,7 +434,7 @@ static void nitrox_rfc4106_callback(void *arg, int err) err = -EINVAL; } - areq->base.complete(&areq->base, err); + aead_request_complete(areq, err); } static int nitrox_rfc4106_enc(struct aead_request *areq) diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c index 248b4fff1c72..138261dcd032 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c +++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c @@ -337,12 +337,11 @@ static int nitrox_3des_decrypt(struct skcipher_request *skreq) static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct nitrox_crypto_ctx *nctx = 
crypto_skcipher_ctx(cipher); struct flexi_crypto_context *fctx; int aes_keylen, ret; - ret = xts_check_key(tfm, key, keylen); + ret = xts_verify_key(cipher, key, keylen); if (ret) return ret; @@ -362,8 +361,7 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); struct flexi_crypto_context *fctx; int aes_keylen; diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 73442a382f68..ecd58b38c46e 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -146,7 +146,7 @@ static void ccp_crypto_complete(void *data, int err) /* Only propagate the -EINPROGRESS if necessary */ if (crypto_cmd->ret == -EBUSY) { crypto_cmd->ret = -EINPROGRESS; - req->complete(req, -EINPROGRESS); + crypto_request_complete(req, -EINPROGRESS); } return; @@ -159,18 +159,18 @@ static void ccp_crypto_complete(void *data, int err) held = ccp_crypto_cmd_complete(crypto_cmd, &backlog); if (backlog) { backlog->ret = -EINPROGRESS; - backlog->req->complete(backlog->req, -EINPROGRESS); + crypto_request_complete(backlog->req, -EINPROGRESS); } /* Transition the state from -EBUSY to -EINPROGRESS first */ if (crypto_cmd->ret == -EBUSY) - req->complete(req, -EINPROGRESS); + crypto_request_complete(req, -EINPROGRESS); /* Completion callbacks */ ret = err; if (ctx->complete) ret = ctx->complete(req, ret); - req->complete(req, ret); + crypto_request_complete(req, ret); /* Submit the next cmd */ while (held) { @@ -186,12 +186,12 @@ static void ccp_crypto_complete(void *data, int err) ctx = crypto_tfm_ctx_dma(held->req->tfm); if (ctx->complete) ret = ctx->complete(held->req, ret); - held->req->complete(held->req, ret); + crypto_request_complete(held->req, ret); next = ccp_crypto_cmd_complete(held, &backlog); if (backlog) { backlog->ret = -EINPROGRESS; - backlog->req->complete(backlog->req, -EINPROGRESS); + crypto_request_complete(backlog->req, -EINPROGRESS); } kfree(held); diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 9f753cb4f5f1..b386a7063818 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -642,14 +642,26 @@ static void ccp_dma_release(struct ccp_device *ccp) chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; - if (dma_chan->client_count) - dma_release_channel(dma_chan); - tasklet_kill(&chan->cleanup_tasklet); list_del_rcu(&dma_chan->device_node); } } +static void ccp_dma_release_channels(struct ccp_device *ccp) +{ + struct ccp_dma_chan *chan; + struct dma_chan *dma_chan; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + chan = ccp->ccp_dma_chan + i; + dma_chan = &chan->dma_chan; + + if (dma_chan->client_count) + dma_release_channel(dma_chan); + } +} + int ccp_dmaengine_register(struct ccp_device *ccp) { struct ccp_dma_chan *chan; @@ -770,8 +782,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) if (!dmaengine) return; - ccp_dma_release(ccp); + ccp_dma_release_channels(ccp); dma_async_device_unregister(dma_dev); + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 06fc7156c04f..e2f25926eb51 100644 --- 
a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -26,6 +26,7 @@ #include <linux/fs_struct.h> #include <asm/smp.h> +#include <asm/cacheflush.h> #include "psp-dev.h" #include "sev-dev.h" @@ -56,6 +57,7 @@ MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on m MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */ MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */ MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */ +MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ static bool psp_dead; static int psp_timeout; @@ -881,7 +883,14 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) input_address = (void __user *)input.address; if (input.address && input.length) { - id_blob = kzalloc(input.length, GFP_KERNEL); + /* + * The length of the ID shouldn't be assumed by software since + * it may change in the future. The allocation size is limited + * to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator. + * If the allocation fails, simply return ENOMEM rather than + * warning in the kernel log. + */ + id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN); if (!id_blob) return -ENOMEM; @@ -1327,7 +1336,10 @@ void sev_pci_init(void) /* Obtain the TMR memory area for SEV-ES use */ sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE); - if (!sev_es_tmr) + if (sev_es_tmr) + /* Must flush the cache before giving it to the firmware */ + clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE); + else dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n"); diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 084d052fddcc..cde33b2ac71b 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -342,52 +342,52 @@ static int __maybe_unused sp_pci_resume(struct device *dev) #ifdef CONFIG_CRYPTO_DEV_SP_PSP static const struct sev_vdata sevv1 = { - .cmdresp_reg = 0x10580, - .cmdbuff_addr_lo_reg = 0x105e0, - .cmdbuff_addr_hi_reg = 0x105e4, + .cmdresp_reg = 0x10580, /* C2PMSG_32 */ + .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */ + .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */ }; static const struct sev_vdata sevv2 = { - .cmdresp_reg = 0x10980, - .cmdbuff_addr_lo_reg = 0x109e0, - .cmdbuff_addr_hi_reg = 0x109e4, + .cmdresp_reg = 0x10980, /* C2PMSG_32 */ + .cmdbuff_addr_lo_reg = 0x109e0, /* C2PMSG_56 */ + .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */ }; static const struct tee_vdata teev1 = { - .cmdresp_reg = 0x10544, - .cmdbuff_addr_lo_reg = 0x10548, - .cmdbuff_addr_hi_reg = 0x1054c, - .ring_wptr_reg = 0x10550, - .ring_rptr_reg = 0x10554, + .cmdresp_reg = 0x10544, /* C2PMSG_17 */ + .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */ + .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */ + .ring_wptr_reg = 0x10550, /* C2PMSG_20 */ + .ring_rptr_reg = 0x10554, /* C2PMSG_21 */ }; static const struct psp_vdata pspv1 = { .sev = &sevv1, - .feature_reg = 0x105fc, - .inten_reg = 0x10610, - .intsts_reg = 0x10614, + .feature_reg = 0x105fc, /* C2PMSG_63 */ + .inten_reg = 0x10610, /* P2CMSG_INTEN */ + .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ }; static const struct psp_vdata pspv2 = { .sev = &sevv2, - .feature_reg = 0x109fc, - .inten_reg = 0x10690, - .intsts_reg = 0x10694, + .feature_reg = 0x109fc, /* C2PMSG_63 */ + .inten_reg = 0x10690, /* P2CMSG_INTEN */ + .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ }; static const struct psp_vdata pspv3 = { .tee = &teev1, - .feature_reg = 0x109fc, - .inten_reg = 0x10690, - .intsts_reg = 0x10694, + 
.feature_reg = 0x109fc, /* C2PMSG_63 */ + .inten_reg = 0x10690, /* P2CMSG_INTEN */ + .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ }; static const struct psp_vdata pspv4 = { .sev = &sevv2, .tee = &teev1, - .feature_reg = 0x109fc, - .inten_reg = 0x10690, - .intsts_reg = 0x10694, + .feature_reg = 0x109fc, /* C2PMSG_63 */ + .inten_reg = 0x10690, /* P2CMSG_INTEN */ + .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ }; #endif diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 309da6334a0a..2cd44d7457a4 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -460,7 +460,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, } if (ctx_p->cipher_mode == DRV_CIPHER_XTS && - xts_check_key(tfm, key, keylen)) { + xts_verify_key(sktfm, key, keylen)) { dev_dbg(dev, "weak XTS key"); return -EINVAL; } diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 68d65773ef2b..0eade4fa6695 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -220,7 +220,7 @@ static inline int chcr_handle_aead_resp(struct aead_request *req, reqctx->verify = VERIFY_HW; } chcr_dec_wrcount(dev); - req->base.complete(&req->base, err); + aead_request_complete(req, err); return err; } @@ -1235,7 +1235,7 @@ complete: complete(&ctx->cbc_aes_aio_done); } chcr_dec_wrcount(dev); - req->base.complete(&req->base, err); + skcipher_request_complete(req, err); return err; } @@ -2132,7 +2132,7 @@ unmap: out: chcr_dec_wrcount(dev); - req->base.complete(&req->base, err); + ahash_request_complete(req, err); } /* diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 7e7a8f01ea6b..5a7f6611803c 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -1705,7 +1705,7 @@ static void hifn_process_ready(struct skcipher_request *req, int error) hifn_cipher_walk_exit(&rctx->walk); } - req->base.complete(&req->base, error); + skcipher_request_complete(req, error); } static void hifn_clear_rings(struct hifn_device *dev, int error) @@ -2054,7 +2054,7 @@ static int hifn_process_queue(struct hifn_device *dev) break; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); req = skcipher_request_cast(async_req); diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 743ce4fc3158..4137a8bf131f 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -27,7 +27,7 @@ config CRYPTO_DEV_HISI_SEC2 select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_SM4_GENERIC - depends on PCI && PCI_MSI + depends on PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) depends on ACPI @@ -42,7 +42,7 @@ config CRYPTO_DEV_HISI_SEC2 config CRYPTO_DEV_HISI_QM tristate depends on ARM64 || COMPILE_TEST - depends on PCI && PCI_MSI + depends on PCI_MSI depends on UACCE || UACCE=n depends on ACPI help @@ -51,7 +51,7 @@ config CRYPTO_DEV_HISI_QM config CRYPTO_DEV_HISI_ZIP tristate "Support for HiSilicon ZIP accelerator" - depends on PCI && PCI_MSI + depends on PCI_MSI depends on ARM64 || (COMPILE_TEST && 64BIT) depends on !CPU_BIG_ENDIAN || COMPILE_TEST depends on UACCE || UACCE=n @@ -62,7 +62,7 @@ config CRYPTO_DEV_HISI_ZIP config CRYPTO_DEV_HISI_HPRE tristate "Support for HISI HPRE accelerator" - depends on PCI && PCI_MSI + depends on PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) depends on ACPI diff --git 
a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 007ac7a69ce7..59823ad1d9ae 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -95,8 +95,6 @@ #define QM_VFT_CFG_RDY 0x10006c #define QM_VFT_CFG_OP_WR 0x100058 #define QM_VFT_CFG_TYPE 0x10005c -#define QM_SQC_VFT 0x0 -#define QM_CQC_VFT 0x1 #define QM_VFT_CFG 0x100060 #define QM_VFT_CFG_OP_ENABLE 0x100054 #define QM_PM_CTRL 0x100148 @@ -118,7 +116,7 @@ #define QM_SQC_VFT_BASE_SHIFT_V2 28 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) #define QM_SQC_VFT_NUM_SHIFT_V2 45 -#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0) +#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) #define QM_ABNORMAL_INT_SOURCE 0x100000 #define QM_ABNORMAL_INT_MASK 0x100004 @@ -164,7 +162,6 @@ /* interfunction communication */ #define QM_IFC_READY_STATUS 0x100128 -#define QM_IFC_C_STS_M 0x10012C #define QM_IFC_INT_SET_P 0x100130 #define QM_IFC_INT_CFG 0x100134 #define QM_IFC_INT_SOURCE_P 0x100138 @@ -198,7 +195,6 @@ #define PCI_BAR_2 2 #define PCI_BAR_4 4 -#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) #define QMC_ALIGN(sz) ALIGN(sz, 32) #define QM_DBG_READ_LEN 256 @@ -212,8 +208,6 @@ #define QM_DRIVER_REMOVING 0 #define QM_RST_SCHED 1 #define QM_QOS_PARAM_NUM 2 -#define QM_QOS_VAL_NUM 1 -#define QM_QOS_BDF_PARAM_NUM 4 #define QM_QOS_MAX_VAL 1000 #define QM_QOS_RATE 100 #define QM_QOS_EXPAND_RATE 1000 @@ -225,38 +219,34 @@ #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19 #define QM_SHAPER_CBS_B 1 -#define QM_SHAPER_CBS_S 16 #define QM_SHAPER_VFT_OFFSET 6 -#define WAIT_FOR_QOS_VF 100 #define QM_QOS_MIN_ERROR_RATE 5 -#define QM_QOS_TYPICAL_NUM 8 #define QM_SHAPER_MIN_CBS_S 8 #define QM_QOS_TICK 0x300U #define QM_QOS_DIVISOR_CLK 0x1f40U #define QM_QOS_MAX_CIR_B 200 #define QM_QOS_MIN_CIR_B 100 #define QM_QOS_MAX_CIR_U 6 -#define QM_QOS_MAX_CIR_S 11 #define QM_AUTOSUSPEND_DELAY 3000 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ - (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ - ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ - ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ + (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ + ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ + ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \ ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) #define QM_MK_SQC_W13(priority, orders, alg_type) \ - (((priority) << QM_SQ_PRIORITY_SHIFT) | \ - ((orders) << QM_SQ_ORDERS_SHIFT) | \ + (((priority) << QM_SQ_PRIORITY_SHIFT) | \ + ((orders) << QM_SQ_ORDERS_SHIFT) | \ (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ - (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ - ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ - ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ + (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ + ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ + ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ @@ -706,7 +696,7 @@ static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | ((u64)randata << QM_DB_RAND_SHIFT_V2) | - ((u64)index << QM_DB_INDEX_SHIFT_V2) | + ((u64)index << QM_DB_INDEX_SHIFT_V2) | ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); writeq(doorbell, io_base); @@ -905,7 +895,7 @@ static void qm_work_process(struct work_struct *work) } } -static bool do_qm_irq(struct hisi_qm *qm) +static bool do_qm_eq_irq(struct hisi_qm *qm) { struct qm_eqe *eqe = 
qm->eqe + qm->status.eq_head; struct hisi_qm_poll_data *poll_data; @@ -925,12 +915,12 @@ static bool do_qm_irq(struct hisi_qm *qm) return false; } -static irqreturn_t qm_irq(int irq, void *data) +static irqreturn_t qm_eq_irq(int irq, void *data) { struct hisi_qm *qm = data; bool ret; - ret = do_qm_irq(qm); + ret = do_qm_eq_irq(qm); if (ret) return IRQ_HANDLED; @@ -1304,7 +1294,7 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); - *number = (QM_SQC_VFT_NUM_MASK_v2 & + *number = (QM_SQC_VFT_NUM_MASK_V2 & (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; return 0; @@ -1892,8 +1882,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) * @qm: The qm we create a qp from. * @alg_type: Accelerator specific algorithm type in sqc. * - * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating - * qp memory fails. + * Return created qp, negative error code if failed. */ static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) { @@ -2062,7 +2051,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) * @arg: Accelerator specific argument. * * After this function, qp can receive request from user. Return 0 if - * successful, Return -EBUSY if failed. + * successful, negative error code if failed. */ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) { @@ -3074,7 +3063,6 @@ static int qm_stop_started_qp(struct hisi_qm *qm) return 0; } - /** * qm_clear_queues() - Clear all queues memory in a qm. * @qm: The qm in which the queues will be cleared. @@ -3371,7 +3359,7 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) act_q_num = q_num; } - act_q_num = min_t(int, act_q_num, max_qp_num); + act_q_num = min(act_q_num, max_qp_num); ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); if (ret) { for (j = num_vfs; j > i; j--) @@ -3558,7 +3546,7 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf, qos_val = ir / QM_QOS_RATE; ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); - ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); + ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); err_get_status: clear_bit(QM_RESETTING, &qm->misc_ctl); @@ -4049,13 +4037,10 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) if (!qm->err_status.is_dev_ecc_mbit && qm->err_status.is_qm_ecc_mbit && qm->err_ini->close_axi_master_ooo) { - qm->err_ini->close_axi_master_ooo(qm); - } else if (qm->err_status.is_dev_ecc_mbit && !qm->err_status.is_qm_ecc_mbit && !qm->err_ini->close_axi_master_ooo) { - nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, qm->io_base + QM_RAS_NFE_ENABLE); @@ -4499,7 +4484,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data) return IRQ_HANDLED; } - /** * hisi_qm_dev_shutdown() - Shutdown device. * @pdev: The device will be shutdown. 
@@ -4903,7 +4887,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm) return 0; irq_vector = val & QM_IRQ_VECTOR_MASK; - ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm); + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); if (ret) dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 490e1542305e..1189effcdad0 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -504,8 +504,8 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, kfifo_avail(&ctx->queue->softqueue) > backlog_req->num_elements)) { sec_send_request(backlog_req, ctx->queue); - backlog_req->req_base->complete(backlog_req->req_base, - -EINPROGRESS); + crypto_request_complete(backlog_req->req_base, + -EINPROGRESS); list_del(&backlog_req->backlog_head); } } @@ -534,7 +534,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, if (skreq->src != skreq->dst) dma_unmap_sg(dev, skreq->dst, sec_req->len_out, DMA_BIDIRECTIONAL); - skreq->base.complete(&skreq->base, sec_req->err); + skcipher_request_complete(skreq, sec_req->err); } } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index f5bfc9755a4a..074e50ef512c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -1459,12 +1459,11 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, break; backlog_sk_req = backlog_req->c_req.sk_req; - backlog_sk_req->base.complete(&backlog_sk_req->base, - -EINPROGRESS); + skcipher_request_complete(backlog_sk_req, -EINPROGRESS); atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); } - sk_req->base.complete(&sk_req->base, err); + skcipher_request_complete(sk_req, err); } static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) @@ -1736,12 +1735,11 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) break; backlog_aead_req = backlog_req->aead_req.aead_req; - backlog_aead_req->base.complete(&backlog_aead_req->base, - -EINPROGRESS); + aead_request_complete(backlog_aead_req, -EINPROGRESS); atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); } - a_req->base.complete(&a_req->base, err); + aead_request_complete(a_req, err); } static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 0974b0041405..09586a837b1e 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -249,7 +249,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, dev_err(dev, "Get SGL error!\n"); dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); return ERR_PTR(-ENOMEM); - } curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); curr_hw_sge = curr_hw_sgl->sge_entries; diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 9629e98bd68b..fe93d19e3044 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -157,9 +157,9 @@ static inline void img_hash_write(struct img_hash_dev *hdev, writel_relaxed(value, hdev->io_base + offset); } -static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev) +static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev) { - return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE)); + return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE)); } static 
void img_hash_start(struct img_hash_dev *hdev, bool dma) @@ -283,10 +283,10 @@ static int img_hash_finish(struct ahash_request *req) static void img_hash_copy_hash(struct ahash_request *req) { struct img_hash_request_ctx *ctx = ahash_request_ctx(req); - u32 *hash = (u32 *)ctx->digest; + __be32 *hash = (__be32 *)ctx->digest; int i; - for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--) + for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--) hash[i] = img_hash_read_result_queue(ctx->hdev); } @@ -308,7 +308,7 @@ static void img_hash_finish_req(struct ahash_request *req, int err) DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL); if (req->base.complete) - req->base.complete(&req->base, err); + ahash_request_complete(req, err); } static int img_hash_write_via_dma(struct img_hash_dev *hdev) @@ -526,7 +526,7 @@ static int img_hash_handle_queue(struct img_hash_dev *hdev, return res; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); req = ahash_request_cast(async_req); hdev->req = req; diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index ae6110376e21..6858753af6b3 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -850,7 +850,7 @@ handle_req: goto request_failed; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); /* In case the send() helper did not issue any command to push * to the engine because the input data was cached, continue to @@ -970,17 +970,6 @@ void safexcel_complete(struct safexcel_crypto_priv *priv, int ring) } while (!cdesc->last_seg); } -void safexcel_inv_complete(struct crypto_async_request *req, int error) -{ - struct safexcel_inv_result *result = req->data; - - if (error == -EINPROGRESS) - return; - - result->error = error; - complete(&result->completion); -} - int safexcel_invalidate_cache(struct crypto_async_request *async, struct safexcel_crypto_priv *priv, dma_addr_t ctxr_dma, int ring) @@ -1050,7 +1039,7 @@ handle_results: if (should_complete) { local_bh_disable(); - req->complete(req, ret); + crypto_request_complete(req, ret); local_bh_enable(); } diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 6c2fc662f64f..47ef6c7cd02c 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -884,11 +884,6 @@ struct safexcel_alg_template { } alg; }; -struct safexcel_inv_result { - struct completion completion; - int error; -}; - void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring); int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, void *rdp); @@ -927,7 +922,6 @@ void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv, struct crypto_async_request *req); inline struct crypto_async_request * safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring); -void safexcel_inv_complete(struct crypto_async_request *req, int error); int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key, unsigned int keylen, const char *alg, unsigned int state_sz); diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 32a37e3850c5..272c28b5a088 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -1091,13 +1091,12 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring, static int 
safexcel_cipher_exit_inv(struct crypto_tfm *tfm, struct crypto_async_request *base, struct safexcel_cipher_req *sreq, - struct safexcel_inv_result *result) + struct crypto_wait *result) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->base.priv; int ring = ctx->base.ring; - - init_completion(&result->completion); + int err; ctx = crypto_tfm_ctx(base->tfm); ctx->base.exit_inv = true; @@ -1110,13 +1109,13 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm, queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work); - wait_for_completion(&result->completion); + err = crypto_wait_req(-EINPROGRESS, result); - if (result->error) { + if (err) { dev_warn(priv->dev, "cipher: sync: invalidate: completion error %d\n", - result->error); - return result->error; + err); + return err; } return 0; @@ -1126,12 +1125,12 @@ static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm) { EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE); struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); - struct safexcel_inv_result result = {}; + DECLARE_CRYPTO_WAIT(result); memset(req, 0, sizeof(struct skcipher_request)); skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + crypto_req_done, &result); skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); @@ -1141,12 +1140,12 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm) { EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE); struct safexcel_cipher_req *sreq = aead_request_ctx(req); - struct safexcel_inv_result result = {}; + DECLARE_CRYPTO_WAIT(result); memset(req, 0, sizeof(struct aead_request)); aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + crypto_req_done, &result); aead_request_set_tfm(req, __crypto_aead_cast(tfm)); return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index ca46328472d4..e17577b785c3 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -625,15 +625,16 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) struct safexcel_crypto_priv *priv = ctx->base.priv; EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE); struct safexcel_ahash_req *rctx = ahash_request_ctx_dma(req); - struct safexcel_inv_result result = {}; + DECLARE_CRYPTO_WAIT(result); int ring = ctx->base.ring; + int err; memset(req, 0, EIP197_AHASH_REQ_SIZE); /* create invalidation request */ init_completion(&result.completion); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + crypto_req_done, &result); ahash_request_set_tfm(req, __crypto_ahash_cast(tfm)); ctx = crypto_tfm_ctx(req->base.tfm); @@ -647,12 +648,11 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work); - wait_for_completion(&result.completion); + err = crypto_wait_req(-EINPROGRESS, &result); - if (result.error) { - dev_warn(priv->dev, "hash: completion error (%d)\n", - result.error); - return result.error; + if (err) { + dev_warn(priv->dev, "hash: completion error (%d)\n", err); + return err; } return 0; @@ -1042,27 +1042,11 @@ static int safexcel_hmac_sha1_digest(struct ahash_request *areq) return safexcel_ahash_finup(areq); } -struct 
safexcel_ahash_result { - struct completion completion; - int error; -}; - -static void safexcel_ahash_complete(struct crypto_async_request *req, int error) -{ - struct safexcel_ahash_result *result = req->data; - - if (error == -EINPROGRESS) - return; - - result->error = error; - complete(&result->completion); -} - static int safexcel_hmac_init_pad(struct ahash_request *areq, unsigned int blocksize, const u8 *key, unsigned int keylen, u8 *ipad, u8 *opad) { - struct safexcel_ahash_result result; + DECLARE_CRYPTO_WAIT(result); struct scatterlist sg; int ret, i; u8 *keydup; @@ -1075,16 +1059,12 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq, return -ENOMEM; ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_ahash_complete, &result); + crypto_req_done, &result); sg_init_one(&sg, keydup, keylen); ahash_request_set_crypt(areq, &sg, ipad, keylen); - init_completion(&result.completion); ret = crypto_ahash_digest(areq); - if (ret == -EINPROGRESS || ret == -EBUSY) { - wait_for_completion_interruptible(&result.completion); - ret = result.error; - } + ret = crypto_wait_req(ret, &result); /* Avoid leaking */ kfree_sensitive(keydup); @@ -1109,16 +1089,15 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq, static int safexcel_hmac_init_iv(struct ahash_request *areq, unsigned int blocksize, u8 *pad, void *state) { - struct safexcel_ahash_result result; struct safexcel_ahash_req *req; + DECLARE_CRYPTO_WAIT(result); struct scatterlist sg; int ret; ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_ahash_complete, &result); + crypto_req_done, &result); sg_init_one(&sg, pad, blocksize); ahash_request_set_crypt(areq, &sg, pad, blocksize); - init_completion(&result.completion); ret = crypto_ahash_init(areq); if (ret) @@ -1129,14 +1108,9 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq, req->last_req = true; ret = crypto_ahash_update(areq); - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - return ret; - - wait_for_completion_interruptible(&result.completion); - if (result.error) - return result.error; + ret = crypto_wait_req(ret, &result); - return crypto_ahash_export(areq, state); + return ret ?: crypto_ahash_export(areq, state); } static int __safexcel_hmac_setkey(const char *alg, const u8 *key, diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 984b3cc0237c..b63e2359a133 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -382,7 +382,7 @@ static void one_packet(dma_addr_t phys) if (req_ctx->hmac_virt) finish_scattered_hmac(crypt); - req->base.complete(&req->base, failed); + aead_request_complete(req, failed); break; } case CTL_FLAG_PERFORM_ABLK: { @@ -407,7 +407,7 @@ static void one_packet(dma_addr_t phys) free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); free_buf_chain(dev, req_ctx->src, crypt->src_buf); - req->base.complete(&req->base, failed); + skcipher_request_complete(req, failed); break; } case CTL_FLAG_GEN_ICV: diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index 5cd332880653..b61e35b932e5 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -66,7 +66,7 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine) return; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); ctx = crypto_tfm_ctx(req->tfm); ctx->ops->step(req); @@ -106,7 +106,7 @@ mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct 
crypto_async_request *req, { ctx->ops->cleanup(req); local_bh_disable(); - req->complete(req, res); + crypto_request_complete(req, res); local_bh_enable(); } diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c index c72b0672fc71..8d84ad45571c 100644 --- a/drivers/crypto/marvell/cesa/hash.c +++ b/drivers/crypto/marvell/cesa/hash.c @@ -1104,47 +1104,27 @@ struct ahash_alg mv_sha256_alg = { } }; -struct mv_cesa_ahash_result { - struct completion completion; - int error; -}; - -static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req, - int error) -{ - struct mv_cesa_ahash_result *result = req->data; - - if (error == -EINPROGRESS) - return; - - result->error = error; - complete(&result->completion); -} - static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad, void *state, unsigned int blocksize) { - struct mv_cesa_ahash_result result; + DECLARE_CRYPTO_WAIT(result); struct scatterlist sg; int ret; ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - mv_cesa_hmac_ahash_complete, &result); + crypto_req_done, &result); sg_init_one(&sg, pad, blocksize); ahash_request_set_crypt(req, &sg, pad, blocksize); - init_completion(&result.completion); ret = crypto_ahash_init(req); if (ret) return ret; ret = crypto_ahash_update(req); - if (ret && ret != -EINPROGRESS) - return ret; + ret = crypto_wait_req(ret, &result); - wait_for_completion_interruptible(&result.completion); - if (result.error) - return result.error; + if (ret) + return ret; ret = crypto_ahash_export(req, state); if (ret) @@ -1158,7 +1138,7 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req, u8 *ipad, u8 *opad, unsigned int blocksize) { - struct mv_cesa_ahash_result result; + DECLARE_CRYPTO_WAIT(result); struct scatterlist sg; int ret; int i; @@ -1172,17 +1152,12 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req, return -ENOMEM; ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - mv_cesa_hmac_ahash_complete, - &result); + crypto_req_done, &result); sg_init_one(&sg, keydup, keylen); ahash_request_set_crypt(req, &sg, ipad, keylen); - init_completion(&result.completion); ret = crypto_ahash_digest(req); - if (ret == -EINPROGRESS) { - wait_for_completion_interruptible(&result.completion); - ret = result.error; - } + ret = crypto_wait_req(ret, &result); /* Set the memory region to 0 to avoid any leak. 
*/ kfree_sensitive(keydup); diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c index f0b5537038c2..388a06e180d6 100644 --- a/drivers/crypto/marvell/cesa/tdma.c +++ b/drivers/crypto/marvell/cesa/tdma.c @@ -168,7 +168,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status) req); if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); } if (res || tdma->cur_dma == tdma_cur) diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 80ba77c793a7..1c2c870e887a 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -138,7 +138,7 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2) complete: if (areq) - areq->complete(areq, status); + crypto_request_complete(areq, status); } static void output_iv_copyback(struct crypto_async_request *areq) @@ -188,7 +188,7 @@ static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2) pdev = cpt_info->pdev; do_request_cleanup(pdev, cpt_info); } - areq->complete(areq, status); + crypto_request_complete(areq, status); } } @@ -398,7 +398,7 @@ static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm, const u8 *key1 = key; int ret; - ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen); + ret = xts_verify_key(tfm, key, keylen); if (ret) return ret; ctx->key_len = keylen; diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile index 965297e96954..f0f2942c1d27 100644 --- a/drivers/crypto/marvell/octeontx2/Makefile +++ b/drivers/crypto/marvell/octeontx2/Makefile @@ -1,11 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o +obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptcommon.o rvu_cptpf.o rvu_cptvf.o +rvu_cptcommon-objs := cn10k_cpt.o otx2_cptlf.o otx2_cpt_mbox_common.o rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \ - otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \ - cn10k_cpt.o otx2_cpt_devlink.o -rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \ - otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \ - otx2_cptvf_algs.o cn10k_cpt.o + otx2_cptpf_ucode.o otx2_cpt_devlink.o +rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o \ + otx2_cptvf_reqmgr.o otx2_cptvf_algs.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c index 1499ef75b5c2..93d22b328991 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c @@ -7,6 +7,9 @@ #include "otx2_cptlf.h" #include "cn10k_cpt.h" +static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, + struct otx2_cptlf_info *lf); + static struct cpt_hw_ops otx2_hw_ops = { .send_cmd = otx2_cpt_send_cmd, .cpt_get_compcode = otx2_cpt_get_compcode, @@ -19,8 +22,8 @@ static struct cpt_hw_ops cn10k_hw_ops = { .cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode, }; -void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, - struct otx2_cptlf_info *lf) +static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, + struct otx2_cptlf_info *lf) { void __iomem *lmtline = lf->lmtline; u64 val = (lf->slot & 0x7FF); @@ -68,6 +71,7 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf) return 0; } 
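The driver conversions above all follow one pattern: a private completion struct and hand-rolled wait loop are replaced by the generic crypto_wait helpers. As a minimal sketch of the caller side only (the function name and allocation flow are illustrative, not taken from any driver in this diff), a synchronous wait on an asynchronous ahash request looks roughly like this:

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustrative only: synchronously drive one async ahash digest. */
static int example_sync_digest(struct crypto_ahash *tfm,
			       struct scatterlist *sg, unsigned int len,
			       u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);	/* completion plus error slot */
	struct ahash_request *req;
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* crypto_req_done() records the status in 'wait' and wakes the sleeper */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* Sleeps on -EINPROGRESS/-EBUSY, then returns the final status */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return ret;
}

crypto_wait_req() also absorbs the -EBUSY backlog case, which is why the converted callers above can drop their explicit -EINPROGRESS/-EBUSY checks.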
+EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT); int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf) { @@ -91,3 +95,4 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf) return 0; } +EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT); diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h index c091392b47e0..aaefc7e38e06 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h @@ -28,8 +28,6 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result) return ((struct cn9k_cpt_res_s *)result)->uc_compcode; } -void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, - struct otx2_cptlf_info *lf); int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf); int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h index 5012b7e669f0..6019066a6451 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h @@ -145,8 +145,6 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev); int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev); -int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val, int blkaddr); int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, u64 reg, u64 val, int blkaddr); int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c index a317319696ef..115997475beb 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c @@ -19,6 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev) } return ret; } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev) { @@ -36,14 +37,17 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev) return otx2_cpt_send_mbox_msg(mbox, pdev); } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev) { return otx2_cpt_send_mbox_msg(mbox, pdev); } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT); -int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val, int blkaddr) +static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, + struct pci_dev *pdev, u64 reg, + u64 *val, int blkaddr) { struct cpt_rd_wr_reg_msg *reg_msg; @@ -91,6 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, return 0; } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, u64 reg, u64 *val, int blkaddr) @@ -103,6 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, return otx2_cpt_send_mbox_msg(mbox, pdev); } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, u64 reg, u64 val, int blkaddr) @@ -115,6 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev 
*pdev, return otx2_cpt_send_mbox_msg(mbox, pdev); } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs) { @@ -170,6 +177,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs) return ret; } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs) { @@ -202,6 +210,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs) } return ret; } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox) { @@ -216,3 +225,4 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox) return otx2_mbox_check_rsp_msgs(mbox, 0); } +EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c index c8350fcd60fa..71e5f79431af 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c @@ -274,6 +274,8 @@ void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs) } cptlf_disable_intrs(lfs); } +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts, + CRYPTO_DEV_OCTEONTX2_CPT); static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs, int lf_num, int irq_offset, @@ -321,6 +323,7 @@ free_irq: otx2_cptlf_unregister_interrupts(lfs); return ret; } +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT); void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs) { @@ -334,6 +337,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs) free_cpumask_var(lfs->lf[slot].affinity_mask); } } +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs) { @@ -366,6 +370,7 @@ free_affinity_mask: otx2_cptlf_free_irqs_affinity(lfs); return ret; } +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT); int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri, int lfs_num) @@ -422,6 +427,7 @@ clear_lfs_num: lfs->lfs_num = 0; return ret; } +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT); void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs) { @@ -431,3 +437,8 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs) /* Send request to detach LFs */ otx2_cpt_detach_rsrcs_msg(lfs); } +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT); + +MODULE_AUTHOR("Marvell"); +MODULE_DESCRIPTION("Marvell RVU CPT Common module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index a402ccfac557..ddf6e913c1c4 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -831,6 +831,8 @@ static struct pci_driver otx2_cpt_pci_driver = { module_pci_driver(otx2_cpt_pci_driver); +MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT); + MODULE_AUTHOR("Marvell"); MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING); MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index 30b423605c9c..e27ddd3c4e55 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -120,7 +120,7 @@ static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2) 
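The Makefile change earlier in this series splits the shared LF/mailbox code into its own rvu_cptcommon module, which is why these helpers now gain namespaced exports and the PF/VF drivers add a matching import. A rough sketch of the mechanism, with a hypothetical helper name standing in for the real symbols:

#include <linux/module.h>

/* Provider side (conceptually, the new rvu_cptcommon module): */
int example_cpt_helper(void)		/* hypothetical symbol */
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_cpt_helper, CRYPTO_DEV_OCTEONTX2_CPT);
MODULE_LICENSE("GPL");

/* Consumer side (the PF/VF driver modules): declare the namespace,
 * otherwise modpost warns at build time and the module loader will
 * normally refuse to resolve the namespaced symbol. */
MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);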
otx2_cpt_info_destroy(pdev, inst_info); } if (areq) - areq->complete(areq, status); + crypto_request_complete(areq, status); } static void output_iv_copyback(struct crypto_async_request *areq) @@ -170,7 +170,7 @@ static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2) pdev = inst_info->pdev; otx2_cpt_info_destroy(pdev, inst_info); } - areq->complete(areq, status); + crypto_request_complete(areq, status); } } @@ -412,7 +412,7 @@ static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm, const u8 *key1 = key; int ret; - ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen); + ret = xts_verify_key(tfm, key, keylen); if (ret) return ret; ctx->key_len = keylen; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c index 3411e664cf50..392e9fee05e8 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c @@ -429,6 +429,8 @@ static struct pci_driver otx2_cptvf_pci_driver = { module_pci_driver(otx2_cptvf_pci_driver); +MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT); + MODULE_AUTHOR("Marvell"); MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d6f9e2fe863d..1c11946a4f0b 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -413,11 +413,11 @@ static int dcp_chan_thread_aes(void *data) set_current_state(TASK_RUNNING); if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); if (arq) { ret = mxs_dcp_aes_block_crypt(arq); - arq->complete(arq, ret); + crypto_request_complete(arq, ret); } } @@ -709,11 +709,11 @@ static int dcp_chan_thread_sha(void *data) set_current_state(TASK_RUNNING); if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); if (arq) { ret = dcp_sha_req_to_buf(arq); - arq->complete(arq, ret); + crypto_request_complete(arq, ret); } } diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index f34c75a862f2..8c859872c183 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -72,7 +72,7 @@ static int (*nx842_powernv_exec)(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *workmem, int fc); -/** +/* * setup_indirect_dde - Setup an indirect DDE * * The DDE is setup with the DDE count, byte count, and address of @@ -89,7 +89,7 @@ static void setup_indirect_dde(struct data_descriptor_entry *dde, dde->address = cpu_to_be64(nx842_get_pa(ddl)); } -/** +/* * setup_direct_dde - Setup single DDE from buffer * * The DDE is setup with the buffer and length. 
The buffer must be properly @@ -111,7 +111,7 @@ static unsigned int setup_direct_dde(struct data_descriptor_entry *dde, return l; } -/** +/* * setup_ddl - Setup DDL from buffer * * Returns: @@ -181,9 +181,6 @@ static int setup_ddl(struct data_descriptor_entry *dde, CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \ (unsigned long)be64_to_cpu((csb)->address)) -/** - * wait_for_csb - */ static int wait_for_csb(struct nx842_workmem *wmem, struct coprocessor_status_block *csb) { @@ -632,8 +629,8 @@ static int nx842_exec_vas(const unsigned char *in, unsigned int inlen, * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer - * @workmem: working memory buffer pointer, size determined by - * nx842_powernv_driver.workmem_size + * @wmem: working memory buffer pointer, size determined by + * nx842_powernv_driver.workmem_size * * Returns: see @nx842_powernv_exec() */ diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 3ea334b7f820..35f2d0d8507e 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -123,14 +123,16 @@ struct ibm_nx842_counters { atomic64_t decomp_times[32]; }; -static struct nx842_devdata { +struct nx842_devdata { struct vio_dev *vdev; struct device *dev; struct ibm_nx842_counters *counters; unsigned int max_sg_len; unsigned int max_sync_size; unsigned int max_sync_sg; -} __rcu *devdata; +}; + +static struct nx842_devdata __rcu *devdata; static DEFINE_SPINLOCK(devdata_mutex); #define NX842_COUNTER_INC(_x) \ diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 3b6b0267bbec..d3667dbd9826 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -37,7 +37,7 @@ #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) -/* Minimum ring bufer size for memory allocation */ +/* Minimum ring buffer size for memory allocation */ #define ADF_RING_SIZE_BYTES_MIN(SIZE) \ ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? 
\ ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index b4b9f0aa59b9..538dcbfbcd26 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -435,8 +435,8 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx, } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) { ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags, ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE); - keylen = round_up(keylen, 16); memcpy(cd->ucs_aes.key, key, keylen); + keylen = round_up(keylen, 16); } else { memcpy(cd->aes.key, key, keylen); } @@ -676,7 +676,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp, qat_bl_free_bufl(inst->accel_dev, &qat_req->buf); if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) res = -EBADMSG; - areq->base.complete(&areq->base, res); + aead_request_complete(areq, res); } static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req) @@ -752,7 +752,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE); - sreq->base.complete(&sreq->base, res); + skcipher_request_complete(sreq, res); } void qat_alg_callback(void *resp) diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c index ff5b4347f783..bb80455b3e81 100644 --- a/drivers/crypto/qat/qat_common/qat_algs_send.c +++ b/drivers/crypto/qat/qat_common/qat_algs_send.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2022 Intel Corporation */ +#include <crypto/algapi.h> #include "adf_transport.h" #include "qat_algs_send.h" #include "qat_crypto.h" @@ -34,7 +35,7 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog) break; } list_del(&req->list); - req->base->complete(req->base, -EINPROGRESS); + crypto_request_complete(req->base, -EINPROGRESS); } spin_unlock_bh(&backlog->lock); } diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c index 2e89ff08041b..76baed0a76c0 100644 --- a/drivers/crypto/qat/qat_common/qat_bl.c +++ b/drivers/crypto/qat/qat_common/qat_bl.c @@ -26,8 +26,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev, bl_dma_dir = blp != blpout ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; for (i = 0; i < bl->num_bufs; i++) - dma_unmap_single(dev, bl->bufers[i].addr, - bl->bufers[i].len, bl_dma_dir); + dma_unmap_single(dev, bl->buffers[i].addr, + bl->buffers[i].len, bl_dma_dir); dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); @@ -36,8 +36,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev, if (blp != blpout) { for (i = 0; i < blout->num_mapped_bufs; i++) { - dma_unmap_single(dev, blout->bufers[i].addr, - blout->bufers[i].len, + dma_unmap_single(dev, blout->buffers[i].addr, + blout->buffers[i].len, DMA_FROM_DEVICE); } dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); @@ -53,6 +53,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, struct qat_request_buffs *buf, dma_addr_t extra_dst_buff, size_t sz_extra_dst_buff, + unsigned int sskip, + unsigned int dskip, gfp_t flags) { struct device *dev = &GET_DEV(accel_dev); @@ -63,8 +65,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, dma_addr_t blp = DMA_MAPPING_ERROR; dma_addr_t bloutp = DMA_MAPPING_ERROR; struct scatterlist *sg; - size_t sz_out, sz = struct_size(bufl, bufers, n); + size_t sz_out, sz = struct_size(bufl, buffers, n); int node = dev_to_node(&GET_DEV(accel_dev)); + unsigned int left; int bufl_dma_dir; if (unlikely(!n)) @@ -86,7 +89,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; for (i = 0; i < n; i++) - bufl->bufers[i].addr = DMA_MAPPING_ERROR; + bufl->buffers[i].addr = DMA_MAPPING_ERROR; + + left = sskip; for_each_sg(sgl, sg, n, i) { int y = sg_nctr; @@ -94,13 +99,21 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (!sg->length) continue; - bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - bufl_dma_dir); - bufl->bufers[y].len = sg->length; - if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) + if (left >= sg->length) { + left -= sg->length; + continue; + } + bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left, + sg->length - left, + bufl_dma_dir); + bufl->buffers[y].len = sg->length; + if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr))) goto err_in; sg_nctr++; + if (left) { + bufl->buffers[y].len -= left; + left = 0; + } } bufl->num_bufs = sg_nctr; blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); @@ -111,12 +124,14 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, buf->sz = sz; /* Handle out of place operation */ if (sgl != sglout) { - struct qat_alg_buf *bufers; + struct qat_alg_buf *buffers; int extra_buff = extra_dst_buff ? 
1 : 0; int n_sglout = sg_nents(sglout); n = n_sglout + extra_buff; - sz_out = struct_size(buflout, bufers, n); + sz_out = struct_size(buflout, buffers, n); + left = dskip; + sg_nctr = 0; if (n > QAT_MAX_BUFF_DESC) { @@ -129,9 +144,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, buf->sgl_dst_valid = true; } - bufers = buflout->bufers; + buffers = buflout->buffers; for (i = 0; i < n; i++) - bufers[i].addr = DMA_MAPPING_ERROR; + buffers[i].addr = DMA_MAPPING_ERROR; for_each_sg(sglout, sg, n_sglout, i) { int y = sg_nctr; @@ -139,17 +154,25 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (!sg->length) continue; - bufers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, bufers[y].addr))) + if (left >= sg->length) { + left -= sg->length; + continue; + } + buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left, + sg->length - left, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, buffers[y].addr))) goto err_out; - bufers[y].len = sg->length; + buffers[y].len = sg->length; sg_nctr++; + if (left) { + buffers[y].len -= left; + left = 0; + } } if (extra_buff) { - bufers[sg_nctr].addr = extra_dst_buff; - bufers[sg_nctr].len = sz_extra_dst_buff; + buffers[sg_nctr].addr = extra_dst_buff; + buffers[sg_nctr].len = sz_extra_dst_buff; } buflout->num_bufs = sg_nctr; @@ -174,11 +197,11 @@ err_out: n = sg_nents(sglout); for (i = 0; i < n; i++) { - if (buflout->bufers[i].addr == extra_dst_buff) + if (buflout->buffers[i].addr == extra_dst_buff) break; - if (!dma_mapping_error(dev, buflout->bufers[i].addr)) - dma_unmap_single(dev, buflout->bufers[i].addr, - buflout->bufers[i].len, + if (!dma_mapping_error(dev, buflout->buffers[i].addr)) + dma_unmap_single(dev, buflout->buffers[i].addr, + buflout->buffers[i].len, DMA_FROM_DEVICE); } @@ -191,9 +214,9 @@ err_in: n = sg_nents(sgl); for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bufl->bufers[i].addr)) - dma_unmap_single(dev, bufl->bufers[i].addr, - bufl->bufers[i].len, + if (!dma_mapping_error(dev, bufl->buffers[i].addr)) + dma_unmap_single(dev, bufl->buffers[i].addr, + bufl->buffers[i].len, bufl_dma_dir); if (!buf->sgl_src_valid) @@ -212,15 +235,19 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, { dma_addr_t extra_dst_buff = 0; size_t sz_extra_dst_buff = 0; + unsigned int sskip = 0; + unsigned int dskip = 0; if (params) { extra_dst_buff = params->extra_dst_buff; sz_extra_dst_buff = params->sz_extra_dst_buff; + sskip = params->sskip; + dskip = params->dskip; } return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf, extra_dst_buff, sz_extra_dst_buff, - flags); + sskip, dskip, flags); } static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev, @@ -231,9 +258,9 @@ static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev, int i; for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bl->bufers[i].addr)) - dma_unmap_single(dev, bl->bufers[i].addr, - bl->bufers[i].len, DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, bl->buffers[i].addr)) + dma_unmap_single(dev, bl->buffers[i].addr, + bl->buffers[i].len, DMA_FROM_DEVICE); } static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, @@ -248,13 +275,13 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, size_t sz; n = sg_nents(sgl); - sz = struct_size(bufl, bufers, n); + sz = struct_size(bufl, buffers, n); bufl = kzalloc_node(sz, GFP_KERNEL, node); if (unlikely(!bufl)) return -ENOMEM; for (i = 0; i < n; i++) - bufl->bufers[i].addr = DMA_MAPPING_ERROR; + 
bufl->buffers[i].addr = DMA_MAPPING_ERROR; sg_nctr = 0; for_each_sg(sgl, sg, n, i) { @@ -263,11 +290,11 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, if (!sg->length) continue; - bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - DMA_FROM_DEVICE); - bufl->bufers[y].len = sg->length; - if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) + bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg), + sg->length, + DMA_FROM_DEVICE); + bufl->buffers[y].len = sg->length; + if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr))) goto err_map; sg_nctr++; } @@ -280,9 +307,9 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, err_map: for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bufl->bufers[i].addr)) - dma_unmap_single(dev, bufl->bufers[i].addr, - bufl->bufers[i].len, + if (!dma_mapping_error(dev, bufl->buffers[i].addr)) + dma_unmap_single(dev, bufl->buffers[i].addr, + bufl->buffers[i].len, DMA_FROM_DEVICE); kfree(bufl); *bl = NULL; @@ -351,7 +378,7 @@ int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, if (ret) return ret; - new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs); + new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs); /* Map new firmware SGL descriptor */ new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE); diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h index 8ca5e52ee9e2..d87e4f35ac39 100644 --- a/drivers/crypto/qat/qat_common/qat_bl.h +++ b/drivers/crypto/qat/qat_common/qat_bl.h @@ -18,7 +18,7 @@ struct qat_alg_buf_list { u64 resrvd; u32 num_bufs; u32 num_mapped_bufs; - struct qat_alg_buf bufers[]; + struct qat_alg_buf buffers[]; } __packed; struct qat_alg_fixed_buf_list { @@ -42,6 +42,8 @@ struct qat_request_buffs { struct qat_sgl_to_bufl_params { dma_addr_t extra_dst_buff; size_t sz_extra_dst_buff; + unsigned int sskip; + unsigned int dskip; }; void qat_bl_free_bufl(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/qat/qat_common/qat_comp_algs.c index 1480d36a8d2b..b533984906ec 100644 --- a/drivers/crypto/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/qat/qat_common/qat_comp_algs.c @@ -13,6 +13,15 @@ #include "qat_compression.h" #include "qat_algs_send.h" +#define QAT_RFC_1950_HDR_SIZE 2 +#define QAT_RFC_1950_FOOTER_SIZE 4 +#define QAT_RFC_1950_CM_DEFLATE 8 +#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7 +#define QAT_RFC_1950_CM_MASK 0x0f +#define QAT_RFC_1950_CM_OFFSET 4 +#define QAT_RFC_1950_DICT_MASK 0x20 +#define QAT_RFC_1950_COMP_HDR 0x785e + static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; @@ -21,9 +30,12 @@ enum direction { COMPRESSION = 1, }; +struct qat_compression_req; + struct qat_compression_ctx { u8 comp_ctx[QAT_COMP_CTX_SIZE]; struct qat_compression_instance *inst; + int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp); }; struct qat_dst { @@ -94,7 +106,70 @@ static void qat_comp_resubmit(struct work_struct *work) err: qat_bl_free_bufl(accel_dev, qat_bufs); - areq->base.complete(&areq->base, ret); + acomp_request_complete(areq, ret); +} + +static int parse_zlib_header(u16 zlib_h) +{ + int ret = -EINVAL; + __be16 header; + u8 *header_p; + u8 cmf, flg; + + header = cpu_to_be16(zlib_h); + header_p = (u8 *)&header; + + flg = header_p[0]; + cmf = header_p[1]; + + if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K) + return ret; + + if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE) + return 
ret; + + if (flg & QAT_RFC_1950_DICT_MASK) + return ret; + + return 0; +} + +static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req, + void *resp) +{ + struct acomp_req *areq = qat_req->acompress_req; + enum direction dir = qat_req->dir; + __be32 qat_produced_adler; + + qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp)); + + if (dir == COMPRESSION) { + __be16 zlib_header; + + zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR); + scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1); + areq->dlen += QAT_RFC_1950_HDR_SIZE; + + scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen, + QAT_RFC_1950_FOOTER_SIZE, 1); + areq->dlen += QAT_RFC_1950_FOOTER_SIZE; + } else { + __be32 decomp_adler; + int footer_offset; + int consumed; + + consumed = qat_comp_get_consumed_ctr(resp); + footer_offset = consumed + QAT_RFC_1950_HDR_SIZE; + if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen) + return -EBADMSG; + + scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset, + QAT_RFC_1950_FOOTER_SIZE, 0); + + if (qat_produced_adler != decomp_adler) + return -EBADMSG; + } + return 0; } static void qat_comp_generic_callback(struct qat_compression_req *qat_req, @@ -167,9 +242,12 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, res = 0; areq->dlen = produced; + if (ctx->qat_comp_callback) + res = ctx->qat_comp_callback(qat_req, resp); + end: qat_bl_free_bufl(accel_dev, &qat_req->buf); - areq->base.complete(&areq->base, res); + acomp_request_complete(areq, res); } void qat_comp_alg_callback(void *resp) @@ -215,24 +293,39 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) memset(ctx, 0, sizeof(*ctx)); } -static int qat_comp_alg_compress_decompress(struct acomp_req *areq, - enum direction dir) +static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm) +{ + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + ret = qat_comp_alg_init_tfm(acomp_tfm); + ctx->qat_comp_callback = &qat_comp_rfc1950_callback; + + return ret; +} + +static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir, + unsigned int shdr, unsigned int sftr, + unsigned int dhdr, unsigned int dftr) { struct qat_compression_req *qat_req = acomp_request_ctx(areq); struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq); struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); struct qat_compression_instance *inst = ctx->inst; - struct qat_sgl_to_bufl_params *p_params = NULL; gfp_t f = qat_algs_alloc_flags(&areq->base); - struct qat_sgl_to_bufl_params params; - unsigned int slen = areq->slen; - unsigned int dlen = areq->dlen; + struct qat_sgl_to_bufl_params params = {0}; + int slen = areq->slen - shdr - sftr; + int dlen = areq->dlen - dhdr - dftr; dma_addr_t sfbuf, dfbuf; u8 *req = qat_req->req; size_t ovf_buff_sz; int ret; + params.sskip = shdr; + params.dskip = dhdr; + if (!areq->src || !slen) return -EINVAL; @@ -254,6 +347,7 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, if (!areq->dst) return -ENOMEM; + dlen -= dhdr + dftr; areq->dlen = dlen; qat_req->dst.resubmitted = false; } @@ -262,11 +356,10 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, params.extra_dst_buff = inst->dc_data->ovf_buff_p; ovf_buff_sz = inst->dc_data->ovf_buff_sz; params.sz_extra_dst_buff = ovf_buff_sz; - p_params = ¶ms; } ret = 
qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst, - &qat_req->buf, p_params, f); + &qat_req->buf, ¶ms, f); if (unlikely(ret)) return ret; @@ -299,12 +392,49 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, static int qat_comp_alg_compress(struct acomp_req *req) { - return qat_comp_alg_compress_decompress(req, COMPRESSION); + return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0); } static int qat_comp_alg_decompress(struct acomp_req *req) { - return qat_comp_alg_compress_decompress(req, DECOMPRESSION); + return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0); +} + +static int qat_comp_alg_rfc1950_compress(struct acomp_req *req) +{ + if (!req->dst && req->dlen != 0) + return -EINVAL; + + if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) + return -EINVAL; + + return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, + QAT_RFC_1950_HDR_SIZE, + QAT_RFC_1950_FOOTER_SIZE); +} + +static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req) +{ + struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req); + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; + u16 zlib_header; + int ret; + + if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) + return -EBADMSG; + + scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0); + + ret = parse_zlib_header(zlib_header); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n"); + return ret; + } + + return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE, + QAT_RFC_1950_FOOTER_SIZE, 0, 0); } static struct acomp_alg qat_acomp[] = { { @@ -322,6 +452,21 @@ static struct acomp_alg qat_acomp[] = { { .decompress = qat_comp_alg_decompress, .dst_free = sgl_free, .reqsize = sizeof(struct qat_compression_req), +}, { + .base = { + .cra_name = "zlib-deflate", + .cra_driver_name = "qat_zlib_deflate", + .cra_priority = 4001, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_ctxsize = sizeof(struct qat_compression_ctx), + .cra_module = THIS_MODULE, + }, + .init = qat_comp_alg_rfc1950_init_tfm, + .exit = qat_comp_alg_exit_tfm, + .compress = qat_comp_alg_rfc1950_compress, + .decompress = qat_comp_alg_rfc1950_decompress, + .dst_free = sgl_free, + .reqsize = sizeof(struct qat_compression_req), } }; int qat_comp_algs_register(void) diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/qat/qat_common/qat_compression.c index 9fd10f4242f8..3f1f35283266 100644 --- a/drivers/crypto/qat/qat_common/qat_compression.c +++ b/drivers/crypto/qat/qat_common/qat_compression.c @@ -72,7 +72,7 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node) } if (!accel_dev) { - pr_info("QAT: Could not find a device on node %d\n", node); + pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node); /* Get any started device */ list_for_each(itr, adf_devmgr_get_head()) { struct adf_accel_dev *tmp_dev; diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index e31199eade5b..40c8e74d1cf9 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -70,7 +70,7 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) } if (!accel_dev) { - pr_info("QAT: Could not find a device on node %d\n", node); + pr_debug_ratelimited("QAT: Could not find a device on 
node %d\n", node); /* Get any started device */ list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) { if (adf_dev_started(tmp_dev) && diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index d3780be44a76..74deca4f96e0 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c @@ -107,7 +107,7 @@ static int qce_handle_queue(struct qce_device *qce, if (backlog) { spin_lock_bh(&qce->lock); - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); spin_unlock_bh(&qce->lock); } @@ -132,7 +132,7 @@ static void qce_tasklet_req_done(unsigned long data) spin_unlock_irqrestore(&qce->lock, flags); if (req) - req->complete(req, qce->result); + crypto_request_complete(req, qce->result); qce_handle_queue(qce, NULL); } diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index b79e49aa724f..1c4d5fb05d69 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -499,7 +499,7 @@ static void s5p_sg_done(struct s5p_aes_dev *dev) /* Calls the completion. Cannot be called with dev->lock hold. */ static void s5p_aes_complete(struct skcipher_request *req, int err) { - req->base.complete(&req->base, err); + skcipher_request_complete(req, err); } static void s5p_unset_outdata(struct s5p_aes_dev *dev) @@ -1355,7 +1355,7 @@ static void s5p_hash_finish_req(struct ahash_request *req, int err) spin_unlock_irqrestore(&dd->hash_lock, flags); if (req->base.complete) - req->base.complete(&req->base, err); + ahash_request_complete(req, err); } /** @@ -1397,7 +1397,7 @@ retry: return ret; if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); req = ahash_request_cast(async_req); dd->hash_req = req; @@ -1991,7 +1991,7 @@ static void s5p_tasklet_cb(unsigned long data) spin_unlock_irqrestore(&dev->lock, flags); if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); dev->req = skcipher_request_cast(async_req); dev->ctx = crypto_tfm_ctx(dev->req->base.tfm); diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 7ab20fb95166..dd4c703cd855 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1049,7 +1049,7 @@ static int sahara_queue_manage(void *data) spin_unlock_bh(&dev->queue_spinlock); if (backlog) - backlog->complete(backlog, -EINPROGRESS); + crypto_request_complete(backlog, -EINPROGRESS); if (async_req) { if (crypto_tfm_alg_type(async_req->tfm) == @@ -1065,7 +1065,7 @@ static int sahara_queue_manage(void *data) ret = sahara_aes_process(req); } - async_req->complete(async_req, ret); + crypto_request_complete(async_req, ret); continue; } diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 4208338e72b6..6b8d731092a4 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -597,7 +597,6 @@ static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp) static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) { - unsigned int i; size_t written; size_t len; u32 alen = cryp->areq->assoclen; @@ -623,8 +622,8 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) written = min_t(size_t, AES_BLOCK_SIZE - len, alen); scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); - for (i = 0; i < AES_BLOCK_32; i++) - stm32_cryp_write(cryp, cryp->caps->din, block[i]); + + writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); cryp->header_in -= written; @@ -1363,18 +1362,14 @@ static int 
stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) u32 out_tag[AES_BLOCK_32]; /* Get and write tag */ - for (i = 0; i < AES_BLOCK_32; i++) - out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout); - + readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); } else { /* Get and check tag */ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0); - - for (i = 0; i < AES_BLOCK_32; i++) - out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout); + readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); if (crypto_memneq(in_tag, out_tag, cryp->authsize)) ret = -EBADMSG; @@ -1415,12 +1410,9 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) { - unsigned int i; u32 block[AES_BLOCK_32]; - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - block[i] = stm32_cryp_read(cryp, cryp->caps->dout); - + readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, @@ -1429,14 +1421,11 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) { - unsigned int i; u32 block[AES_BLOCK_32] = {0}; scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_in), 0); - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - stm32_cryp_write(cryp, cryp->caps->din, block[i]); - + writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32)); cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); } @@ -1480,8 +1469,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) * Same code as stm32_cryp_irq_read_data(), but we want to store * block value */ - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - block[i] = stm32_cryp_read(cryp, cryp->caps->dout); + readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); @@ -1499,8 +1487,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* f) write padded data */ - for (i = 0; i < AES_BLOCK_32; i++) - stm32_cryp_write(cryp, cryp->caps->din, block[i]); + writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); /* g) Empty fifo out */ err = stm32_cryp_wait_output(cryp); @@ -1580,8 +1567,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) * Same code as stm32_cryp_irq_read_data(), but we want to store * block value */ - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) - block[i] = stm32_cryp_read(cryp, cryp->caps->dout); + readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); @@ -1660,15 +1646,14 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) { - unsigned int i; u32 block[AES_BLOCK_32] = {0}; size_t written; written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in); scatterwalk_copychunks(block, &cryp->in_walk, written, 0); - for (i = 0; i < AES_BLOCK_32; i++) - 
stm32_cryp_write(cryp, cryp->caps->din, block[i]); + + writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); cryp->header_in -= written; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index d33006d43f76..7bf805563ac2 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -32,6 +32,7 @@ #define HASH_CR 0x00 #define HASH_DIN 0x04 #define HASH_STR 0x08 +#define HASH_UX500_HREG(x) (0x0c + ((x) * 0x04)) #define HASH_IMR 0x20 #define HASH_SR 0x24 #define HASH_CSR(x) (0x0F8 + ((x) * 0x04)) @@ -54,6 +55,10 @@ #define HASH_CR_ALGO_SHA224 0x40000 #define HASH_CR_ALGO_SHA256 0x40080 +#define HASH_CR_UX500_EMPTYMSG BIT(20) +#define HASH_CR_UX500_ALGO_SHA1 BIT(7) +#define HASH_CR_UX500_ALGO_SHA256 0x0 + /* Interrupt */ #define HASH_DINIE BIT(0) #define HASH_DCIE BIT(1) @@ -115,6 +120,7 @@ enum stm32_hash_data_format { struct stm32_hash_ctx { struct crypto_engine_ctx enginectx; struct stm32_hash_dev *hdev; + struct crypto_shash *xtfm; unsigned long flags; u8 key[HASH_MAX_KEY_SIZE]; @@ -157,6 +163,10 @@ struct stm32_hash_algs_info { struct stm32_hash_pdata { struct stm32_hash_algs_info *algs_info; size_t algs_info_size; + bool has_sr; + bool has_mdmat; + bool broken_emptymsg; + bool ux500; }; struct stm32_hash_dev { @@ -168,6 +178,7 @@ struct stm32_hash_dev { phys_addr_t phys_base; u32 dma_mode; u32 dma_maxburst; + bool polled; struct ahash_request *req; struct crypto_engine *engine; @@ -208,6 +219,11 @@ static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev) { u32 status; + /* The Ux500 lacks the special status register, we poll the DCAL bit instead */ + if (!hdev->pdata->has_sr) + return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status, + !(status & HASH_STR_DCAL), 10, 10000); + return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status, !(status & HASH_SR_BUSY), 10, 10000); } @@ -249,7 +265,7 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev) return 0; } -static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) +static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); @@ -263,13 +279,19 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) reg |= HASH_CR_ALGO_MD5; break; case HASH_FLAGS_SHA1: - reg |= HASH_CR_ALGO_SHA1; + if (hdev->pdata->ux500) + reg |= HASH_CR_UX500_ALGO_SHA1; + else + reg |= HASH_CR_ALGO_SHA1; break; case HASH_FLAGS_SHA224: reg |= HASH_CR_ALGO_SHA224; break; case HASH_FLAGS_SHA256: - reg |= HASH_CR_ALGO_SHA256; + if (hdev->pdata->ux500) + reg |= HASH_CR_UX500_ALGO_SHA256; + else + reg |= HASH_CR_ALGO_SHA256; break; default: reg |= HASH_CR_ALGO_MD5; @@ -284,7 +306,15 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) reg |= HASH_CR_LKEY; } - stm32_hash_write(hdev, HASH_IMR, HASH_DCIE); + /* + * On the Ux500 we need to set a special flag to indicate that + * the message is zero length. 
+ */ + if (hdev->pdata->ux500 && bufcnt == 0) + reg |= HASH_CR_UX500_EMPTYMSG; + + if (!hdev->polled) + stm32_hash_write(hdev, HASH_IMR, HASH_DCIE); stm32_hash_write(hdev, HASH_CR, reg); @@ -345,7 +375,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, hdev->flags |= HASH_FLAGS_CPU; - stm32_hash_write_ctrl(hdev); + stm32_hash_write_ctrl(hdev, length); if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; @@ -362,6 +392,9 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, stm32_hash_write(hdev, HASH_DIN, buffer[count]); if (final) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + stm32_hash_set_nblw(hdev, length); reg = stm32_hash_read(hdev, HASH_STR); reg |= HASH_STR_DCAL; @@ -399,8 +432,15 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) if (final) { bufcnt = rctx->bufcnt; rctx->bufcnt = 0; - err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, - (rctx->flags & HASH_FLAGS_FINUP)); + err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 1); + + /* If we have an IRQ, wait for that, else poll for completion */ + if (hdev->polled) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + hdev->flags |= HASH_FLAGS_OUTPUT_READY; + err = 0; + } } return err; @@ -431,11 +471,12 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, reg = stm32_hash_read(hdev, HASH_CR); - if (mdma) - reg |= HASH_CR_MDMAT; - else - reg &= ~HASH_CR_MDMAT; - + if (!hdev->pdata->has_mdmat) { + if (mdma) + reg |= HASH_CR_MDMAT; + else + reg &= ~HASH_CR_MDMAT; + } reg |= HASH_CR_DMAE; stm32_hash_write(hdev, HASH_CR, reg); @@ -556,7 +597,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) if (rctx->nents < 0) return -EINVAL; - stm32_hash_write_ctrl(hdev); + stm32_hash_write_ctrl(hdev, rctx->total); if (hdev->flags & HASH_FLAGS_HMAC) { err = stm32_hash_hmac_dma_send(hdev); @@ -743,16 +784,57 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev) else err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1); + /* If we have an IRQ, wait for that, else poll for completion */ + if (hdev->polled) { + if (stm32_hash_wait_busy(hdev)) + return -ETIMEDOUT; + hdev->flags |= HASH_FLAGS_OUTPUT_READY; + /* Caller will call stm32_hash_finish_req() */ + err = 0; + } return err; } +static void stm32_hash_emptymsg_fallback(struct ahash_request *req) +{ + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_dev *hdev = rctx->hdev; + int ret; + + dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n", + ctx->keylen); + + if (!ctx->xtfm) { + dev_err(hdev->dev, "no fallback engine\n"); + return; + } + + if (ctx->keylen) { + ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen); + if (ret) { + dev_err(hdev->dev, "failed to set key ret=%d\n", ret); + return; + } + } + + ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest); + if (ret) + dev_err(hdev->dev, "shash digest error\n"); +} + static void stm32_hash_copy_hash(struct ahash_request *req) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); + struct stm32_hash_dev *hdev = rctx->hdev; __be32 *hash = (void *)rctx->digest; unsigned int i, hashsize; + if (hdev->pdata->broken_emptymsg && !req->nbytes) + return stm32_hash_emptymsg_fallback(req); + switch (rctx->flags & HASH_FLAGS_ALGO_MASK) { case HASH_FLAGS_MD5: hashsize = MD5_DIGEST_SIZE; @@ -770,9 +852,14 @@ static void stm32_hash_copy_hash(struct ahash_request *req) return; } - for (i = 
0; i < hashsize / sizeof(u32); i++) - hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev, - HASH_HREG(i))); + for (i = 0; i < hashsize / sizeof(u32); i++) { + if (hdev->pdata->ux500) + hash[i] = cpu_to_be32(stm32_hash_read(hdev, + HASH_UX500_HREG(i))); + else + hash[i] = cpu_to_be32(stm32_hash_read(hdev, + HASH_HREG(i))); + } } static int stm32_hash_finish(struct ahash_request *req) @@ -961,11 +1048,13 @@ static int stm32_hash_export(struct ahash_request *req, void *out) struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); u32 *preg; unsigned int i; + int ret; pm_runtime_get_sync(hdev->dev); - while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)) - cpu_relax(); + ret = stm32_hash_wait_busy(hdev); + if (ret) + return ret; rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER, sizeof(u32), @@ -973,7 +1062,8 @@ static int stm32_hash_export(struct ahash_request *req, void *out) preg = rctx->hw_context; - *preg++ = stm32_hash_read(hdev, HASH_IMR); + if (!hdev->pdata->ux500) + *preg++ = stm32_hash_read(hdev, HASH_IMR); *preg++ = stm32_hash_read(hdev, HASH_STR); *preg++ = stm32_hash_read(hdev, HASH_CR); for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++) @@ -1002,7 +1092,8 @@ static int stm32_hash_import(struct ahash_request *req, const void *in) pm_runtime_get_sync(hdev->dev); - stm32_hash_write(hdev, HASH_IMR, *preg++); + if (!hdev->pdata->ux500) + stm32_hash_write(hdev, HASH_IMR, *preg++); stm32_hash_write(hdev, HASH_STR, *preg++); stm32_hash_write(hdev, HASH_CR, *preg); reg = *preg++ | HASH_CR_INIT; @@ -1034,6 +1125,29 @@ static int stm32_hash_setkey(struct crypto_ahash *tfm, return 0; } +static int stm32_hash_init_fallback(struct crypto_tfm *tfm) +{ + struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); + const char *name = crypto_tfm_alg_name(tfm); + struct crypto_shash *xtfm; + + /* The fallback is only needed on Ux500 */ + if (!hdev->pdata->ux500) + return 0; + + xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(xtfm)) { + dev_err(hdev->dev, "failed to allocate %s fallback\n", + name); + return PTR_ERR(xtfm); + } + dev_info(hdev->dev, "allocated %s fallback\n", name); + ctx->xtfm = xtfm; + + return 0; +} + static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, const char *algs_hmac_name) { @@ -1050,7 +1164,8 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, ctx->enginectx.op.do_one_request = stm32_hash_one_request; ctx->enginectx.op.prepare_request = stm32_hash_prepare_req; ctx->enginectx.op.unprepare_request = NULL; - return 0; + + return stm32_hash_init_fallback(tfm); } static int stm32_hash_cra_init(struct crypto_tfm *tfm) @@ -1078,6 +1193,14 @@ static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm) return stm32_hash_cra_init_algs(tfm, "sha256"); } +static void stm32_hash_cra_exit(struct crypto_tfm *tfm) +{ + struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->xtfm) + crypto_free_shash(ctx->xtfm); +} + static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) { struct stm32_hash_dev *hdev = dev_id; @@ -1121,7 +1244,7 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id) return IRQ_NONE; } -static struct ahash_alg algs_md5_sha1[] = { +static struct ahash_alg algs_md5[] = { { .init = stm32_hash_init, .update = stm32_hash_update, @@ -1143,6 +1266,7 @@ static struct ahash_alg algs_md5_sha1[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = 
THIS_MODULE, } } @@ -1169,10 +1293,14 @@ static struct ahash_alg algs_md5_sha1[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_md5_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } }, +}; + +static struct ahash_alg algs_sha1[] = { { .init = stm32_hash_init, .update = stm32_hash_update, @@ -1194,6 +1322,7 @@ static struct ahash_alg algs_md5_sha1[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } @@ -1220,13 +1349,14 @@ static struct ahash_alg algs_md5_sha1[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha1_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } }, }; -static struct ahash_alg algs_sha224_sha256[] = { +static struct ahash_alg algs_sha224[] = { { .init = stm32_hash_init, .update = stm32_hash_update, @@ -1248,6 +1378,7 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } @@ -1274,10 +1405,14 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha224_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } }, +}; + +static struct ahash_alg algs_sha256[] = { { .init = stm32_hash_init, .update = stm32_hash_update, @@ -1299,6 +1434,7 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } @@ -1325,6 +1461,7 @@ static struct ahash_alg algs_sha224_sha256[] = { .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha256_init, + .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } } @@ -1370,36 +1507,74 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev) return 0; } +static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = { + { + .algs_list = algs_sha1, + .size = ARRAY_SIZE(algs_sha1), + }, + { + .algs_list = algs_sha256, + .size = ARRAY_SIZE(algs_sha256), + }, +}; + +static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = { + .algs_info = stm32_hash_algs_info_ux500, + .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500), + .broken_emptymsg = true, + .ux500 = true, +}; + static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = { { - .algs_list = algs_md5_sha1, - .size = ARRAY_SIZE(algs_md5_sha1), + .algs_list = algs_md5, + .size = ARRAY_SIZE(algs_md5), + }, + { + .algs_list = algs_sha1, + .size = ARRAY_SIZE(algs_sha1), }, }; static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = { .algs_info = stm32_hash_algs_info_stm32f4, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4), + .has_sr = true, + .has_mdmat = true, }; static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = { { - .algs_list = algs_md5_sha1, - .size = ARRAY_SIZE(algs_md5_sha1), + .algs_list = algs_md5, + .size = ARRAY_SIZE(algs_md5), + }, + { + .algs_list = algs_sha1, + .size = ARRAY_SIZE(algs_sha1), + }, + { + .algs_list = algs_sha224, + .size = ARRAY_SIZE(algs_sha224), }, { - .algs_list = algs_sha224_sha256, - .size = ARRAY_SIZE(algs_sha224_sha256), + .algs_list = algs_sha256, + .size = ARRAY_SIZE(algs_sha256), }, 
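The broken_emptymsg/xtfm additions above handle the Ux500 hash block being unable to produce a digest for a zero-length message: the driver allocates a software shash of the same algorithm at init time and falls back to it for that one case. A condensed sketch of the fallback path, with a hypothetical helper name and the fallback transform assumed to have been allocated with CRYPTO_ALG_NEED_FALLBACK:

#include <crypto/hash.h>

/*
 * Illustrative only: digest a zero-length message with the software
 * fallback, optionally keying it first for the HMAC variants.
 */
static int example_empty_msg_digest(struct crypto_shash *fallback,
				    const u8 *key, unsigned int keylen,
				    u8 *digest)
{
	int ret;

	if (keylen) {
		ret = crypto_shash_setkey(fallback, key, keylen);
		if (ret)
			return ret;
	}

	/* init + update(0 bytes) + final in one call */
	return crypto_shash_tfm_digest(fallback, NULL, 0, digest);
}

crypto_shash_tfm_digest() performs the whole init/update/final sequence in one call, which is all the empty-message case needs.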
}; static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = { .algs_info = stm32_hash_algs_info_stm32f7, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7), + .has_sr = true, + .has_mdmat = true, }; static const struct of_device_id stm32_hash_of_match[] = { { + .compatible = "stericsson,ux500-hash", + .data = &stm32_hash_pdata_ux500, + }, + { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4, }, @@ -1452,16 +1627,23 @@ static int stm32_hash_probe(struct platform_device *pdev) if (ret) return ret; - irq = platform_get_irq(pdev, 0); - if (irq < 0) + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && irq != -ENXIO) return irq; - ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, - stm32_hash_irq_thread, IRQF_ONESHOT, - dev_name(dev), hdev); - if (ret) { - dev_err(dev, "Cannot grab IRQ\n"); - return ret; + if (irq > 0) { + ret = devm_request_threaded_irq(dev, irq, + stm32_hash_irq_handler, + stm32_hash_irq_thread, + IRQF_ONESHOT, + dev_name(dev), hdev); + if (ret) { + dev_err(dev, "Cannot grab IRQ\n"); + return ret; + } + } else { + dev_info(dev, "No IRQ, use polling mode\n"); + hdev->polled = true; } hdev->clk = devm_clk_get(&pdev->dev, NULL); @@ -1503,9 +1685,11 @@ static int stm32_hash_probe(struct platform_device *pdev) case 0: break; case -ENOENT: - dev_dbg(dev, "DMA mode not available\n"); + case -ENODEV: + dev_info(dev, "DMA mode not available\n"); break; default: + dev_err(dev, "DMA init error %d\n", ret); goto err_dma; } @@ -1524,7 +1708,11 @@ static int stm32_hash_probe(struct platform_device *pdev) if (ret) goto err_engine_start; - hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR); + if (hdev->pdata->ux500) + /* FIXME: implement DMA mode for Ux500 */ + hdev->dma_mode = 0; + else + hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR); /* Register algos */ ret = stm32_hash_register_algs(hdev); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 71db6450b6aa..bb27f011cf31 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1393,7 +1393,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, alloc_len += sizeof(struct talitos_desc); alloc_len += ivsize; - edesc = kmalloc(alloc_len, GFP_DMA | flags); + edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags); if (!edesc) return ERR_PTR(-ENOMEM); if (ivsize) { @@ -1560,7 +1560,7 @@ static void skcipher_done(struct device *dev, kfree(edesc); - areq->base.complete(&areq->base, err); + skcipher_request_complete(areq, err); } static int common_nonsnoop(struct talitos_edesc *edesc, @@ -1759,7 +1759,7 @@ static void ahash_done(struct device *dev, kfree(edesc); - areq->base.complete(&areq->base, err); + ahash_request_complete(areq, err); } /* diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig deleted file mode 100644 index dcbd7404768f..000000000000 --- a/drivers/crypto/ux500/Kconfig +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Copyright (C) ST-Ericsson SA 2010 -# Author: Shujuan Chen (shujuan.chen@stericsson.com) -# - -config CRYPTO_DEV_UX500_HASH - tristate "UX500 crypto driver for HASH block" - depends on CRYPTO_DEV_UX500 - select CRYPTO_HASH - select CRYPTO_SHA1 - select CRYPTO_SHA256 - help - This selects the hash driver for the UX500_HASH hardware. - Depends on UX500/STM DMA if running in DMA mode. 
- -config CRYPTO_DEV_UX500_DEBUG - bool "Activate ux500 platform debug-mode for crypto and hash block" - depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH - help - Say Y if you want to add debug prints to ux500_hash and - ux500_cryp devices. diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile deleted file mode 100644 index f1aa4edf66f4..000000000000 --- a/drivers/crypto/ux500/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Copyright (C) ST-Ericsson SA 2010 -# Author: Shujuan Chen (shujuan.chen@stericsson.com) -# - -obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/ diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile deleted file mode 100644 index a8f088724772..000000000000 --- a/drivers/crypto/ux500/hash/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Copyright (C) ST-Ericsson SA 2010 -# Author: Shujuan Chen (shujuan.chen@stericsson.com) -# -ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG -CFLAGS_hash_core.o := -DDEBUG -endif - -obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o -ux500_hash-objs := hash_core.o diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h deleted file mode 100644 index 7c9bcc15125f..000000000000 --- a/drivers/crypto/ux500/hash/hash_alg.h +++ /dev/null @@ -1,398 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen (shujuan.chen@stericsson.com) - * Author: Joakim Bech (joakim.xx.bech@stericsson.com) - * Author: Berne Hebark (berne.hebark@stericsson.com)) - */ -#ifndef _HASH_ALG_H -#define _HASH_ALG_H - -#include <linux/bitops.h> - -#define HASH_BLOCK_SIZE 64 -#define HASH_DMA_FIFO 4 -#define HASH_DMA_ALIGN_SIZE 4 -#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024 -#define HASH_BYTES_PER_WORD 4 - -/* Maximum value of the length's high word */ -#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL - -/* Power on Reset values HASH registers */ -#define HASH_RESET_CR_VALUE 0x0 -#define HASH_RESET_STR_VALUE 0x0 - -/* Number of context swap registers */ -#define HASH_CSR_COUNT 52 - -#define HASH_RESET_CSRX_REG_VALUE 0x0 -#define HASH_RESET_CSFULL_REG_VALUE 0x0 -#define HASH_RESET_CSDATAIN_REG_VALUE 0x0 - -#define HASH_RESET_INDEX_VAL 0x0 -#define HASH_RESET_BIT_INDEX_VAL 0x0 -#define HASH_RESET_BUFFER_VAL 0x0 -#define HASH_RESET_LEN_HIGH_VAL 0x0 -#define HASH_RESET_LEN_LOW_VAL 0x0 - -/* Control register bitfields */ -#define HASH_CR_RESUME_MASK 0x11FCF - -#define HASH_CR_SWITCHON_POS 31 -#define HASH_CR_SWITCHON_MASK BIT(31) - -#define HASH_CR_EMPTYMSG_POS 20 -#define HASH_CR_EMPTYMSG_MASK BIT(20) - -#define HASH_CR_DINF_POS 12 -#define HASH_CR_DINF_MASK BIT(12) - -#define HASH_CR_NBW_POS 8 -#define HASH_CR_NBW_MASK 0x00000F00UL - -#define HASH_CR_LKEY_POS 16 -#define HASH_CR_LKEY_MASK BIT(16) - -#define HASH_CR_ALGO_POS 7 -#define HASH_CR_ALGO_MASK BIT(7) - -#define HASH_CR_MODE_POS 6 -#define HASH_CR_MODE_MASK BIT(6) - -#define HASH_CR_DATAFORM_POS 4 -#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5)) - -#define HASH_CR_DMAE_POS 3 -#define HASH_CR_DMAE_MASK BIT(3) - -#define HASH_CR_INIT_POS 2 -#define HASH_CR_INIT_MASK BIT(2) - -#define HASH_CR_PRIVN_POS 1 -#define HASH_CR_PRIVN_MASK BIT(1) - -#define HASH_CR_SECN_POS 0 -#define HASH_CR_SECN_MASK BIT(0) - -/* Start register bitfields */ -#define HASH_STR_DCAL_POS 8 -#define HASH_STR_DCAL_MASK BIT(8) -#define HASH_STR_DEFAULT 0x0 - -#define HASH_STR_NBLW_POS 0 -#define HASH_STR_NBLW_MASK 0x0000001FUL - -#define 
HASH_NBLW_MAX_VAL 0x1F - -/* PrimeCell IDs */ -#define HASH_P_ID0 0xE0 -#define HASH_P_ID1 0x05 -#define HASH_P_ID2 0x38 -#define HASH_P_ID3 0x00 -#define HASH_CELL_ID0 0x0D -#define HASH_CELL_ID1 0xF0 -#define HASH_CELL_ID2 0x05 -#define HASH_CELL_ID3 0xB1 - -#define HASH_SET_BITS(reg_name, mask) \ - writel_relaxed((readl_relaxed(reg_name) | mask), reg_name) - -#define HASH_CLEAR_BITS(reg_name, mask) \ - writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name) - -#define HASH_PUT_BITS(reg, val, shift, mask) \ - writel_relaxed(((readl(reg) & ~(mask)) | \ - (((u32)val << shift) & (mask))), reg) - -#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len)) - -#define HASH_INITIALIZE \ - HASH_PUT_BITS( \ - &device_data->base->cr, \ - 0x01, HASH_CR_INIT_POS, \ - HASH_CR_INIT_MASK) - -#define HASH_SET_DATA_FORMAT(data_format) \ - HASH_PUT_BITS( \ - &device_data->base->cr, \ - (u32) (data_format), HASH_CR_DATAFORM_POS, \ - HASH_CR_DATAFORM_MASK) -#define HASH_SET_NBLW(val) \ - HASH_PUT_BITS( \ - &device_data->base->str, \ - (u32) (val), HASH_STR_NBLW_POS, \ - HASH_STR_NBLW_MASK) -#define HASH_SET_DCAL \ - HASH_PUT_BITS( \ - &device_data->base->str, \ - 0x01, HASH_STR_DCAL_POS, \ - HASH_STR_DCAL_MASK) - -/* Hardware access method */ -enum hash_mode { - HASH_MODE_CPU, - HASH_MODE_DMA -}; - -/** - * struct uint64 - Structure to handle 64 bits integers. - * @high_word: Most significant bits. - * @low_word: Least significant bits. - * - * Used to handle 64 bits integers. - */ -struct uint64 { - u32 high_word; - u32 low_word; -}; - -/** - * struct hash_register - Contains all registers in ux500 hash hardware. - * @cr: HASH control register (0x000). - * @din: HASH data input register (0x004). - * @str: HASH start register (0x008). - * @hx: HASH digest register 0..7 (0x00c-0x01C). - * @padding0: Reserved (0x02C). - * @itcr: Integration test control register (0x080). - * @itip: Integration test input register (0x084). - * @itop: Integration test output register (0x088). - * @padding1: Reserved (0x08C). - * @csfull: HASH context full register (0x0F8). - * @csdatain: HASH context swap data input register (0x0FC). - * @csrx: HASH context swap register 0..51 (0x100-0x1CC). - * @padding2: Reserved (0x1D0). - * @periphid0: HASH peripheral identification register 0 (0xFE0). - * @periphid1: HASH peripheral identification register 1 (0xFE4). - * @periphid2: HASH peripheral identification register 2 (0xFE8). - * @periphid3: HASH peripheral identification register 3 (0xFEC). - * @cellid0: HASH PCell identification register 0 (0xFF0). - * @cellid1: HASH PCell identification register 1 (0xFF4). - * @cellid2: HASH PCell identification register 2 (0xFF8). - * @cellid3: HASH PCell identification register 3 (0xFFC). - * - * The device communicates to the HASH via 32-bit-wide control registers - * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure - * with the registers used. - */ -struct hash_register { - u32 cr; - u32 din; - u32 str; - u32 hx[8]; - - u32 padding0[(0x080 - 0x02C) / sizeof(u32)]; - - u32 itcr; - u32 itip; - u32 itop; - - u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)]; - - u32 csfull; - u32 csdatain; - u32 csrx[HASH_CSR_COUNT]; - - u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)]; - - u32 periphid0; - u32 periphid1; - u32 periphid2; - u32 periphid3; - - u32 cellid0; - u32 cellid1; - u32 cellid2; - u32 cellid3; -}; - -/** - * struct hash_state - Hash context state. - * @temp_cr: Temporary HASH Control Register. - * @str_reg: HASH Start Register. 
- * @din_reg: HASH Data Input Register. - * @csr[52]: HASH Context Swap Registers 0-39. - * @csfull: HASH Context Swap Registers 40 ie Status flags. - * @csdatain: HASH Context Swap Registers 41 ie Input data. - * @buffer: Working buffer for messages going to the hardware. - * @length: Length of the part of message hashed so far (floor(N/64) * 64). - * @index: Valid number of bytes in buffer (N % 64). - * @bit_index: Valid number of bits in buffer (N % 8). - * - * This structure is used between context switches, i.e. when ongoing jobs are - * interupted with new jobs. When this happens we need to store intermediate - * results in software. - * - * WARNING: "index" is the member of the structure, to be sure that "buffer" - * is aligned on a 4-bytes boundary. This is highly implementation dependent - * and MUST be checked whenever this code is ported on new platforms. - */ -struct hash_state { - u32 temp_cr; - u32 str_reg; - u32 din_reg; - u32 csr[52]; - u32 csfull; - u32 csdatain; - u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)]; - struct uint64 length; - u8 index; - u8 bit_index; -}; - -/** - * enum hash_device_id - HASH device ID. - * @HASH_DEVICE_ID_0: Hash hardware with ID 0 - * @HASH_DEVICE_ID_1: Hash hardware with ID 1 - */ -enum hash_device_id { - HASH_DEVICE_ID_0 = 0, - HASH_DEVICE_ID_1 = 1 -}; - -/** - * enum hash_data_format - HASH data format. - * @HASH_DATA_32_BITS: 32 bits data format - * @HASH_DATA_16_BITS: 16 bits data format - * @HASH_DATA_8_BITS: 8 bits data format. - * @HASH_DATA_1_BITS: 1 bit data format. - */ -enum hash_data_format { - HASH_DATA_32_BITS = 0x0, - HASH_DATA_16_BITS = 0x1, - HASH_DATA_8_BITS = 0x2, - HASH_DATA_1_BIT = 0x3 -}; - -/** - * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm. - * @HASH_ALGO_SHA1: Indicates that SHA1 is used. - * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used. - */ -enum hash_algo { - HASH_ALGO_SHA1 = 0x0, - HASH_ALGO_SHA256 = 0x1 -}; - -/** - * enum hash_op - Enumeration for selecting between HASH or HMAC mode. - * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode. - * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC. - */ -enum hash_op { - HASH_OPER_MODE_HASH = 0x0, - HASH_OPER_MODE_HMAC = 0x1 -}; - -/** - * struct hash_config - Configuration data for the hardware. - * @data_format: Format of data entered into the hash data in register. - * @algorithm: Algorithm selection bit. - * @oper_mode: Operating mode selection bit. - */ -struct hash_config { - int data_format; - int algorithm; - int oper_mode; -}; - -/** - * struct hash_dma - Structure used for dma. - * @mask: DMA capabilities bitmap mask. - * @complete: Used to maintain state for a "completion". - * @chan_mem2hash: DMA channel. - * @cfg_mem2hash: DMA channel configuration. - * @sg_len: Scatterlist length. - * @sg: Scatterlist. - * @nents: Number of sg entries. - */ -struct hash_dma { - dma_cap_mask_t mask; - struct completion complete; - struct dma_chan *chan_mem2hash; - void *cfg_mem2hash; - int sg_len; - struct scatterlist *sg; - int nents; -}; - -/** - * struct hash_ctx - The context used for hash calculations. - * @key: The key used in the operation. - * @keylen: The length of the key. - * @state: The state of the current calculations. - * @config: The current configuration. - * @digestsize: The size of current digest. - * @device: Pointer to the device structure. 
- */ -struct hash_ctx { - u8 *key; - u32 keylen; - struct hash_config config; - int digestsize; - struct hash_device_data *device; -}; - -/** - * struct hash_ctx - The request context used for hash calculations. - * @state: The state of the current calculations. - * @dma_mode: Used in special cases (workaround), e.g. need to change to - * cpu mode, if not supported/working in dma mode. - * @updated: Indicates if hardware is initialized for new operations. - */ -struct hash_req_ctx { - struct hash_state state; - bool dma_mode; - u8 updated; -}; - -/** - * struct hash_device_data - structure for a hash device. - * @base: Pointer to virtual base address of the hash device. - * @phybase: Pointer to physical memory location of the hash device. - * @list_node: For inclusion in klist. - * @dev: Pointer to the device dev structure. - * @ctx_lock: Spinlock for current_ctx. - * @current_ctx: Pointer to the currently allocated context. - * @power_state: TRUE = power state on, FALSE = power state off. - * @power_state_lock: Spinlock for power_state. - * @regulator: Pointer to the device's power control. - * @clk: Pointer to the device's clock control. - * @restore_dev_state: TRUE = saved state, FALSE = no saved state. - * @dma: Structure used for dma. - */ -struct hash_device_data { - struct hash_register __iomem *base; - phys_addr_t phybase; - struct klist_node list_node; - struct device *dev; - spinlock_t ctx_lock; - struct hash_ctx *current_ctx; - bool power_state; - spinlock_t power_state_lock; - struct regulator *regulator; - struct clk *clk; - bool restore_dev_state; - struct hash_state state; /* Used for saving and resuming state */ - struct hash_dma dma; -}; - -int hash_check_hw(struct hash_device_data *device_data); - -int hash_setconfiguration(struct hash_device_data *device_data, - struct hash_config *config); - -void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx); - -void hash_get_digest(struct hash_device_data *device_data, - u8 *digest, int algorithm); - -int hash_hw_update(struct ahash_request *req); - -int hash_save_state(struct hash_device_data *device_data, - struct hash_state *state); - -int hash_resume_state(struct hash_device_data *device_data, - const struct hash_state *state); - -#endif diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c deleted file mode 100644 index f104e8a43036..000000000000 --- a/drivers/crypto/ux500/hash/hash_core.c +++ /dev/null @@ -1,1966 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Cryptographic API. - * Support for Nomadik hardware crypto engine. - - * Copyright (C) ST-Ericsson SA 2010 - * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson - * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson - * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson. - * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson. - * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson. 
- */ - -#define pr_fmt(fmt) "hashX hashX: " fmt - -#include <linux/clk.h> -#include <linux/device.h> -#include <linux/dma-mapping.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/klist.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mod_devicetable.h> -#include <linux/platform_device.h> -#include <linux/crypto.h> - -#include <linux/regulator/consumer.h> -#include <linux/dmaengine.h> -#include <linux/bitops.h> - -#include <crypto/internal/hash.h> -#include <crypto/sha1.h> -#include <crypto/sha2.h> -#include <crypto/scatterwalk.h> -#include <crypto/algapi.h> - -#include <linux/platform_data/crypto-ux500.h> - -#include "hash_alg.h" - -static int hash_mode; -module_param(hash_mode, int, 0); -MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); - -/* HMAC-SHA1, no key */ -static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { - 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, - 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63, - 0x70, 0x69, 0x0e, 0x1d -}; - -/* HMAC-SHA256, no key */ -static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = { - 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec, - 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5, - 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53, - 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad -}; - -/** - * struct hash_driver_data - data specific to the driver. - * - * @device_list: A list of registered devices to choose from. - * @device_allocation: A semaphore initialized with number of devices. - */ -struct hash_driver_data { - struct klist device_list; - struct semaphore device_allocation; -}; - -static struct hash_driver_data driver_data; - -/* Declaration of functions */ -/** - * hash_messagepad - Pads a message and write the nblw bits. - * @device_data: Structure for the hash device. - * @message: Last word of a message - * @index_bytes: The number of bytes in the last message - * - * This function manages the final part of the digest calculation, when less - * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. - * - */ -static void hash_messagepad(struct hash_device_data *device_data, - const u32 *message, u8 index_bytes); - -/** - * release_hash_device - Releases a previously allocated hash device. - * @device_data: Structure for the hash device. - * - */ -static void release_hash_device(struct hash_device_data *device_data) -{ - spin_lock(&device_data->ctx_lock); - device_data->current_ctx->device = NULL; - device_data->current_ctx = NULL; - spin_unlock(&device_data->ctx_lock); - - /* - * The down_interruptible part for this semaphore is called in - * cryp_get_device_data. 
- */ - up(&driver_data.device_allocation); -} - -static void hash_dma_setup_channel(struct hash_device_data *device_data, - struct device *dev) -{ - struct hash_platform_data *platform_data = dev->platform_data; - struct dma_slave_config conf = { - .direction = DMA_MEM_TO_DEV, - .dst_addr = device_data->phybase + HASH_DMA_FIFO, - .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, - .dst_maxburst = 16, - }; - - dma_cap_zero(device_data->dma.mask); - dma_cap_set(DMA_SLAVE, device_data->dma.mask); - - device_data->dma.cfg_mem2hash = platform_data->mem_to_engine; - device_data->dma.chan_mem2hash = - dma_request_channel(device_data->dma.mask, - platform_data->dma_filter, - device_data->dma.cfg_mem2hash); - - dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf); - - init_completion(&device_data->dma.complete); -} - -static void hash_dma_callback(void *data) -{ - struct hash_ctx *ctx = data; - - complete(&ctx->device->dma.complete); -} - -static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, - int len, enum dma_data_direction direction) -{ - struct dma_async_tx_descriptor *desc = NULL; - struct dma_chan *channel = NULL; - - if (direction != DMA_TO_DEVICE) { - dev_err(ctx->device->dev, "%s: Invalid DMA direction\n", - __func__); - return -EFAULT; - } - - sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE); - - channel = ctx->device->dma.chan_mem2hash; - ctx->device->dma.sg = sg; - ctx->device->dma.sg_len = dma_map_sg(channel->device->dev, - ctx->device->dma.sg, ctx->device->dma.nents, - direction); - - if (!ctx->device->dma.sg_len) { - dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n", - __func__); - return -EFAULT; - } - - dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n", - __func__); - desc = dmaengine_prep_slave_sg(channel, - ctx->device->dma.sg, ctx->device->dma.sg_len, - DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); - if (!desc) { - dev_err(ctx->device->dev, - "%s: dmaengine_prep_slave_sg() failed!\n", __func__); - return -EFAULT; - } - - desc->callback = hash_dma_callback; - desc->callback_param = ctx; - - dmaengine_submit(desc); - dma_async_issue_pending(channel); - - return 0; -} - -static void hash_dma_done(struct hash_ctx *ctx) -{ - struct dma_chan *chan; - - chan = ctx->device->dma.chan_mem2hash; - dmaengine_terminate_all(chan); - dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, - ctx->device->dma.nents, DMA_TO_DEVICE); -} - -static int hash_dma_write(struct hash_ctx *ctx, - struct scatterlist *sg, int len) -{ - int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); - if (error) { - dev_dbg(ctx->device->dev, - "%s: hash_set_dma_transfer() failed\n", __func__); - return error; - } - - return len; -} - -/** - * get_empty_message_digest - Returns a pre-calculated digest for - * the empty message. - * @device_data: Structure for the hash device. - * @zero_hash: Buffer to return the empty message digest. - * @zero_hash_size: Hash size of the empty message digest. - * @zero_digest: True if zero_digest returned. - */ -static int get_empty_message_digest( - struct hash_device_data *device_data, - u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest) -{ - int ret = 0; - struct hash_ctx *ctx = device_data->current_ctx; - *zero_digest = false; - - /** - * Caller responsible for ctx != NULL. 
- */ - - if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { - if (HASH_ALGO_SHA1 == ctx->config.algorithm) { - memcpy(zero_hash, &sha1_zero_message_hash[0], - SHA1_DIGEST_SIZE); - *zero_hash_size = SHA1_DIGEST_SIZE; - *zero_digest = true; - } else if (HASH_ALGO_SHA256 == - ctx->config.algorithm) { - memcpy(zero_hash, &sha256_zero_message_hash[0], - SHA256_DIGEST_SIZE); - *zero_hash_size = SHA256_DIGEST_SIZE; - *zero_digest = true; - } else { - dev_err(device_data->dev, "%s: Incorrect algorithm!\n", - __func__); - ret = -EINVAL; - goto out; - } - } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) { - if (!ctx->keylen) { - if (HASH_ALGO_SHA1 == ctx->config.algorithm) { - memcpy(zero_hash, &zero_message_hmac_sha1[0], - SHA1_DIGEST_SIZE); - *zero_hash_size = SHA1_DIGEST_SIZE; - *zero_digest = true; - } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { - memcpy(zero_hash, &zero_message_hmac_sha256[0], - SHA256_DIGEST_SIZE); - *zero_hash_size = SHA256_DIGEST_SIZE; - *zero_digest = true; - } else { - dev_err(device_data->dev, "%s: Incorrect algorithm!\n", - __func__); - ret = -EINVAL; - goto out; - } - } else { - dev_dbg(device_data->dev, - "%s: Continue hash calculation, since hmac key available\n", - __func__); - } - } -out: - - return ret; -} - -/** - * hash_disable_power - Request to disable power and clock. - * @device_data: Structure for the hash device. - * @save_device_state: If true, saves the current hw state. - * - * This function request for disabling power (regulator) and clock, - * and could also save current hw state. - */ -static int hash_disable_power(struct hash_device_data *device_data, - bool save_device_state) -{ - int ret = 0; - struct device *dev = device_data->dev; - - spin_lock(&device_data->power_state_lock); - if (!device_data->power_state) - goto out; - - if (save_device_state) { - hash_save_state(device_data, - &device_data->state); - device_data->restore_dev_state = true; - } - - clk_disable(device_data->clk); - ret = regulator_disable(device_data->regulator); - if (ret) - dev_err(dev, "%s: regulator_disable() failed!\n", __func__); - - device_data->power_state = false; - -out: - spin_unlock(&device_data->power_state_lock); - - return ret; -} - -/** - * hash_enable_power - Request to enable power and clock. - * @device_data: Structure for the hash device. - * @restore_device_state: If true, restores a previous saved hw state. - * - * This function request for enabling power (regulator) and clock, - * and could also restore a previously saved hw state. - */ -static int hash_enable_power(struct hash_device_data *device_data, - bool restore_device_state) -{ - int ret = 0; - struct device *dev = device_data->dev; - - spin_lock(&device_data->power_state_lock); - if (!device_data->power_state) { - ret = regulator_enable(device_data->regulator); - if (ret) { - dev_err(dev, "%s: regulator_enable() failed!\n", - __func__); - goto out; - } - ret = clk_enable(device_data->clk); - if (ret) { - dev_err(dev, "%s: clk_enable() failed!\n", __func__); - ret = regulator_disable( - device_data->regulator); - goto out; - } - device_data->power_state = true; - } - - if (device_data->restore_dev_state) { - if (restore_device_state) { - device_data->restore_dev_state = false; - hash_resume_state(device_data, &device_data->state); - } - } -out: - spin_unlock(&device_data->power_state_lock); - - return ret; -} - -/** - * hash_get_device_data - Checks for an available hash device and return it. - * @ctx: Structure for the hash context. 
- * @device_data: Structure for the hash device. - * - * This function check for an available hash device and return it to - * the caller. - * Note! Caller need to release the device, calling up(). - */ -static int hash_get_device_data(struct hash_ctx *ctx, - struct hash_device_data **device_data) -{ - int ret; - struct klist_iter device_iterator; - struct klist_node *device_node; - struct hash_device_data *local_device_data = NULL; - - /* Wait until a device is available */ - ret = down_interruptible(&driver_data.device_allocation); - if (ret) - return ret; /* Interrupted */ - - /* Select a device */ - klist_iter_init(&driver_data.device_list, &device_iterator); - device_node = klist_next(&device_iterator); - while (device_node) { - local_device_data = container_of(device_node, - struct hash_device_data, list_node); - spin_lock(&local_device_data->ctx_lock); - /* current_ctx allocates a device, NULL = unallocated */ - if (local_device_data->current_ctx) { - device_node = klist_next(&device_iterator); - } else { - local_device_data->current_ctx = ctx; - ctx->device = local_device_data; - spin_unlock(&local_device_data->ctx_lock); - break; - } - spin_unlock(&local_device_data->ctx_lock); - } - klist_iter_exit(&device_iterator); - - if (!device_node) { - /** - * No free device found. - * Since we allocated a device with down_interruptible, this - * should not be able to happen. - * Number of available devices, which are contained in - * device_allocation, is therefore decremented by not doing - * an up(device_allocation). - */ - return -EBUSY; - } - - *device_data = local_device_data; - - return 0; -} - -/** - * hash_hw_write_key - Writes the key to the hardware registries. - * - * @device_data: Structure for the hash device. - * @key: Key to be written. - * @keylen: The lengt of the key. - * - * Note! This function DOES NOT write to the NBLW registry, even though - * specified in the hw design spec. Either due to incorrect info in the - * spec or due to a bug in the hw. - */ -static void hash_hw_write_key(struct hash_device_data *device_data, - const u8 *key, unsigned int keylen) -{ - u32 word = 0; - int nwords = 1; - - HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); - - while (keylen >= 4) { - u32 *key_word = (u32 *)key; - - HASH_SET_DIN(key_word, nwords); - keylen -= 4; - key += 4; - } - - /* Take care of the remaining bytes in the last word */ - if (keylen) { - word = 0; - while (keylen) { - word |= (key[keylen - 1] << (8 * (keylen - 1))); - keylen--; - } - - HASH_SET_DIN(&word, nwords); - } - - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); - - HASH_SET_DCAL; - - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); -} - -/** - * init_hash_hw - Initialise the hash hardware for a new calculation. - * @device_data: Structure for the hash device. - * @ctx: The hash context. - * - * This function will enable the bits needed to clear and start a new - * calculation. - */ -static int init_hash_hw(struct hash_device_data *device_data, - struct hash_ctx *ctx) -{ - int ret = 0; - - ret = hash_setconfiguration(device_data, &ctx->config); - if (ret) { - dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n", - __func__); - return ret; - } - - hash_begin(device_data, ctx); - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) - hash_hw_write_key(device_data, ctx->key, ctx->keylen); - - return ret; -} - -/** - * hash_get_nents - Return number of entries (nents) in scatterlist (sg). - * - * @sg: Scatterlist. 
- * @size: Size in bytes. - * @aligned: True if sg data aligned to work in DMA mode. - * - */ -static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) -{ - int nents = 0; - bool aligned_data = true; - - while (size > 0 && sg) { - nents++; - size -= sg->length; - - /* hash_set_dma_transfer will align last nent */ - if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) || - (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0)) - aligned_data = false; - - sg = sg_next(sg); - } - - if (aligned) - *aligned = aligned_data; - - if (size != 0) - return -EFAULT; - - return nents; -} - -/** - * hash_dma_valid_data - checks for dma valid sg data. - * @sg: Scatterlist. - * @datasize: Datasize in bytes. - * - * NOTE! This function checks for dma valid sg data, since dma - * only accept datasizes of even wordsize. - */ -static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) -{ - bool aligned; - - /* Need to include at least one nent, else error */ - if (hash_get_nents(sg, datasize, &aligned) < 1) - return false; - - return aligned; -} - -/** - * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256). - * @req: The hash request for the job. - * - * Initialize structures. - */ -static int ux500_hash_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct hash_req_ctx *req_ctx = ahash_request_ctx(req); - - if (!ctx->key) - ctx->keylen = 0; - - memset(&req_ctx->state, 0, sizeof(struct hash_state)); - req_ctx->updated = 0; - if (hash_mode == HASH_MODE_DMA) { - if (req->nbytes < HASH_DMA_ALIGN_SIZE) { - req_ctx->dma_mode = false; /* Don't use DMA */ - - pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n", - __func__, HASH_DMA_ALIGN_SIZE); - } else { - if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE && - hash_dma_valid_data(req->src, req->nbytes)) { - req_ctx->dma_mode = true; - } else { - req_ctx->dma_mode = false; - pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n", - __func__, - HASH_DMA_PERFORMANCE_MIN_SIZE); - } - } - } - return 0; -} - -/** - * hash_processblock - This function processes a single block of 512 bits (64 - * bytes), word aligned, starting at message. - * @device_data: Structure for the hash device. - * @message: Block (512 bits) of message to be written to - * the HASH hardware. - * @length: Message length - * - */ -static void hash_processblock(struct hash_device_data *device_data, - const u32 *message, int length) -{ - int len = length / HASH_BYTES_PER_WORD; - /* - * NBLW bits. Reset the number of bits in last word (NBLW). - */ - HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); - - /* - * Write message data to the HASH_DIN register. - */ - HASH_SET_DIN(message, len); -} - -/** - * hash_messagepad - Pads a message and write the nblw bits. - * @device_data: Structure for the hash device. - * @message: Last word of a message. - * @index_bytes: The number of bytes in the last message. - * - * This function manages the final part of the digest calculation, when less - * than 512 bits (64 bytes) remain in message. This means index_bytes < 64. - * - */ -static void hash_messagepad(struct hash_device_data *device_data, - const u32 *message, u8 index_bytes) -{ - int nwords = 1; - - /* - * Clear hash str register, only clear NBLW - * since DCAL will be reset by hardware. 
- */ - HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); - - /* Main loop */ - while (index_bytes >= 4) { - HASH_SET_DIN(message, nwords); - index_bytes -= 4; - message++; - } - - if (index_bytes) - HASH_SET_DIN(message, nwords); - - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); - - /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */ - HASH_SET_NBLW(index_bytes * 8); - dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n", - __func__, readl_relaxed(&device_data->base->din), - readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); - HASH_SET_DCAL; - dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n", - __func__, readl_relaxed(&device_data->base->din), - readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK); - - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); -} - -/** - * hash_incrementlength - Increments the length of the current message. - * @ctx: Hash context - * @incr: Length of message processed already - * - * Overflow cannot occur, because conditions for overflow are checked in - * hash_hw_update. - */ -static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr) -{ - ctx->state.length.low_word += incr; - - /* Check for wrap-around */ - if (ctx->state.length.low_word < incr) - ctx->state.length.high_word++; -} - -/** - * hash_setconfiguration - Sets the required configuration for the hash - * hardware. - * @device_data: Structure for the hash device. - * @config: Pointer to a configuration structure. - */ -int hash_setconfiguration(struct hash_device_data *device_data, - struct hash_config *config) -{ - int ret = 0; - - if (config->algorithm != HASH_ALGO_SHA1 && - config->algorithm != HASH_ALGO_SHA256) - return -EPERM; - - /* - * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data - * to be written to HASH_DIN is considered as 32 bits. - */ - HASH_SET_DATA_FORMAT(config->data_format); - - /* - * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256 - */ - switch (config->algorithm) { - case HASH_ALGO_SHA1: - HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); - break; - - case HASH_ALGO_SHA256: - HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK); - break; - - default: - dev_err(device_data->dev, "%s: Incorrect algorithm\n", - __func__); - return -EPERM; - } - - /* - * MODE bit. This bit selects between HASH or HMAC mode for the - * selected algorithm. 0b0 = HASH and 0b1 = HMAC. - */ - if (HASH_OPER_MODE_HASH == config->oper_mode) - HASH_CLEAR_BITS(&device_data->base->cr, - HASH_CR_MODE_MASK); - else if (HASH_OPER_MODE_HMAC == config->oper_mode) { - HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK); - if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) { - /* Truncate key to blocksize */ - dev_dbg(device_data->dev, "%s: LKEY set\n", __func__); - HASH_SET_BITS(&device_data->base->cr, - HASH_CR_LKEY_MASK); - } else { - dev_dbg(device_data->dev, "%s: LKEY cleared\n", - __func__); - HASH_CLEAR_BITS(&device_data->base->cr, - HASH_CR_LKEY_MASK); - } - } else { /* Wrong hash mode */ - ret = -EPERM; - dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", - __func__); - } - return ret; -} - -/** - * hash_begin - This routine resets some globals and initializes the hash - * hardware. - * @device_data: Structure for the hash device. - * @ctx: Hash context. 
- */ -void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) -{ - /* HW and SW initializations */ - /* Note: there is no need to initialize buffer and digest members */ - - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); - - /* - * INIT bit. Set this bit to 0b1 to reset the HASH processor core and - * prepare the initialize the HASH accelerator to compute the message - * digest of a new message. - */ - HASH_INITIALIZE; - - /* - * NBLW bits. Reset the number of bits in last word (NBLW). - */ - HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK); -} - -static int hash_process_data(struct hash_device_data *device_data, - struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, - int msg_length, u8 *data_buffer, u8 *buffer, - u8 *index) -{ - int ret = 0; - u32 count; - - do { - if ((*index + msg_length) < HASH_BLOCK_SIZE) { - for (count = 0; count < msg_length; count++) { - buffer[*index + count] = - *(data_buffer + count); - } - *index += msg_length; - msg_length = 0; - } else { - if (req_ctx->updated) { - ret = hash_resume_state(device_data, - &device_data->state); - memmove(req_ctx->state.buffer, - device_data->state.buffer, - HASH_BLOCK_SIZE); - if (ret) { - dev_err(device_data->dev, - "%s: hash_resume_state() failed!\n", - __func__); - goto out; - } - } else { - ret = init_hash_hw(device_data, ctx); - if (ret) { - dev_err(device_data->dev, - "%s: init_hash_hw() failed!\n", - __func__); - goto out; - } - req_ctx->updated = 1; - } - /* - * If 'data_buffer' is four byte aligned and - * local buffer does not have any data, we can - * write data directly from 'data_buffer' to - * HW peripheral, otherwise we first copy data - * to a local buffer - */ - if (IS_ALIGNED((unsigned long)data_buffer, 4) && - (0 == *index)) - hash_processblock(device_data, - (const u32 *)data_buffer, - HASH_BLOCK_SIZE); - else { - for (count = 0; - count < (u32)(HASH_BLOCK_SIZE - *index); - count++) { - buffer[*index + count] = - *(data_buffer + count); - } - hash_processblock(device_data, - (const u32 *)buffer, - HASH_BLOCK_SIZE); - } - hash_incrementlength(req_ctx, HASH_BLOCK_SIZE); - data_buffer += (HASH_BLOCK_SIZE - *index); - - msg_length -= (HASH_BLOCK_SIZE - *index); - *index = 0; - - ret = hash_save_state(device_data, - &device_data->state); - - memmove(device_data->state.buffer, - req_ctx->state.buffer, - HASH_BLOCK_SIZE); - if (ret) { - dev_err(device_data->dev, "%s: hash_save_state() failed!\n", - __func__); - goto out; - } - } - } while (msg_length != 0); -out: - - return ret; -} - -/** - * hash_dma_final - The hash dma final function for SHA1/SHA256. - * @req: The hash request for the job. 
- */ -static int hash_dma_final(struct ahash_request *req) -{ - int ret = 0; - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct hash_req_ctx *req_ctx = ahash_request_ctx(req); - struct hash_device_data *device_data; - u8 digest[SHA256_DIGEST_SIZE]; - int bytes_written = 0; - - ret = hash_get_device_data(ctx, &device_data); - if (ret) - return ret; - - dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__, - (unsigned long)ctx); - - if (req_ctx->updated) { - ret = hash_resume_state(device_data, &device_data->state); - - if (ret) { - dev_err(device_data->dev, "%s: hash_resume_state() failed!\n", - __func__); - goto out; - } - } else { - ret = hash_setconfiguration(device_data, &ctx->config); - if (ret) { - dev_err(device_data->dev, - "%s: hash_setconfiguration() failed!\n", - __func__); - goto out; - } - - /* Enable DMA input */ - if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) { - HASH_CLEAR_BITS(&device_data->base->cr, - HASH_CR_DMAE_MASK); - } else { - HASH_SET_BITS(&device_data->base->cr, - HASH_CR_DMAE_MASK); - HASH_SET_BITS(&device_data->base->cr, - HASH_CR_PRIVN_MASK); - } - - HASH_INITIALIZE; - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) - hash_hw_write_key(device_data, ctx->key, ctx->keylen); - - /* Number of bits in last word = (nbytes * 8) % 32 */ - HASH_SET_NBLW((req->nbytes * 8) % 32); - req_ctx->updated = 1; - } - - /* Store the nents in the dma struct. */ - ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); - if (!ctx->device->dma.nents) { - dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n", - __func__); - ret = ctx->device->dma.nents; - goto out; - } - - bytes_written = hash_dma_write(ctx, req->src, req->nbytes); - if (bytes_written != req->nbytes) { - dev_err(device_data->dev, "%s: hash_dma_write() failed!\n", - __func__); - ret = bytes_written; - goto out; - } - - wait_for_completion(&ctx->device->dma.complete); - hash_dma_done(ctx); - - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { - unsigned int keylen = ctx->keylen; - u8 *key = ctx->key; - - dev_dbg(device_data->dev, "%s: keylen: %d\n", - __func__, ctx->keylen); - hash_hw_write_key(device_data, key, keylen); - } - - hash_get_digest(device_data, digest, ctx->config.algorithm); - memcpy(req->result, digest, ctx->digestsize); - -out: - release_hash_device(device_data); - - /** - * Allocated in setkey, and only used in HMAC. - */ - kfree(ctx->key); - - return ret; -} - -/** - * hash_hw_final - The final hash calculation function - * @req: The hash request for the job. 
- */ -static int hash_hw_final(struct ahash_request *req) -{ - int ret = 0; - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct hash_req_ctx *req_ctx = ahash_request_ctx(req); - struct hash_device_data *device_data; - u8 digest[SHA256_DIGEST_SIZE]; - - ret = hash_get_device_data(ctx, &device_data); - if (ret) - return ret; - - dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__, - (unsigned long)ctx); - - if (req_ctx->updated) { - ret = hash_resume_state(device_data, &device_data->state); - - if (ret) { - dev_err(device_data->dev, - "%s: hash_resume_state() failed!\n", __func__); - goto out; - } - } else if (req->nbytes == 0 && ctx->keylen == 0) { - u8 zero_hash[SHA256_DIGEST_SIZE]; - u32 zero_hash_size = 0; - bool zero_digest = false; - /** - * Use a pre-calculated empty message digest - * (workaround since hw return zeroes, hw bug!?) - */ - ret = get_empty_message_digest(device_data, &zero_hash[0], - &zero_hash_size, &zero_digest); - if (!ret && likely(zero_hash_size == ctx->digestsize) && - zero_digest) { - memcpy(req->result, &zero_hash[0], ctx->digestsize); - goto out; - } else if (!ret && !zero_digest) { - dev_dbg(device_data->dev, - "%s: HMAC zero msg with key, continue...\n", - __func__); - } else { - dev_err(device_data->dev, - "%s: ret=%d, or wrong digest size? %s\n", - __func__, ret, - zero_hash_size == ctx->digestsize ? - "true" : "false"); - /* Return error */ - goto out; - } - } else if (req->nbytes == 0 && ctx->keylen > 0) { - ret = -EPERM; - dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", - __func__); - goto out; - } - - if (!req_ctx->updated) { - ret = init_hash_hw(device_data, ctx); - if (ret) { - dev_err(device_data->dev, - "%s: init_hash_hw() failed!\n", __func__); - goto out; - } - } - - if (req_ctx->state.index) { - hash_messagepad(device_data, req_ctx->state.buffer, - req_ctx->state.index); - } else { - HASH_SET_DCAL; - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); - } - - if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { - unsigned int keylen = ctx->keylen; - u8 *key = ctx->key; - - dev_dbg(device_data->dev, "%s: keylen: %d\n", - __func__, ctx->keylen); - hash_hw_write_key(device_data, key, keylen); - } - - hash_get_digest(device_data, digest, ctx->config.algorithm); - memcpy(req->result, digest, ctx->digestsize); - -out: - release_hash_device(device_data); - - /** - * Allocated in setkey, and only used in HMAC. - */ - kfree(ctx->key); - - return ret; -} - -/** - * hash_hw_update - Updates current HASH computation hashing another part of - * the message. - * @req: Byte array containing the message to be hashed (caller - * allocated). 
- */ -int hash_hw_update(struct ahash_request *req) -{ - int ret = 0; - u8 index = 0; - u8 *buffer; - struct hash_device_data *device_data; - u8 *data_buffer; - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct hash_ctx *ctx = crypto_ahash_ctx(tfm); - struct hash_req_ctx *req_ctx = ahash_request_ctx(req); - struct crypto_hash_walk walk; - int msg_length; - - index = req_ctx->state.index; - buffer = (u8 *)req_ctx->state.buffer; - - ret = hash_get_device_data(ctx, &device_data); - if (ret) - return ret; - - msg_length = crypto_hash_walk_first(req, &walk); - - /* Empty message ("") is correct indata */ - if (msg_length == 0) { - ret = 0; - goto release_dev; - } - - /* Check if ctx->state.length + msg_length - overflows */ - if (msg_length > (req_ctx->state.length.low_word + msg_length) && - HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) { - pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__); - ret = crypto_hash_walk_done(&walk, -EPERM); - goto release_dev; - } - - /* Main loop */ - while (0 != msg_length) { - data_buffer = walk.data; - ret = hash_process_data(device_data, ctx, req_ctx, msg_length, - data_buffer, buffer, &index); - - if (ret) { - dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n", - __func__); - crypto_hash_walk_done(&walk, ret); - goto release_dev; - } - - msg_length = crypto_hash_walk_done(&walk, 0); - } - - req_ctx->state.index = index; - dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n", - __func__, req_ctx->state.index, req_ctx->state.bit_index); - -release_dev: - release_hash_device(device_data); - - return ret; -} - -/** - * hash_resume_state - Function that resumes the state of an calculation. - * @device_data: Pointer to the device structure. - * @device_state: The state to be restored in the hash hardware - */ -int hash_resume_state(struct hash_device_data *device_data, - const struct hash_state *device_state) -{ - u32 temp_cr; - s32 count; - int hash_mode = HASH_OPER_MODE_HASH; - - if (NULL == device_state) { - dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", - __func__); - return -EPERM; - } - - /* Check correctness of index and length members */ - if (device_state->index > HASH_BLOCK_SIZE || - (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) { - dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", - __func__); - return -EPERM; - } - - /* - * INIT bit. Set this bit to 0b1 to reset the HASH processor core and - * prepare the initialize the HASH accelerator to compute the message - * digest of a new message. - */ - HASH_INITIALIZE; - - temp_cr = device_state->temp_cr; - writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr); - - if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK) - hash_mode = HASH_OPER_MODE_HMAC; - else - hash_mode = HASH_OPER_MODE_HASH; - - for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) - break; - - writel_relaxed(device_state->csr[count], - &device_data->base->csrx[count]); - } - - writel_relaxed(device_state->csfull, &device_data->base->csfull); - writel_relaxed(device_state->csdatain, &device_data->base->csdatain); - - writel_relaxed(device_state->str_reg, &device_data->base->str); - writel_relaxed(temp_cr, &device_data->base->cr); - - return 0; -} - -/** - * hash_save_state - Function that saves the state of hardware. - * @device_data: Pointer to the device structure. - * @device_state: The strucure where the hardware state should be saved. 
- */ -int hash_save_state(struct hash_device_data *device_data, - struct hash_state *device_state) -{ - u32 temp_cr; - u32 count; - int hash_mode = HASH_OPER_MODE_HASH; - - if (NULL == device_state) { - dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n", - __func__); - return -ENOTSUPP; - } - - /* Write dummy value to force digest intermediate calculation. This - * actually makes sure that there isn't any ongoing calculation in the - * hardware. - */ - while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK) - cpu_relax(); - - temp_cr = readl_relaxed(&device_data->base->cr); - - device_state->str_reg = readl_relaxed(&device_data->base->str); - - device_state->din_reg = readl_relaxed(&device_data->base->din); - - if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK) - hash_mode = HASH_OPER_MODE_HMAC; - else - hash_mode = HASH_OPER_MODE_HASH; - - for (count = 0; count < HASH_CSR_COUNT; count++) { - if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH)) - break; - - device_state->csr[count] = - readl_relaxed(&device_data->base->csrx[count]); - } - - device_state->csfull = readl_relaxed(&device_data->base->csfull); - device_state->csdatain = readl_relaxed(&device_data->base->csdatain); - - device_state->temp_cr = temp_cr; - - return 0; -} - -/** - * hash_check_hw - This routine checks for peripheral Ids and PCell Ids. - * @device_data: - * - */ -int hash_check_hw(struct hash_device_data *device_data) -{ - /* Checking Peripheral Ids */ - if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) && - HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) && - HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) && - HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) && - HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) && - HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) && - HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) && - HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) { - return 0; - } - - dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__); - return -ENOTSUPP; -} - -/** - * hash_get_digest - Gets the digest. - * @device_data: Pointer to the device structure. - * @digest: User allocated byte array for the calculated digest. - * @algorithm: The algorithm in use. - */ -void hash_get_digest(struct hash_device_data *device_data, - u8 *digest, int algorithm) -{ - u32 temp_hx_val, count; - int loop_ctr; - - if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) { - dev_err(device_data->dev, "%s: Incorrect algorithm %d\n", - __func__, algorithm); - return; - } - - if (algorithm == HASH_ALGO_SHA1) - loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32); - else - loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32); - - dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n", - __func__, (unsigned long)digest); - - /* Copy result into digest array */ - for (count = 0; count < loop_ctr; count++) { - temp_hx_val = readl_relaxed(&device_data->base->hx[count]); - digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF); - digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF); - digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF); - digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF); - } -} - -/** - * ahash_update - The hash update function for SHA1/SHA2 (SHA256). - * @req: The hash request for the job. 
- */
-static int ahash_update(struct ahash_request *req)
-{
-	int ret = 0;
-	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
-	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
-		ret = hash_hw_update(req);
-	/* Skip update for DMA, all data will be passed to DMA in final */
-
-	if (ret) {
-		pr_err("%s: hash_hw_update() failed!\n", __func__);
-	}
-
-	return ret;
-}
-
-/**
- * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
- * @req: The hash request for the job.
- */
-static int ahash_final(struct ahash_request *req)
-{
-	int ret = 0;
-	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
-	pr_debug("%s: data size: %d\n", __func__, req->nbytes);
-
-	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
-		ret = hash_dma_final(req);
-	else
-		ret = hash_hw_final(req);
-
-	if (ret) {
-		pr_err("%s: hash_hw/dma_final() failed\n", __func__);
-	}
-
-	return ret;
-}
-
-static int hash_setkey(struct crypto_ahash *tfm,
-		       const u8 *key, unsigned int keylen, int alg)
-{
-	int ret = 0;
-	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
-	/**
-	 * Freed in final.
-	 */
-	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
-	if (!ctx->key) {
-		pr_err("%s: Failed to allocate ctx->key for %d\n",
-		       __func__, alg);
-		return -ENOMEM;
-	}
-	ctx->keylen = keylen;
-
-	return ret;
-}
-
-static int ahash_sha1_init(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
-	ctx->config.data_format = HASH_DATA_8_BITS;
-	ctx->config.algorithm = HASH_ALGO_SHA1;
-	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
-	ctx->digestsize = SHA1_DIGEST_SIZE;
-
-	return ux500_hash_init(req);
-}
-
-static int ahash_sha256_init(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
-	ctx->config.data_format = HASH_DATA_8_BITS;
-	ctx->config.algorithm = HASH_ALGO_SHA256;
-	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
-	ctx->digestsize = SHA256_DIGEST_SIZE;
-
-	return ux500_hash_init(req);
-}
-
-static int ahash_sha1_digest(struct ahash_request *req)
-{
-	int ret2, ret1;
-
-	ret1 = ahash_sha1_init(req);
-	if (ret1)
-		goto out;
-
-	ret1 = ahash_update(req);
-	ret2 = ahash_final(req);
-
-out:
-	return ret1 ? ret1 : ret2;
-}
-
-static int ahash_sha256_digest(struct ahash_request *req)
-{
-	int ret2, ret1;
-
-	ret1 = ahash_sha256_init(req);
-	if (ret1)
-		goto out;
-
-	ret1 = ahash_update(req);
-	ret2 = ahash_final(req);
-
-out:
-	return ret1 ? ret1 : ret2;
-}
-
-static int ahash_noimport(struct ahash_request *req, const void *in)
-{
-	return -ENOSYS;
-}
-
-static int ahash_noexport(struct ahash_request *req, void *out)
-{
-	return -ENOSYS;
-}
-
-static int hmac_sha1_init(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
-	ctx->config.data_format = HASH_DATA_8_BITS;
-	ctx->config.algorithm = HASH_ALGO_SHA1;
-	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
-	ctx->digestsize = SHA1_DIGEST_SIZE;
-
-	return ux500_hash_init(req);
-}
-
-static int hmac_sha256_init(struct ahash_request *req)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
-	ctx->config.data_format = HASH_DATA_8_BITS;
-	ctx->config.algorithm = HASH_ALGO_SHA256;
-	ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
-	ctx->digestsize = SHA256_DIGEST_SIZE;
-
-	return ux500_hash_init(req);
-}
-
-static int hmac_sha1_digest(struct ahash_request *req)
-{
-	int ret2, ret1;
-
-	ret1 = hmac_sha1_init(req);
-	if (ret1)
-		goto out;
-
-	ret1 = ahash_update(req);
-	ret2 = ahash_final(req);
-
-out:
-	return ret1 ? ret1 : ret2;
-}
-
-static int hmac_sha256_digest(struct ahash_request *req)
-{
-	int ret2, ret1;
-
-	ret1 = hmac_sha256_init(req);
-	if (ret1)
-		goto out;
-
-	ret1 = ahash_update(req);
-	ret2 = ahash_final(req);
-
-out:
-	return ret1 ? ret1 : ret2;
-}
-
-static int hmac_sha1_setkey(struct crypto_ahash *tfm,
-			    const u8 *key, unsigned int keylen)
-{
-	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
-}
-
-static int hmac_sha256_setkey(struct crypto_ahash *tfm,
-			      const u8 *key, unsigned int keylen)
-{
-	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
-}
-
-struct hash_algo_template {
-	struct hash_config conf;
-	struct ahash_alg hash;
-};
-
-static int hash_cra_init(struct crypto_tfm *tfm)
-{
-	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct hash_algo_template *hash_alg;
-
-	hash_alg = container_of(__crypto_ahash_alg(alg),
-				struct hash_algo_template,
-				hash);
-
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct hash_req_ctx));
-
-	ctx->config.data_format = HASH_DATA_8_BITS;
-	ctx->config.algorithm = hash_alg->conf.algorithm;
-	ctx->config.oper_mode = hash_alg->conf.oper_mode;
-
-	ctx->digestsize = hash_alg->hash.halg.digestsize;
-
-	return 0;
-}
-
-static struct hash_algo_template hash_algs[] = {
-	{
-		.conf.algorithm = HASH_ALGO_SHA1,
-		.conf.oper_mode = HASH_OPER_MODE_HASH,
-		.hash = {
-			.init = ux500_hash_init,
-			.update = ahash_update,
-			.final = ahash_final,
-			.digest = ahash_sha1_digest,
-			.export = ahash_noexport,
-			.import = ahash_noimport,
-			.halg.digestsize = SHA1_DIGEST_SIZE,
-			.halg.statesize = sizeof(struct hash_ctx),
-			.halg.base = {
-				.cra_name = "sha1",
-				.cra_driver_name = "sha1-ux500",
-				.cra_flags = CRYPTO_ALG_ASYNC,
-				.cra_blocksize = SHA1_BLOCK_SIZE,
-				.cra_ctxsize = sizeof(struct hash_ctx),
-				.cra_init = hash_cra_init,
-				.cra_module = THIS_MODULE,
-			}
-		}
-	},
-	{
-		.conf.algorithm = HASH_ALGO_SHA256,
-		.conf.oper_mode = HASH_OPER_MODE_HASH,
-		.hash = {
-			.init = ux500_hash_init,
-			.update = ahash_update,
-			.final = ahash_final,
-			.digest = ahash_sha256_digest,
-			.export = ahash_noexport,
-			.import = ahash_noimport,
-			.halg.digestsize = SHA256_DIGEST_SIZE,
-			.halg.statesize = sizeof(struct hash_ctx),
-			.halg.base = {
-				.cra_name = "sha256",
-				.cra_driver_name = "sha256-ux500",
-				.cra_flags = CRYPTO_ALG_ASYNC,
-				.cra_blocksize = SHA256_BLOCK_SIZE,
-				.cra_ctxsize = sizeof(struct hash_ctx),
-				.cra_init = hash_cra_init,
-				.cra_module = THIS_MODULE,
-			}
-		}
-	},
-	{
-		.conf.algorithm = HASH_ALGO_SHA1,
-		.conf.oper_mode = HASH_OPER_MODE_HMAC,
-		.hash = {
-			.init = ux500_hash_init,
-			.update = ahash_update,
-			.final = ahash_final,
-			.digest = hmac_sha1_digest,
-			.setkey = hmac_sha1_setkey,
-			.export = ahash_noexport,
-			.import = ahash_noimport,
-			.halg.digestsize = SHA1_DIGEST_SIZE,
-			.halg.statesize = sizeof(struct hash_ctx),
-			.halg.base = {
-				.cra_name = "hmac(sha1)",
-				.cra_driver_name = "hmac-sha1-ux500",
-				.cra_flags = CRYPTO_ALG_ASYNC,
-				.cra_blocksize = SHA1_BLOCK_SIZE,
-				.cra_ctxsize = sizeof(struct hash_ctx),
-				.cra_init = hash_cra_init,
-				.cra_module = THIS_MODULE,
-			}
-		}
-	},
-	{
-		.conf.algorithm = HASH_ALGO_SHA256,
-		.conf.oper_mode = HASH_OPER_MODE_HMAC,
-		.hash = {
-			.init = ux500_hash_init,
-			.update = ahash_update,
-			.final = ahash_final,
-			.digest = hmac_sha256_digest,
-			.setkey = hmac_sha256_setkey,
-			.export = ahash_noexport,
-			.import = ahash_noimport,
-			.halg.digestsize = SHA256_DIGEST_SIZE,
-			.halg.statesize = sizeof(struct hash_ctx),
-			.halg.base = {
-				.cra_name = "hmac(sha256)",
-				.cra_driver_name = "hmac-sha256-ux500",
-				.cra_flags = CRYPTO_ALG_ASYNC,
-				.cra_blocksize = SHA256_BLOCK_SIZE,
-				.cra_ctxsize = sizeof(struct hash_ctx),
-				.cra_init = hash_cra_init,
-				.cra_module = THIS_MODULE,
-			}
-		}
-	}
-};
-
-static int ahash_algs_register_all(struct hash_device_data *device_data)
-{
-	int ret;
-	int i;
-	int count;
-
-	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
-		ret = crypto_register_ahash(&hash_algs[i].hash);
-		if (ret) {
-			count = i;
-			dev_err(device_data->dev, "%s: alg registration failed\n",
-				hash_algs[i].hash.halg.base.cra_driver_name);
-			goto unreg;
-		}
-	}
-	return 0;
-unreg:
-	for (i = 0; i < count; i++)
-		crypto_unregister_ahash(&hash_algs[i].hash);
-	return ret;
-}
-
-static void ahash_algs_unregister_all(struct hash_device_data *device_data)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
-		crypto_unregister_ahash(&hash_algs[i].hash);
-}
-
-/**
- * ux500_hash_probe - Function that probes the hash hardware.
- * @pdev: The platform device.
- */
-static int ux500_hash_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct resource *res = NULL;
-	struct hash_device_data *device_data;
-	struct device *dev = &pdev->dev;
-
-	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
-	if (!device_data) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	device_data->dev = dev;
-	device_data->current_ctx = NULL;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
-		ret = -ENODEV;
-		goto out;
-	}
-
-	device_data->phybase = res->start;
-	device_data->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(device_data->base)) {
-		ret = PTR_ERR(device_data->base);
-		goto out;
-	}
-	spin_lock_init(&device_data->ctx_lock);
-	spin_lock_init(&device_data->power_state_lock);
-
-	/* Enable power for HASH1 hardware block */
-	device_data->regulator = regulator_get(dev, "v-ape");
-	if (IS_ERR(device_data->regulator)) {
-		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
-		ret = PTR_ERR(device_data->regulator);
-		device_data->regulator = NULL;
-		goto out;
-	}
-
-	/* Enable the clock for HASH1 hardware block */
-	device_data->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(device_data->clk)) {
-		dev_err(dev, "%s: clk_get() failed!\n", __func__);
-		ret = PTR_ERR(device_data->clk);
-		goto out_regulator;
-	}
-
-	ret = clk_prepare(device_data->clk);
-	if (ret) {
-		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
-		goto out_regulator;
-	}
-
-	/* Enable device power (and clock) */
-	ret = hash_enable_power(device_data, false);
-	if (ret) {
-		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
-		goto out_clk_unprepare;
-	}
-
-	ret = hash_check_hw(device_data);
-	if (ret) {
-		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
-		goto out_power;
-	}
-
-	if (hash_mode == HASH_MODE_DMA)
-		hash_dma_setup_channel(device_data, dev);
-
-	platform_set_drvdata(pdev, device_data);
-
-	/* Put the new device into the device list... */
-	klist_add_tail(&device_data->list_node, &driver_data.device_list);
-	/* ... and signal that a new device is available. */
-	up(&driver_data.device_allocation);
-
-	ret = ahash_algs_register_all(device_data);
-	if (ret) {
-		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
-			__func__);
-		goto out_power;
-	}
-
-	dev_info(dev, "successfully registered\n");
-	return 0;
-
-out_power:
-	hash_disable_power(device_data, false);
-
-out_clk_unprepare:
-	clk_unprepare(device_data->clk);
-
-out_regulator:
-	regulator_put(device_data->regulator);
-
-out:
-	return ret;
-}
-
-/**
- * ux500_hash_remove - Function that removes the hash device from the platform.
- * @pdev: The platform device.
- */
-static int ux500_hash_remove(struct platform_device *pdev)
-{
-	struct hash_device_data *device_data;
-	struct device *dev = &pdev->dev;
-
-	device_data = platform_get_drvdata(pdev);
-	if (!device_data) {
-		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
-		return -ENOMEM;
-	}
-
-	/* Try to decrease the number of available devices. */
-	if (down_trylock(&driver_data.device_allocation))
-		return -EBUSY;
-
-	/* Check that the device is free */
-	spin_lock(&device_data->ctx_lock);
-	/* current_ctx allocates a device, NULL = unallocated */
-	if (device_data->current_ctx) {
-		/* The device is busy */
-		spin_unlock(&device_data->ctx_lock);
-		/* Return the device to the pool. */
-		up(&driver_data.device_allocation);
-		return -EBUSY;
-	}
-
-	spin_unlock(&device_data->ctx_lock);
-
-	/* Remove the device from the list */
-	if (klist_node_attached(&device_data->list_node))
-		klist_remove(&device_data->list_node);
-
-	/* If this was the last device, remove the services */
-	if (list_empty(&driver_data.device_list.k_list))
-		ahash_algs_unregister_all(device_data);
-
-	if (hash_disable_power(device_data, false))
-		dev_err(dev, "%s: hash_disable_power() failed\n",
-			__func__);
-
-	clk_unprepare(device_data->clk);
-	regulator_put(device_data->regulator);
-
-	return 0;
-}
-
-/**
- * ux500_hash_shutdown - Function that shutdown the hash device.
- * @pdev: The platform device
- */
-static void ux500_hash_shutdown(struct platform_device *pdev)
-{
-	struct hash_device_data *device_data;
-
-	device_data = platform_get_drvdata(pdev);
-	if (!device_data) {
-		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
-			__func__);
-		return;
-	}
-
-	/* Check that the device is free */
-	spin_lock(&device_data->ctx_lock);
-	/* current_ctx allocates a device, NULL = unallocated */
-	if (!device_data->current_ctx) {
-		if (down_trylock(&driver_data.device_allocation))
-			dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
-				__func__);
-		/**
-		 * (Allocate the device)
-		 * Need to set this to non-null (dummy) value,
-		 * to avoid usage if context switching.
-		 */
-		device_data->current_ctx++;
-	}
-	spin_unlock(&device_data->ctx_lock);
-
-	/* Remove the device from the list */
-	if (klist_node_attached(&device_data->list_node))
-		klist_remove(&device_data->list_node);
-
-	/* If this was the last device, remove the services */
-	if (list_empty(&driver_data.device_list.k_list))
-		ahash_algs_unregister_all(device_data);
-
-	if (hash_disable_power(device_data, false))
-		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
-			__func__);
-}
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * ux500_hash_suspend - Function that suspends the hash device.
- * @dev: Device to suspend.
- */
-static int ux500_hash_suspend(struct device *dev)
-{
-	int ret;
-	struct hash_device_data *device_data;
-	struct hash_ctx *temp_ctx = NULL;
-
-	device_data = dev_get_drvdata(dev);
-	if (!device_data) {
-		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
-		return -ENOMEM;
-	}
-
-	spin_lock(&device_data->ctx_lock);
-	if (!device_data->current_ctx)
-		device_data->current_ctx++;
-	spin_unlock(&device_data->ctx_lock);
-
-	if (device_data->current_ctx == ++temp_ctx) {
-		if (down_interruptible(&driver_data.device_allocation))
-			dev_dbg(dev, "%s: down_interruptible() failed\n",
-				__func__);
-		ret = hash_disable_power(device_data, false);
-
-	} else {
-		ret = hash_disable_power(device_data, true);
-	}
-
-	if (ret)
-		dev_err(dev, "%s: hash_disable_power()\n", __func__);
-
-	return ret;
-}
-
-/**
- * ux500_hash_resume - Function that resume the hash device.
- * @dev: Device to resume.
- */
-static int ux500_hash_resume(struct device *dev)
-{
-	int ret = 0;
-	struct hash_device_data *device_data;
-	struct hash_ctx *temp_ctx = NULL;
-
-	device_data = dev_get_drvdata(dev);
-	if (!device_data) {
-		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
-		return -ENOMEM;
-	}
-
-	spin_lock(&device_data->ctx_lock);
-	if (device_data->current_ctx == ++temp_ctx)
-		device_data->current_ctx = NULL;
-	spin_unlock(&device_data->ctx_lock);
-
-	if (!device_data->current_ctx)
-		up(&driver_data.device_allocation);
-	else
-		ret = hash_enable_power(device_data, true);
-
-	if (ret)
-		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
-
-	return ret;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
-
-static const struct of_device_id ux500_hash_match[] = {
-	{ .compatible = "stericsson,ux500-hash" },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, ux500_hash_match);
-
-static struct platform_driver hash_driver = {
-	.probe = ux500_hash_probe,
-	.remove = ux500_hash_remove,
-	.shutdown = ux500_hash_shutdown,
-	.driver = {
-		.name = "hash1",
-		.of_match_table = ux500_hash_match,
-		.pm = &ux500_hash_pm,
-	}
-};
-
-/**
- * ux500_hash_mod_init - The kernel module init function.
- */
-static int __init ux500_hash_mod_init(void)
-{
-	klist_init(&driver_data.device_list, NULL, NULL);
-	/* Initialize the semaphore to 0 devices (locked state) */
-	sema_init(&driver_data.device_allocation, 0);
-
-	return platform_driver_register(&hash_driver);
-}
-
-/**
- * ux500_hash_mod_fini - The kernel module exit function.
- */
-static void __exit ux500_hash_mod_fini(void)
-{
-	platform_driver_unregister(&hash_driver);
-}
-
-module_init(ux500_hash_mod_init);
-module_exit(ux500_hash_mod_fini);
-
-MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("sha1-all");
-MODULE_ALIAS_CRYPTO("sha256-all");
-MODULE_ALIAS_CRYPTO("hmac-sha1-all");
-MODULE_ALIAS_CRYPTO("hmac-sha256-all");
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index b2979be613b8..6963344f6a3a 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -116,7 +116,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
 	struct virtio_crypto_session_input *input;
 	struct virtio_crypto_ctrl_request *vc_ctrl_req;
 
-	pkey = kmemdup(key, keylen, GFP_ATOMIC);
+	pkey = kmemdup(key, keylen, GFP_KERNEL);
 	if (!pkey)
 		return -ENOMEM;
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2653516bcdef..3aeeb8f2802f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1458,8 +1458,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	return r;
 }
 
-static void kcryptd_async_done(struct crypto_async_request *async_req,
-			       int error);
+static void kcryptd_async_done(void *async_req, int error);
 
 static int crypt_alloc_req_skcipher(struct crypt_config *cc,
 				    struct convert_context *ctx)
@@ -2147,10 +2146,9 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	crypt_dec_pending(io);
 }
 
-static void kcryptd_async_done(struct crypto_async_request *async_req,
-			       int error)
+static void kcryptd_async_done(void *data, int error)
 {
-	struct dm_crypt_request *dmreq = async_req->data;
+	struct dm_crypt_request *dmreq = data;
 	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
 	struct crypt_config *cc = io->cc;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1388ee35571e..c58156deb2b1 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -955,9 +955,9 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
 	async_tx_issue_pending_all();
 }
 
-static void complete_journal_encrypt(struct crypto_async_request *req, int err)
+static void complete_journal_encrypt(void *data, int err)
 {
-	struct journal_completion *comp = req->data;
+	struct journal_completion *comp = data;
 	if (unlikely(err)) {
 		if (likely(err == -EINPROGRESS)) {
 			complete(&comp->ic->crypto_backoff);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index bf8ac7a3ded7..becb04123d3e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -528,9 +528,9 @@ static void count_tx(struct net_device *dev, int ret, int len)
 	}
 }
 
-static void macsec_encrypt_done(struct crypto_async_request *base, int err)
+static void macsec_encrypt_done(void *data, int err)
 {
-	struct sk_buff *skb = base->data;
+	struct sk_buff *skb = data;
 	struct net_device *dev = skb->dev;
 	struct macsec_dev *macsec = macsec_priv(dev);
 	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
@@ -835,9 +835,9 @@ static void count_rx(struct net_device *dev, int len)
 	u64_stats_update_end(&stats->syncp);
 }
 
-static void macsec_decrypt_done(struct crypto_async_request *base, int err)
+static void macsec_decrypt_done(void *data, int err)
 {
-	struct sk_buff *skb = base->data;
+	struct sk_buff *skb = data;
 	struct net_device *dev = skb->dev;
 	struct macsec_dev *macsec = macsec_priv(dev);
 	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;