Diffstat (limited to 'drivers/crypto')
42 files changed, 1115 insertions, 283 deletions
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index de50c00ba218..19b7fb4a93e8 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -190,7 +190,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req err = -EFAULT; goto theend; } - cet->t_key = cpu_to_le32(rctx->addr_key); + cet->t_key = desc_addr_val_le32(ce, rctx->addr_key); ivsize = crypto_skcipher_ivsize(tfm); if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { @@ -208,7 +208,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req err = -ENOMEM; goto theend_iv; } - cet->t_iv = cpu_to_le32(rctx->addr_iv); + cet->t_iv = desc_addr_val_le32(ce, rctx->addr_iv); } if (areq->src == areq->dst) { @@ -236,7 +236,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req len = areq->cryptlen; for_each_sg(areq->src, sg, nr_sgs, i) { - cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg)); + cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg)); todo = min(len, sg_dma_len(sg)); cet->t_src[i].len = cpu_to_le32(todo / 4); dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__, @@ -251,7 +251,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req len = areq->cryptlen; for_each_sg(areq->dst, sg, nr_sgd, i) { - cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg)); + cet->t_dst[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg)); todo = min(len, sg_dma_len(sg)); cet->t_dst[i].len = cpu_to_le32(todo / 4); dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__, diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index 0408b2d5d533..e55e58e164db 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -92,6 +92,30 @@ static const struct ce_variant ce_h6_variant = { .trng = CE_ALG_TRNG_V2, }; +static const struct ce_variant ce_h616_variant = { + .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, + }, + .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, + CE_ALG_SHA384, CE_ALG_SHA512 + }, + .op_mode = { CE_OP_ECB, CE_OP_CBC + }, + .cipher_t_dlen_in_bytes = true, + .hash_t_dlen_in_bits = true, + .prng_t_dlen_in_bytes = true, + .trng_t_dlen_in_bytes = true, + .needs_word_addresses = true, + .ce_clks = { + { "bus", 0, 200000000 }, + { "mod", 300000000, 0 }, + { "ram", 0, 400000000 }, + { "trng", 0, 0 }, + }, + .esr = ESR_H6, + .prng = CE_ALG_PRNG_V2, + .trng = CE_ALG_TRNG_V2, +}; + static const struct ce_variant ce_a64_variant = { .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, }, @@ -172,7 +196,7 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name) writel(v, ce->base + CE_ICR); reinit_completion(&ce->chanlist[flow].complete); - writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ); + writel(desc_addr_val(ce, ce->chanlist[flow].t_phy), ce->base + CE_TDQ); ce->chanlist[flow].status = 0; /* Be sure all data is written before enabling the task */ @@ -1097,6 +1121,8 @@ static const struct of_device_id sun8i_ce_crypto_of_match_table[] = { .data = &ce_h5_variant }, { .compatible = "allwinner,sun50i-h6-crypto", .data = &ce_h6_variant }, + { .compatible = "allwinner,sun50i-h616-crypto", + .data = &ce_h616_variant }, {} }; MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table); diff --git 
a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index ee2a28c906ed..6072dd9f390b 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -403,7 +403,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) len = areq->nbytes; for_each_sg(areq->src, sg, nr_sgs, i) { - cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg)); + cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg)); todo = min(len, sg_dma_len(sg)); cet->t_src[i].len = cpu_to_le32(todo / 4); len -= todo; @@ -414,7 +414,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) goto theend; } addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE); - cet->t_dst[0].addr = cpu_to_le32(addr_res); + cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res); cet->t_dst[0].len = cpu_to_le32(digestsize / 4); if (dma_mapping_error(ce->dev, addr_res)) { dev_err(ce->dev, "DMA map dest\n"); @@ -445,7 +445,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) } addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE); - cet->t_src[i].addr = cpu_to_le32(addr_pad); + cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad); cet->t_src[i].len = cpu_to_le32(j); if (dma_mapping_error(ce->dev, addr_pad)) { dev_err(ce->dev, "DMA error on padding SG\n"); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c index 80815379f6fc..762459867b6c 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -132,10 +132,10 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, cet->t_sym_ctl = cpu_to_le32(sym); cet->t_asym_ctl = 0; - cet->t_key = cpu_to_le32(dma_iv); - cet->t_iv = cpu_to_le32(dma_iv); + cet->t_key = desc_addr_val_le32(ce, dma_iv); + cet->t_iv = desc_addr_val_le32(ce, dma_iv); - cet->t_dst[0].addr = cpu_to_le32(dma_dst); + cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst); cet->t_dst[0].len = cpu_to_le32(todo / 4); ce->chanlist[flow].timeout = 2000; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c index 9c35f2a83eda..e1e8bc15202e 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c @@ -77,7 +77,7 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa cet->t_sym_ctl = 0; cet->t_asym_ctl = 0; - cet->t_dst[0].addr = cpu_to_le32(dma_dst); + cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst); cet->t_dst[0].len = cpu_to_le32(todo / 4); ce->chanlist[flow].timeout = todo; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 93d4985def87..3b5c2af013d0 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -149,6 +149,7 @@ struct ce_variant { bool hash_t_dlen_in_bits; bool prng_t_dlen_in_bytes; bool trng_t_dlen_in_bytes; + bool needs_word_addresses; struct ce_clock ce_clks[CE_MAX_CLOCKS]; int esr; unsigned char prng; @@ -241,6 +242,20 @@ struct sun8i_ce_dev { #endif }; +static inline u32 desc_addr_val(struct sun8i_ce_dev *dev, dma_addr_t addr) +{ + if (dev->variant->needs_word_addresses) + return addr / 4; + + return addr; +} + +static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev, + dma_addr_t addr) +{ + return cpu_to_le32(desc_addr_val(dev, addr)); +} 
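[Editor's aside, not part of the patch.] The desc_addr_val() helpers added above exist because the H616 Crypto Engine variant (the one setting needs_word_addresses) expects DMA pointers in its task descriptors as 32-bit word indexes, i.e. the byte address divided by four, rather than raw byte addresses; that is why every cpu_to_le32(addr) store in the cipher, hash, PRNG and TRNG paths is rewritten. A minimal standalone C sketch of just that conversion, with stand-in types and a hypothetical DMA address (not the driver's real structures):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel types used by the hunk above. */
    typedef uint64_t dma_addr_t;

    struct ce_variant { int needs_word_addresses; };
    struct sun8i_ce_dev { const struct ce_variant *variant; };

    /*
     * Mirrors desc_addr_val(): on variants flagged needs_word_addresses
     * (H616) the descriptor stores addr / 4, a word index. As a side
     * effect a 32-bit field can then reference word-aligned addresses
     * beyond 4 GiB, which is presumably the point of word addressing.
     */
    static uint32_t desc_addr_val(const struct sun8i_ce_dev *dev, dma_addr_t addr)
    {
            if (dev->variant->needs_word_addresses)
                    return addr / 4;

            return addr;
    }

    int main(void)
    {
            static const struct ce_variant h6 = { .needs_word_addresses = 0 };
            static const struct ce_variant h616 = { .needs_word_addresses = 1 };
            struct sun8i_ce_dev dev_h6 = { .variant = &h6 };
            struct sun8i_ce_dev dev_h616 = { .variant = &h616 };
            dma_addr_t addr = 0x40001000; /* hypothetical DMA address */

            printf("H6:   0x%x\n", (unsigned int)desc_addr_val(&dev_h6, addr));   /* 0x40001000 */
            printf("H616: 0x%x\n", (unsigned int)desc_addr_val(&dev_h616, addr)); /* 0x10000400 */
            return 0;
    }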
+ /* * struct sun8i_cipher_req_ctx - context for a skcipher request * @op_dir: direction (encrypt vs decrypt) for this request diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index 24ffdf505023..a02d496f4c41 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -106,7 +106,7 @@ static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp) if (cmd.data[0] == 0xff) { dev_err(&client->dev, "failed, device not ready\n"); - return -ret; + return -EINVAL; } memcpy(otp, cmd.data+1, 4); @@ -232,4 +232,5 @@ module_init(atmel_sha204a_init); module_exit(atmel_sha204a_exit); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_DESCRIPTION("Microchip / Atmel SHA204A (I2C) driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index dbc1d483f2af..75440ea6206e 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2811,13 +2811,6 @@ static struct aead_alg aead_algos[] = { #ifdef CONFIG_DEBUG_FS -struct dbgfs_u32 { - char *name; - mode_t mode; - u32 *flag; - char *desc; -}; - static struct dentry *dbgfs_root; static void artpec6_crypto_init_debugfs(void) diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index c631f99e415f..05210a0edb8a 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -10,7 +10,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC config CRYPTO_DEV_FSL_CAAM tristate "Freescale CAAM-Multicore platform driver backend" - depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST select SOC_BUS select CRYPTO_DEV_FSL_CAAM_COMMON imply FSL_MC_BUS diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a4f6884416a0..207dc422785a 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -4990,11 +4990,23 @@ err_dma_map: return err; } +static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus) +{ + struct dpaa2_caam_priv_per_cpu *ppriv; + int i; + + for_each_cpu(i, cpus) { + ppriv = per_cpu_ptr(priv->ppriv, i); + free_netdev(ppriv->net_dev); + } +} + static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) { struct device *dev = &ls_dev->dev; struct dpaa2_caam_priv *priv; struct dpaa2_caam_priv_per_cpu *ppriv; + cpumask_t clean_mask; int err, cpu; u8 i; @@ -5073,6 +5085,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) } } + cpumask_clear(&clean_mask); i = 0; for_each_online_cpu(cpu) { u8 j; @@ -5096,15 +5109,23 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) priv->rx_queue_attr[j].fqid, priv->tx_queue_attr[j].fqid); - ppriv->net_dev.dev = *dev; - INIT_LIST_HEAD(&ppriv->net_dev.napi_list); - netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi, + ppriv->net_dev = alloc_netdev_dummy(0); + if (!ppriv->net_dev) { + err = -ENOMEM; + goto err_alloc_netdev; + } + cpumask_set_cpu(cpu, &clean_mask); + ppriv->net_dev->dev = *dev; + + netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, DPAA2_CAAM_NAPI_WEIGHT); } return 0; +err_alloc_netdev: + free_dpaa2_pcpu_netdev(priv, &clean_mask); err_get_rx_queue: dpaa2_dpseci_congestion_free(priv); err_get_vers: @@ -5153,6 +5174,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) ppriv = per_cpu_ptr(priv->ppriv, i); napi_disable(&ppriv->napi); netif_napi_del(&ppriv->napi); + 
free_netdev(ppriv->net_dev); } return 0; diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index abb502bb675c..61d1219a202f 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -81,7 +81,7 @@ struct dpaa2_caam_priv { */ struct dpaa2_caam_priv_per_cpu { struct napi_struct napi; - struct net_device net_dev; + struct net_device *net_dev; int req_fqid; int rsp_fqid; int prio; diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index bd418dea586d..d4b39184dbdb 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -80,6 +80,7 @@ static void build_deinstantiation_desc(u32 *desc, int handle) append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); } +#ifdef CONFIG_OF static const struct of_device_id imx8m_machine_match[] = { { .compatible = "fsl,imx8mm", }, { .compatible = "fsl,imx8mn", }, @@ -88,6 +89,7 @@ static const struct of_device_id imx8m_machine_match[] = { { .compatible = "fsl,imx8ulp", }, { } }; +#endif /* * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 46a083849a8e..ba8fb5d8a7b2 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -57,7 +57,7 @@ struct caam_napi { */ struct caam_qi_pcpu_priv { struct caam_napi caam_napi; - struct net_device net_dev; + struct net_device *net_dev; struct qman_fq *rsp_fq; } ____cacheline_aligned; @@ -144,7 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, { const struct qm_fd *fd; struct caam_drv_req *drv_req; - struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev); struct caam_drv_private *priv = dev_get_drvdata(qidev); fd = &msg->ern.fd; @@ -530,6 +530,7 @@ static void caam_qi_shutdown(void *data) if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i))) dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); + free_netdev(per_cpu(pcpu_qipriv.net_dev, i)); } qman_delete_cgr_safe(&priv->cgr); @@ -573,7 +574,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi); struct caam_drv_req *drv_req; const struct qm_fd *fd; - struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev); struct caam_drv_private *priv = dev_get_drvdata(qidev); u32 status; @@ -718,12 +719,24 @@ static void free_rsp_fqs(void) kfree(per_cpu(pcpu_qipriv.rsp_fq, i)); } +static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus) +{ + struct caam_qi_pcpu_priv *priv; + int i; + + for_each_cpu(i, cpus) { + priv = per_cpu_ptr(&pcpu_qipriv, i); + free_netdev(priv->net_dev); + } +} + int caam_qi_init(struct platform_device *caam_pdev) { int err, i; struct device *ctrldev = &caam_pdev->dev, *qidev; struct caam_drv_private *ctrlpriv; const cpumask_t *cpus = qman_affine_cpus(); + cpumask_t clean_mask; ctrlpriv = dev_get_drvdata(ctrldev); qidev = ctrldev; @@ -743,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev) return err; } + cpumask_clear(&clean_mask); + /* * Enable the NAPI contexts on each of the core which has an affine * portal. 
@@ -751,10 +766,16 @@ int caam_qi_init(struct platform_device *caam_pdev) struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i); struct caam_napi *caam_napi = &priv->caam_napi; struct napi_struct *irqtask = &caam_napi->irqtask; - struct net_device *net_dev = &priv->net_dev; + struct net_device *net_dev; + net_dev = alloc_netdev_dummy(0); + if (!net_dev) { + err = -ENOMEM; + goto fail; + } + cpumask_set_cpu(i, &clean_mask); + priv->net_dev = net_dev; net_dev->dev = *qidev; - INIT_LIST_HEAD(&net_dev->napi_list); netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll, CAAM_NAPI_WEIGHT); @@ -766,16 +787,22 @@ int caam_qi_init(struct platform_device *caam_pdev) dma_get_cache_alignment(), 0, NULL); if (!qi_cache) { dev_err(qidev, "Can't allocate CAAM cache\n"); - free_rsp_fqs(); - return -ENOMEM; + err = -ENOMEM; + goto fail2; } caam_debugfs_qi_init(ctrlpriv); err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv); if (err) - return err; + goto fail2; dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); return 0; + +fail2: + free_rsp_fqs(); +fail: + free_caam_qi_pcpu_netdev(&clean_mask); + return err; } diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index aa0ba2d17e1e..394484929dae 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -12,7 +12,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ platform-access.o \ - dbc.o + dbc.o \ + hsti.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c index d373caab52f8..5b105a23f699 100644 --- a/drivers/crypto/ccp/dbc.c +++ b/drivers/crypto/ccp/dbc.c @@ -223,7 +223,7 @@ int dbc_dev_init(struct psp_device *psp) dbc_dev->dev = dev; dbc_dev->psp = psp; - if (PSP_CAPABILITY(psp, DBC_THRU_EXT)) { + if (psp->capability.dbc_thru_ext) { dbc_dev->use_ext = true; dbc_dev->payload_size = &dbc_dev->mbox->ext_req.header.payload_size; dbc_dev->result = &dbc_dev->mbox->ext_req.header.status; diff --git a/drivers/crypto/ccp/hsti.c b/drivers/crypto/ccp/hsti.c new file mode 100644 index 000000000000..1b39a4fb55c0 --- /dev/null +++ b/drivers/crypto/ccp/hsti.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Secure Processor device driver, security attributes + * + * Copyright (C) 2023-2024 Advanced Micro Devices, Inc. 
+ * + * Author: Mario Limonciello <mario.limonciello@amd.com> + */ + +#include <linux/device.h> + +#include "psp-dev.h" +#include "hsti.h" + +#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8 + +struct hsti_request { + struct psp_req_buffer_hdr header; + u32 hsti; +} __packed; + +#define security_attribute_show(name) \ +static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct sp_device *sp = dev_get_drvdata(d); \ + struct psp_device *psp = sp->psp_data; \ + return sysfs_emit(buf, "%d\n", psp->capability.name); \ +} + +security_attribute_show(fused_part) +static DEVICE_ATTR_RO(fused_part); +security_attribute_show(debug_lock_on) +static DEVICE_ATTR_RO(debug_lock_on); +security_attribute_show(tsme_status) +static DEVICE_ATTR_RO(tsme_status); +security_attribute_show(anti_rollback_status) +static DEVICE_ATTR_RO(anti_rollback_status); +security_attribute_show(rpmc_production_enabled) +static DEVICE_ATTR_RO(rpmc_production_enabled); +security_attribute_show(rpmc_spirom_available) +static DEVICE_ATTR_RO(rpmc_spirom_available); +security_attribute_show(hsp_tpm_available) +static DEVICE_ATTR_RO(hsp_tpm_available); +security_attribute_show(rom_armor_enforced) +static DEVICE_ATTR_RO(rom_armor_enforced); + +static struct attribute *psp_security_attrs[] = { + &dev_attr_fused_part.attr, + &dev_attr_debug_lock_on.attr, + &dev_attr_tsme_status.attr, + &dev_attr_anti_rollback_status.attr, + &dev_attr_rpmc_production_enabled.attr, + &dev_attr_rpmc_spirom_available.attr, + &dev_attr_hsp_tpm_available.attr, + &dev_attr_rom_armor_enforced.attr, + NULL +}; + +static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct sp_device *sp = dev_get_drvdata(dev); + struct psp_device *psp = sp->psp_data; + + if (psp && psp->capability.security_reporting) + return 0444; + + return 0; +} + +struct attribute_group psp_security_attr_group = { + .attrs = psp_security_attrs, + .is_visible = psp_security_is_visible, +}; + +static int psp_poulate_hsti(struct psp_device *psp) +{ + struct hsti_request *req; + int ret; + + /* Are the security attributes already reported? */ + if (psp->capability.security_reporting) + return 0; + + /* Allocate command-response buffer */ + req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_ZERO); + if (!req) + return -ENOMEM; + + req->header.payload_size = sizeof(req); + + ret = psp_send_platform_access_msg(PSP_CMD_HSTI_QUERY, (struct psp_request *)req); + if (ret) + goto out; + + if (req->header.status != 0) { + dev_dbg(psp->dev, "failed to populate HSTI state: %d\n", req->header.status); + ret = -EINVAL; + goto out; + } + + psp->capability.security_reporting = 1; + psp->capability.raw |= req->hsti << PSP_CAPABILITY_PSP_SECURITY_OFFSET; + +out: + kfree(req); + + return ret; +} + +int psp_init_hsti(struct psp_device *psp) +{ + int ret; + + if (PSP_FEATURE(psp, HSTI)) { + ret = psp_poulate_hsti(psp); + if (ret) + return ret; + } + + /* + * At this stage, if security information hasn't been populated by + * either the PSP or by the driver through the platform command, + * then there is nothing more to do. 
+ */ + if (!psp->capability.security_reporting) + return 0; + + if (psp->capability.tsme_status) { + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) + dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n"); + else + dev_notice(psp->dev, "psp: TSME enabled\n"); + } + + return 0; +} diff --git a/drivers/crypto/ccp/hsti.h b/drivers/crypto/ccp/hsti.h new file mode 100644 index 000000000000..6a70f922d2c4 --- /dev/null +++ b/drivers/crypto/ccp/hsti.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * AMD Secure Processor device driver, security attributes + * + * Copyright (C) 2023-2024 Advanced Micro Devices, Inc. + * + * Author: Mario Limonciello <mario.limonciello@amd.com> + */ + +#ifndef __HSTI_H +#define __HSTI_H + +extern struct attribute_group psp_security_attr_group; + +int psp_init_hsti(struct psp_device *psp); + +#endif /* __HSTI_H */ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 56bf832c2947..1c5a7189631e 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -19,6 +19,7 @@ #include "tee-dev.h" #include "platform-access.h" #include "dbc.h" +#include "hsti.h" struct psp_device *psp_master; @@ -154,16 +155,7 @@ static unsigned int psp_get_capability(struct psp_device *psp) dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n"); return -ENODEV; } - psp->capability = val; - - /* Detect TSME and/or SME status */ - if (PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING) && - psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET)) { - if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) - dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n"); - else - dev_notice(psp->dev, "psp: TSME enabled\n"); - } + psp->capability.raw = val; return 0; } @@ -171,7 +163,7 @@ static unsigned int psp_get_capability(struct psp_device *psp) static int psp_check_sev_support(struct psp_device *psp) { /* Check if device supports SEV feature */ - if (!PSP_CAPABILITY(psp, SEV)) { + if (!psp->capability.sev) { dev_dbg(psp->dev, "psp does not support SEV\n"); return -ENODEV; } @@ -182,7 +174,7 @@ static int psp_check_sev_support(struct psp_device *psp) static int psp_check_tee_support(struct psp_device *psp) { /* Check if device supports TEE feature */ - if (!PSP_CAPABILITY(psp, TEE)) { + if (!psp->capability.tee) { dev_dbg(psp->dev, "psp does not support TEE\n"); return -ENODEV; } @@ -214,12 +206,17 @@ static int psp_init(struct psp_device *psp) /* dbc must come after platform access as it tests the feature */ if (PSP_FEATURE(psp, DBC) || - PSP_CAPABILITY(psp, DBC_THRU_EXT)) { + psp->capability.dbc_thru_ext) { ret = dbc_dev_init(psp); if (ret) return ret; } + /* HSTI uses platform access on some systems. 
*/ + ret = psp_init_hsti(psp); + if (ret) + return ret; + return 0; } diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index ae582ba63729..e43ce87ede76 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -26,6 +26,29 @@ extern struct psp_device *psp_master; typedef void (*psp_irq_handler_t)(int, void *, unsigned int); +union psp_cap_register { + unsigned int raw; + struct { + unsigned int sev :1, + tee :1, + dbc_thru_ext :1, + rsvd1 :4, + security_reporting :1, + fused_part :1, + rsvd2 :1, + debug_lock_on :1, + rsvd3 :2, + tsme_status :1, + rsvd4 :1, + anti_rollback_status :1, + rpmc_production_enabled :1, + rpmc_spirom_available :1, + hsp_tpm_available :1, + rom_armor_enforced :1, + rsvd5 :12; + }; +}; + struct psp_device { struct list_head entry; @@ -46,7 +69,7 @@ struct psp_device { void *platform_access_data; void *dbc_data; - unsigned int capability; + union psp_cap_register capability; }; void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, @@ -55,27 +78,6 @@ void psp_clear_sev_irq_handler(struct psp_device *psp); struct psp_device *psp_get_master_device(void); -#define PSP_CAPABILITY_SEV BIT(0) -#define PSP_CAPABILITY_TEE BIT(1) -#define PSP_CAPABILITY_DBC_THRU_EXT BIT(2) -#define PSP_CAPABILITY_PSP_SECURITY_REPORTING BIT(7) - -#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8 -/* - * The PSP doesn't directly store these bits in the capability register - * but instead copies them from the results of query command. - * - * The offsets from the query command are below, and shifted when used. - */ -#define PSP_SECURITY_FUSED_PART BIT(0) -#define PSP_SECURITY_DEBUG_LOCK_ON BIT(2) -#define PSP_SECURITY_TSME_STATUS BIT(5) -#define PSP_SECURITY_ANTI_ROLLBACK_STATUS BIT(7) -#define PSP_SECURITY_RPMC_PRODUCTION_ENABLED BIT(8) -#define PSP_SECURITY_RPMC_SPIROM_AVAILABLE BIT(9) -#define PSP_SECURITY_HSP_TPM_AVAILABLE BIT(10) -#define PSP_SECURITY_ROM_ARMOR_ENFORCED BIT(11) - /** * enum psp_cmd - PSP mailbox commands * @PSP_CMD_TEE_RING_INIT: Initialize TEE ring buffer diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 2102377f727b..9810edbb272d 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1642,10 +1642,16 @@ fw_err: static int __sev_snp_shutdown_locked(int *error, bool panic) { - struct sev_device *sev = psp_master->sev_data; + struct psp_device *psp = psp_master; + struct sev_device *sev; struct sev_data_snp_shutdown_ex data; int ret; + if (!psp || !psp->sev_data) + return 0; + + sev = psp->sev_data; + if (!sev->snp_initialized) return 0; @@ -2027,6 +2033,39 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error); } +static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable) +{ + struct sev_device *sev = psp_master->sev_data; + struct sev_user_data_snp_vlek_load input; + void *blob; + int ret; + + if (!sev->snp_initialized || !argp->data) + return -EINVAL; + + if (!writable) + return -EPERM; + + if (copy_from_user(&input, u64_to_user_ptr(argp->data), sizeof(input))) + return -EFAULT; + + if (input.len != sizeof(input) || input.vlek_wrapped_version != 0) + return -EINVAL; + + blob = psp_copy_user_blob(input.vlek_wrapped_address, + sizeof(struct sev_user_data_snp_wrapped_vlek_hashstick)); + if (IS_ERR(blob)) + return PTR_ERR(blob); + + input.vlek_wrapped_address = __psp_pa(blob); + + ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, 
&argp->error); + + kfree(blob); + + return ret; +} + static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -2087,6 +2126,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case SNP_SET_CONFIG: ret = sev_ioctl_do_snp_set_config(&input, writable); break; + case SNP_VLEK_LOAD: + ret = sev_ioctl_do_snp_vlek_load(&input, writable); + break; default: ret = -EINVAL; goto out; diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 03d5b9e04084..0895de823674 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -29,8 +29,8 @@ #define CACHE_WB_NO_ALLOC 0xb7 #define PLATFORM_FEATURE_DBC 0x1 +#define PLATFORM_FEATURE_HSTI 0x2 -#define PSP_CAPABILITY(psp, cap) (psp->capability & PSP_CAPABILITY_##cap) #define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat) /* Structure to hold CCP device data */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 300dda14182b..248d98fd8c48 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -24,6 +24,7 @@ #include "ccp-dev.h" #include "psp-dev.h" +#include "hsti.h" /* used for version string AA.BB.CC.DD */ #define AA GENMASK(31, 24) @@ -39,62 +40,6 @@ struct sp_pci { }; static struct sp_device *sp_dev_master; -#define security_attribute_show(name, def) \ -static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct sp_device *sp = dev_get_drvdata(d); \ - struct psp_device *psp = sp->psp_data; \ - int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET; \ - return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \ -} - -security_attribute_show(fused_part, FUSED_PART) -static DEVICE_ATTR_RO(fused_part); -security_attribute_show(debug_lock_on, DEBUG_LOCK_ON) -static DEVICE_ATTR_RO(debug_lock_on); -security_attribute_show(tsme_status, TSME_STATUS) -static DEVICE_ATTR_RO(tsme_status); -security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS) -static DEVICE_ATTR_RO(anti_rollback_status); -security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED) -static DEVICE_ATTR_RO(rpmc_production_enabled); -security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE) -static DEVICE_ATTR_RO(rpmc_spirom_available); -security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE) -static DEVICE_ATTR_RO(hsp_tpm_available); -security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED) -static DEVICE_ATTR_RO(rom_armor_enforced); - -static struct attribute *psp_security_attrs[] = { - &dev_attr_fused_part.attr, - &dev_attr_debug_lock_on.attr, - &dev_attr_tsme_status.attr, - &dev_attr_anti_rollback_status.attr, - &dev_attr_rpmc_production_enabled.attr, - &dev_attr_rpmc_spirom_available.attr, - &dev_attr_hsp_tpm_available.attr, - &dev_attr_rom_armor_enforced.attr, - NULL -}; - -static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx) -{ - struct device *dev = kobj_to_dev(kobj); - struct sp_device *sp = dev_get_drvdata(dev); - struct psp_device *psp = sp->psp_data; - - if (psp && PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING)) - return 0444; - - return 0; -} - -static struct attribute_group psp_security_attr_group = { - .attrs = psp_security_attrs, - .is_visible = psp_security_is_visible, -}; - #define version_attribute_show(name, _offset) \ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ char 
*buf) \ @@ -134,8 +79,7 @@ static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *a psp->vdata->bootloader_info_reg) val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg); - if (attr == &dev_attr_tee_version.attr && - PSP_CAPABILITY(psp, TEE) && + if (attr == &dev_attr_tee_version.attr && psp->capability.tee && psp->vdata->tee->info_reg) val = ioread32(psp->io_regs + psp->vdata->tee->info_reg); @@ -152,7 +96,9 @@ static struct attribute_group psp_firmware_attr_group = { }; static const struct attribute_group *psp_groups[] = { +#ifdef CONFIG_CRYPTO_DEV_SP_PSP &psp_security_attr_group, +#endif &psp_firmware_attr_group, NULL, }; @@ -451,10 +397,12 @@ static const struct psp_vdata pspv1 = { static const struct psp_vdata pspv2 = { .sev = &sevv2, + .platform_access = &pa_v1, .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ + .platform_features = PLATFORM_FEATURE_HSTI, }; static const struct psp_vdata pspv3 = { @@ -467,7 +415,8 @@ static const struct psp_vdata pspv3 = { .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ - .platform_features = PLATFORM_FEATURE_DBC, + .platform_features = PLATFORM_FEATURE_DBC | + PLATFORM_FEATURE_HSTI, }; static const struct psp_vdata pspv4 = { diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index cd66a580e8b6..3fb667a17bbb 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -261,12 +261,6 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) kfree_sensitive(ctx_p->user.key); } -struct tdes_keys { - u8 key1[DES_KEY_SIZE]; - u8 key2[DES_KEY_SIZE]; - u8 key3[DES_KEY_SIZE]; -}; - static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num) { switch (slot_num) { diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index b4a4ec35bce0..925991526745 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -495,16 +495,6 @@ struct hifn_crypt_command { #define HIFN_CRYPT_CMD_SRCLEN_M 0xc000 #define HIFN_CRYPT_CMD_SRCLEN_S 14 -/* - * Structure to help build up the command data structure. 
- */ -struct hifn_mac_command { - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; -}; - #define HIFN_MAC_CMD_ALG_MASK 0x0001 #define HIFN_MAC_CMD_ALG_SHA1 0x0000 #define HIFN_MAC_CMD_ALG_MD5 0x0001 @@ -526,13 +516,6 @@ struct hifn_mac_command { #define HIFN_MAC_CMD_POS_IPSEC 0x0200 #define HIFN_MAC_CMD_NEW_KEY 0x0800 -struct hifn_comp_command { - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; -}; - #define HIFN_COMP_CMD_SRCLEN_M 0xc000 #define HIFN_COMP_CMD_SRCLEN_S 14 #define HIFN_COMP_CMD_ONE 0x0100 /* must be one */ diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 3dac8d8e8568..f614fd228b56 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3793,14 +3793,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) goto err_put_sync; } - qm->vfs_num = num_vfs; - ret = pci_enable_sriov(pdev, num_vfs); if (ret) { pci_err(pdev, "Can't enable VF!\n"); qm_clear_vft_config(qm); goto err_put_sync; } + qm->vfs_num = num_vfs; pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); @@ -3822,7 +3821,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) { struct hisi_qm *qm = pci_get_drvdata(pdev); - int ret; if (pci_vfs_assigned(pdev)) { pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); @@ -3837,13 +3835,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) pci_disable_sriov(pdev); - ret = qm_clear_vft_config(qm); - if (ret) - return ret; - + qm->vfs_num = 0; qm_pm_put_sync(qm); - return 0; + return qm_clear_vft_config(qm); } EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index c94a7b20d07e..7c2d803886fd 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -37,7 +37,7 @@ #define HZIP_QM_IDEL_STATUS 0x3040e4 #define HZIP_CORE_DFX_BASE 0x301000 -#define HZIP_CLOCK_GATED_CONTL 0X301004 +#define HZIP_CORE_DFX_DECOMP_BASE 0x304000 #define HZIP_CORE_DFX_COMP_0 0x302000 #define HZIP_CORE_DFX_COMP_1 0x303000 #define HZIP_CORE_DFX_DECOMP_0 0x304000 @@ -48,6 +48,7 @@ #define HZIP_CORE_DFX_DECOMP_5 0x309000 #define HZIP_CORE_REGS_BASE_LEN 0xB0 #define HZIP_CORE_REGS_DFX_LEN 0x28 +#define HZIP_CORE_ADDR_INTRVL 0x1000 #define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK_REG 0x3010A4 @@ -269,28 +270,6 @@ static const u32 zip_pre_store_caps[] = { ZIP_DEV_ALG_BITMAP, }; -enum { - HZIP_COMP_CORE0, - HZIP_COMP_CORE1, - HZIP_DECOMP_CORE0, - HZIP_DECOMP_CORE1, - HZIP_DECOMP_CORE2, - HZIP_DECOMP_CORE3, - HZIP_DECOMP_CORE4, - HZIP_DECOMP_CORE5, -}; - -static const u64 core_offsets[] = { - [HZIP_COMP_CORE0] = 0x302000, - [HZIP_COMP_CORE1] = 0x303000, - [HZIP_DECOMP_CORE0] = 0x304000, - [HZIP_DECOMP_CORE1] = 0x305000, - [HZIP_DECOMP_CORE2] = 0x306000, - [HZIP_DECOMP_CORE3] = 0x307000, - [HZIP_DECOMP_CORE4] = 0x308000, - [HZIP_DECOMP_CORE5] = 0x309000, -}; - static const struct debugfs_reg32 hzip_dfx_regs[] = { {"HZIP_GET_BD_NUM ", 0x00}, {"HZIP_GET_RIGHT_BD ", 0x04}, @@ -807,6 +786,18 @@ static int hisi_zip_regs_show(struct seq_file *s, void *unused) DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs); +static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num) +{ + u32 zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val; + + if (core_num < zip_comp_core_num) 
+ return qm->io_base + HZIP_CORE_DFX_BASE + + (core_num + 1) * HZIP_CORE_ADDR_INTRVL; + + return qm->io_base + HZIP_CORE_DFX_DECOMP_BASE + + (core_num - zip_comp_core_num) * HZIP_CORE_ADDR_INTRVL; +} + static int hisi_zip_core_debug_init(struct hisi_qm *qm) { u32 zip_core_num, zip_comp_core_num; @@ -832,7 +823,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm) regset->regs = hzip_dfx_regs; regset->nregs = ARRAY_SIZE(hzip_dfx_regs); - regset->base = qm->io_base + core_offsets[i]; + regset->base = get_zip_core_addr(qm, i); regset->dev = dev; tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); @@ -921,13 +912,14 @@ debugfs_remove: /* hisi_zip_debug_regs_clear() - clear the zip debug regs */ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) { + u32 zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; int i, j; /* enable register read_clear bit */ writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); - for (i = 0; i < ARRAY_SIZE(core_offsets); i++) + for (i = 0; i < zip_core_num; i++) for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++) - readl(qm->io_base + core_offsets[i] + + readl(get_zip_core_addr(qm, i) + hzip_dfx_regs[j].offset); /* disable register read_clear bit */ @@ -970,7 +962,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm) } for (i = 0; i < zip_core_num; i++) { - io_base = qm->io_base + core_offsets[i]; + io_base = get_zip_core_addr(qm, i); for (j = 0; j < core_dfx_regs_num; j++) { idx = com_dfx_regs_num + i * core_dfx_regs_num + j; debug->last_words[idx] = readl_relaxed( @@ -1022,7 +1014,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) else scnprintf(buf, sizeof(buf), "Decomp_core-%d", i - zip_comp_core_num); - base = qm->io_base + core_offsets[i]; + base = get_zip_core_addr(qm, i); pci_info(qm->pdev, "==>%s:\n", buf); /* dump last word for dfx regs during control resetting */ diff --git a/drivers/crypto/intel/keembay/ocs-hcu.c b/drivers/crypto/intel/keembay/ocs-hcu.c index deb9bd460ee6..55a41e6ab103 100644 --- a/drivers/crypto/intel/keembay/ocs-hcu.c +++ b/drivers/crypto/intel/keembay/ocs-hcu.c @@ -837,4 +837,5 @@ complete: return IRQ_HANDLED; } +MODULE_DESCRIPTION("Intel Keem Bay OCS HCU Crypto Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c index 8836f015c39c..2cf102ad4ca8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg.c @@ -290,17 +290,19 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, * 3. if the key exists with the same value, then return without doing * anything (the newly created key_val is freed). 
*/ + down_write(&cfg->lock); if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) { if (strncmp(temp_val, key_val->val, sizeof(temp_val))) { adf_cfg_keyval_remove(key, section); } else { kfree(key_val); - return 0; + goto out; } } - down_write(&cfg->lock); adf_cfg_keyval_add(key_val, section); + +out: up_write(&cfg->lock); return 0; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c index 29c4422f243c..26a1662fafbb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c @@ -31,19 +31,22 @@ static const struct file_operations adf_ctl_ops = { .compat_ioctl = compat_ptr_ioctl, }; +static const struct class adf_ctl_class = { + .name = DEVICE_NAME, +}; + struct adf_ctl_drv_info { unsigned int major; struct cdev drv_cdev; - struct class *drv_class; }; static struct adf_ctl_drv_info adf_ctl_drv; static void adf_chr_drv_destroy(void) { - device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0)); + device_destroy(&adf_ctl_class, MKDEV(adf_ctl_drv.major, 0)); cdev_del(&adf_ctl_drv.drv_cdev); - class_destroy(adf_ctl_drv.drv_class); + class_unregister(&adf_ctl_class); unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1); } @@ -51,17 +54,17 @@ static int adf_chr_drv_create(void) { dev_t dev_id; struct device *drv_device; + int ret; if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) { pr_err("QAT: unable to allocate chrdev region\n"); return -EFAULT; } - adf_ctl_drv.drv_class = class_create(DEVICE_NAME); - if (IS_ERR(adf_ctl_drv.drv_class)) { - pr_err("QAT: class_create failed for adf_ctl\n"); + ret = class_register(&adf_ctl_class); + if (ret) goto err_chrdev_unreg; - } + adf_ctl_drv.major = MAJOR(dev_id); cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops); if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) { @@ -69,7 +72,7 @@ static int adf_chr_drv_create(void) goto err_class_destr; } - drv_device = device_create(adf_ctl_drv.drv_class, NULL, + drv_device = device_create(&adf_ctl_class, NULL, MKDEV(adf_ctl_drv.major, 0), NULL, DEVICE_NAME); if (IS_ERR(drv_device)) { @@ -80,7 +83,7 @@ static int adf_chr_drv_create(void) err_cdev_del: cdev_del(&adf_ctl_drv.drv_cdev); err_class_destr: - class_destroy(adf_ctl_drv.drv_class); + class_unregister(&adf_ctl_class); err_chrdev_unreg: unregister_chrdev_region(dev_id, 1); return -EFAULT; diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c index f07b748795f7..96ddd1c419c4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c @@ -59,7 +59,7 @@ static int adf_get_vf_real_id(u32 fake) } /** - * adf_clean_vf_map() - Cleans VF id mapings + * adf_clean_vf_map() - Cleans VF id mappings * @vf: flag indicating whether mappings is cleaned * for vfs only or for vfs and pfs * diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c index 70ef11963938..43af81fcab86 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c @@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); - errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled); + /* Update only section of errmsk3 related to VF2PF */ + errmsk3 &= 
~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); + errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); /* Return the sources of the (new) interrupt(s) */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index 346ef8bee99d..e782c23fc1bf 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -1106,6 +1106,7 @@ int adf_rl_init(struct adf_accel_dev *accel_dev) mutex_init(&rl->rl_lock); rl->device_data = &accel_dev->hw_device->rl_data; rl->accel_dev = accel_dev; + init_rwsem(&rl->user_input.lock); accel_dev->rate_limiting = rl; err_ret: diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 6e24d57e6b98..c0661ff5e929 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -193,8 +193,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); - errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled); - errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled); + /* Update only section of errmsk3 and errmsk5 related to VF2PF */ + errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK); + errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK); + + errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled); + errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 057d73c370b7..c82775dbb557 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -225,7 +225,8 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, struct skcipher_request *req, int init) { - dma_addr_t key_phys, src_phys, dst_phys; + dma_addr_t key_phys = 0; + dma_addr_t src_phys, dst_phys; struct dcp *sdcp = global_sdcp; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 59d472cb11e7..251e088a53df 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -720,10 +720,6 @@ static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tf return container_of(alg, struct n2_skcipher_alg, skcipher); } -struct n2_skcipher_request_context { - struct skcipher_walk walk; -}; - static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 78a4930c6480..461eca40e878 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -2496,4 +2496,5 @@ static struct platform_driver sa_ul_driver = { }, }; module_platform_driver(sa_ul_driver); +MODULE_DESCRIPTION("K3 SA2UL crypto accelerator driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h index 494a74f52706..5ed4ba5da7f9 100644 --- a/drivers/crypto/starfive/jh7110-cryp.h +++ b/drivers/crypto/starfive/jh7110-cryp.h @@ -30,6 +30,7 @@ #define MAX_KEY_SIZE SHA512_BLOCK_SIZE #define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE #define STARFIVE_AES_CTR_LEN 
AES_BLOCK_SIZE +#define STARFIVE_RSA_MAX_KEYSZ 256 union starfive_aes_csr { u32 v; @@ -217,12 +218,11 @@ struct starfive_cryp_request_ctx { struct scatterlist *out_sg; struct ahash_request ahash_fbk_req; size_t total; - size_t nents; unsigned int blksize; unsigned int digsize; unsigned long in_sg_len; unsigned char *adata; - u8 rsa_data[] __aligned(sizeof(u32)); + u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32)); }; struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx); diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c index 33093ba4b13a..a778c4846025 100644 --- a/drivers/crypto/starfive/jh7110-rsa.c +++ b/drivers/crypto/starfive/jh7110-rsa.c @@ -31,7 +31,6 @@ /* A * A * R mod N ==> A */ #define CRYPTO_CMD_AARN 0x7 -#define STARFIVE_RSA_MAX_KEYSZ 256 #define STARFIVE_RSA_RESET 0x2 static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx) @@ -74,7 +73,7 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx, { struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; - int count = rctx->total / sizeof(u32) - 1; + int count = (ALIGN(rctx->total, 4) / 4) - 1; int loop; u32 temp; u8 opsize; @@ -251,12 +250,17 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc) struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_rsa_key *key = &ctx->rsa_key; - int ret = 0; + int ret = 0, shift = 0; writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); - rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents, - rctx->rsa_data, rctx->total); + if (!IS_ALIGNED(rctx->total, sizeof(u32))) { + shift = sizeof(u32) - (rctx->total & 0x3); + memset(rctx->rsa_data, 0, shift); + } + + rctx->total = sg_copy_to_buffer(rctx->in_sg, sg_nents(rctx->in_sg), + rctx->rsa_data + shift, rctx->total); if (enc) { key->bitlen = key->e_bitlen; @@ -305,7 +309,6 @@ static int starfive_rsa_enc(struct akcipher_request *req) rctx->in_sg = req->src; rctx->out_sg = req->dst; rctx->total = req->src_len; - rctx->nents = sg_nents(rctx->in_sg); ctx->rctx = rctx; return starfive_rsa_enc_core(ctx, 1); diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 11ad4ffdce0d..937f6dab8955 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -11,8 +11,11 @@ #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> +#include <linux/bottom_half.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/interrupt.h> @@ -40,6 +43,8 @@ /* Mode mask = bits [15..0] */ #define FLG_MODE_MASK GENMASK(15, 0) /* Bit [31..16] status */ +#define FLG_IN_OUT_DMA BIT(16) +#define FLG_HEADER_DMA BIT(17) /* Registers */ #define CRYP_CR 0x00000000 @@ -121,8 +126,12 @@ #define CR_PH_MASK 0x00030000 #define CR_NBPBL_SHIFT 20 -#define SR_BUSY 0x00000010 -#define SR_OFNE 0x00000004 +#define SR_IFNF BIT(1) +#define SR_OFNE BIT(2) +#define SR_BUSY BIT(8) + +#define DMACR_DIEN BIT(0) +#define DMACR_DOEN BIT(1) #define IMSCR_IN BIT(0) #define IMSCR_OUT BIT(1) @@ -133,7 +142,15 @@ /* Misc */ #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) #define GCM_CTR_INIT 2 -#define CRYP_AUTOSUSPEND_DELAY 50 +#define CRYP_AUTOSUSPEND_DELAY 50 + +#define CRYP_DMA_BURST_REG 4 + +enum stm32_dma_mode { + NO_DMA, + DMA_PLAIN_SG, 
+ DMA_NEED_SG_TRUNC +}; struct stm32_cryp_caps { bool aeads_support; @@ -146,6 +163,7 @@ struct stm32_cryp_caps { u32 sr; u32 din; u32 dout; + u32 dmacr; u32 imsc; u32 mis; u32 k1l; @@ -172,6 +190,7 @@ struct stm32_cryp { struct list_head list; struct device *dev; void __iomem *regs; + phys_addr_t phys_base; struct clk *clk; unsigned long flags; u32 irq_status; @@ -190,8 +209,20 @@ struct stm32_cryp { size_t header_in; size_t payload_out; + /* DMA process fields */ + struct scatterlist *in_sg; + struct scatterlist *header_sg; struct scatterlist *out_sg; + size_t in_sg_len; + size_t header_sg_len; + size_t out_sg_len; + struct completion dma_completion; + + struct dma_chan *dma_lch_in; + struct dma_chan *dma_lch_out; + enum stm32_dma_mode dma_mode; + /* IT process fields */ struct scatter_walk in_walk; struct scatter_walk out_walk; @@ -291,12 +322,20 @@ static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp) !(status & CR_CRYPEN), 10, 100000); } +static inline int stm32_cryp_wait_input(struct stm32_cryp *cryp) +{ + u32 status; + + return readl_relaxed_poll_timeout_atomic(cryp->regs + cryp->caps->sr, status, + status & SR_IFNF, 1, 10); +} + static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp) { u32 status; - return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status, - status & SR_OFNE, 10, 100000); + return readl_relaxed_poll_timeout_atomic(cryp->regs + cryp->caps->sr, status, + status & SR_OFNE, 1, 10); } static inline void stm32_cryp_key_read_enable(struct stm32_cryp *cryp) @@ -311,8 +350,13 @@ static inline void stm32_cryp_key_read_disable(struct stm32_cryp *cryp) cryp->regs + cryp->caps->cr); } +static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp); +static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp); +static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp); static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp); static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err); +static int stm32_cryp_dma_start(struct stm32_cryp *cryp); +static int stm32_cryp_it_start(struct stm32_cryp *cryp); static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) { @@ -813,11 +857,238 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) if (is_gcm(cryp) || is_ccm(cryp)) crypto_finalize_aead_request(cryp->engine, cryp->areq, err); else - crypto_finalize_skcipher_request(cryp->engine, cryp->req, - err); + crypto_finalize_skcipher_request(cryp->engine, cryp->req, err); +} + +static void stm32_cryp_header_dma_callback(void *param) +{ + struct stm32_cryp *cryp = (struct stm32_cryp *)param; + int ret; + u32 reg; + + dma_unmap_sg(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE); + + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg & ~(DMACR_DOEN | DMACR_DIEN)); + + kfree(cryp->header_sg); + + reg = stm32_cryp_read(cryp, cryp->caps->cr); + + if (cryp->header_in) { + stm32_cryp_write(cryp, cryp->caps->cr, reg | CR_CRYPEN); + + ret = stm32_cryp_wait_input(cryp); + if (ret) { + dev_err(cryp->dev, "input header ready timeout after dma\n"); + stm32_cryp_finish_req(cryp, ret); + return; + } + stm32_cryp_irq_write_gcmccm_header(cryp); + WARN_ON(cryp->header_in); + } + + if (stm32_cryp_get_input_text_len(cryp)) { + /* Phase 3 : payload */ + reg = stm32_cryp_read(cryp, cryp->caps->cr); + stm32_cryp_write(cryp, cryp->caps->cr, reg & ~CR_CRYPEN); + + reg &= ~CR_PH_MASK; + reg |= CR_PH_PAYLOAD | CR_CRYPEN; + stm32_cryp_write(cryp, cryp->caps->cr, 
reg); + + if (cryp->flags & FLG_IN_OUT_DMA) { + ret = stm32_cryp_dma_start(cryp); + if (ret) + stm32_cryp_finish_req(cryp, ret); + } else { + stm32_cryp_it_start(cryp); + } + } else { + /* + * Phase 4 : tag. + * Nothing to read, nothing to write => end request + */ + stm32_cryp_finish_req(cryp, 0); + } +} + +static void stm32_cryp_dma_callback(void *param) +{ + struct stm32_cryp *cryp = (struct stm32_cryp *)param; + int ret; + u32 reg; + + complete(&cryp->dma_completion); /* completion to indicate no timeout */ + + dma_sync_sg_for_device(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE); + + if (cryp->in_sg != cryp->out_sg) + dma_unmap_sg(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE); + + dma_unmap_sg(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE); + + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg & ~(DMACR_DOEN | DMACR_DIEN)); + + reg = stm32_cryp_read(cryp, cryp->caps->cr); + + if (is_gcm(cryp) || is_ccm(cryp)) { + kfree(cryp->in_sg); + kfree(cryp->out_sg); + } else { + if (cryp->in_sg != cryp->req->src) + kfree(cryp->in_sg); + if (cryp->out_sg != cryp->req->dst) + kfree(cryp->out_sg); + } + + if (cryp->payload_in) { + stm32_cryp_write(cryp, cryp->caps->cr, reg | CR_CRYPEN); + + ret = stm32_cryp_wait_input(cryp); + if (ret) { + dev_err(cryp->dev, "input ready timeout after dma\n"); + stm32_cryp_finish_req(cryp, ret); + return; + } + stm32_cryp_irq_write_data(cryp); + + ret = stm32_cryp_wait_output(cryp); + if (ret) { + dev_err(cryp->dev, "output ready timeout after dma\n"); + stm32_cryp_finish_req(cryp, ret); + return; + } + stm32_cryp_irq_read_data(cryp); + } + + stm32_cryp_finish_req(cryp, 0); +} + +static int stm32_cryp_header_dma_start(struct stm32_cryp *cryp) +{ + int ret; + struct dma_async_tx_descriptor *tx_in; + u32 reg; + size_t align_size; + + ret = dma_map_sg(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE); + if (!ret) { + dev_err(cryp->dev, "dma_map_sg() error\n"); + return -ENOMEM; + } + + dma_sync_sg_for_device(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE); + + tx_in = dmaengine_prep_slave_sg(cryp->dma_lch_in, cryp->header_sg, cryp->header_sg_len, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_in) { + dev_err(cryp->dev, "IN prep_slave_sg() failed\n"); + return -EINVAL; + } + + tx_in->callback_param = cryp; + tx_in->callback = stm32_cryp_header_dma_callback; + + /* Advance scatterwalk to not DMA'ed data */ + align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize); + scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2); + cryp->header_in -= align_size; + + ret = dma_submit_error(dmaengine_submit(tx_in)); + if (ret < 0) { + dev_err(cryp->dev, "DMA in submit failed\n"); + return ret; + } + dma_async_issue_pending(cryp->dma_lch_in); + + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg | DMACR_DIEN); + + return 0; +} + +static int stm32_cryp_dma_start(struct stm32_cryp *cryp) +{ + int ret; + size_t align_size; + struct dma_async_tx_descriptor *tx_in, *tx_out; + u32 reg; + + if (cryp->in_sg != cryp->out_sg) { + ret = dma_map_sg(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE); + if (!ret) { + dev_err(cryp->dev, "dma_map_sg() error\n"); + return -ENOMEM; + } + } + + ret = dma_map_sg(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE); + if (!ret) { + dev_err(cryp->dev, "dma_map_sg() error\n"); + return -ENOMEM; + } + + dma_sync_sg_for_device(cryp->dev, cryp->in_sg, 
cryp->in_sg_len, DMA_TO_DEVICE); + + tx_in = dmaengine_prep_slave_sg(cryp->dma_lch_in, cryp->in_sg, cryp->in_sg_len, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_in) { + dev_err(cryp->dev, "IN prep_slave_sg() failed\n"); + return -EINVAL; + } + + /* No callback necessary */ + tx_in->callback_param = cryp; + tx_in->callback = NULL; + + tx_out = dmaengine_prep_slave_sg(cryp->dma_lch_out, cryp->out_sg, cryp->out_sg_len, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx_out) { + dev_err(cryp->dev, "OUT prep_slave_sg() failed\n"); + return -EINVAL; + } + + reinit_completion(&cryp->dma_completion); + tx_out->callback = stm32_cryp_dma_callback; + tx_out->callback_param = cryp; + + /* Advance scatterwalk to not DMA'ed data */ + align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize); + scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2); + cryp->payload_in -= align_size; + + ret = dma_submit_error(dmaengine_submit(tx_in)); + if (ret < 0) { + dev_err(cryp->dev, "DMA in submit failed\n"); + return ret; + } + dma_async_issue_pending(cryp->dma_lch_in); + + /* Advance scatterwalk to not DMA'ed data */ + scatterwalk_copychunks(NULL, &cryp->out_walk, align_size, 2); + cryp->payload_out -= align_size; + ret = dma_submit_error(dmaengine_submit(tx_out)); + if (ret < 0) { + dev_err(cryp->dev, "DMA out submit failed\n"); + return ret; + } + dma_async_issue_pending(cryp->dma_lch_out); + + reg = stm32_cryp_read(cryp, cryp->caps->dmacr); + stm32_cryp_write(cryp, cryp->caps->dmacr, reg | DMACR_DOEN | DMACR_DIEN); + + if (!wait_for_completion_timeout(&cryp->dma_completion, msecs_to_jiffies(1000))) { + dev_err(cryp->dev, "DMA out timed out\n"); + dmaengine_terminate_sync(cryp->dma_lch_out); + return -ETIMEDOUT; + } + + return 0; } -static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) +static int stm32_cryp_it_start(struct stm32_cryp *cryp) { /* Enable interrupt and let the IRQ handler do everything */ stm32_cryp_write(cryp, cryp->caps->imsc, IMSCR_IN | IMSCR_OUT); @@ -1149,13 +1420,256 @@ static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req) return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); } +static enum stm32_dma_mode stm32_cryp_dma_check_sg(struct scatterlist *test_sg, size_t len, + size_t block_size) +{ + struct scatterlist *sg; + int i; + + if (len <= 16) + return NO_DMA; /* Faster */ + + for_each_sg(test_sg, sg, sg_nents(test_sg), i) { + if (!IS_ALIGNED(sg->length, block_size) && !sg_is_last(sg)) + return NO_DMA; + + if (sg->offset % sizeof(u32)) + return NO_DMA; + + if (sg_is_last(sg) && !IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) + return DMA_NEED_SG_TRUNC; + } + + return DMA_PLAIN_SG; +} + +static enum stm32_dma_mode stm32_cryp_dma_check(struct stm32_cryp *cryp, struct scatterlist *in_sg, + struct scatterlist *out_sg) +{ + enum stm32_dma_mode ret = DMA_PLAIN_SG; + + if (!is_aes(cryp)) + return NO_DMA; + + if (!cryp->dma_lch_in || !cryp->dma_lch_out) + return NO_DMA; + + ret = stm32_cryp_dma_check_sg(in_sg, cryp->payload_in, AES_BLOCK_SIZE); + if (ret == NO_DMA) + return ret; + + ret = stm32_cryp_dma_check_sg(out_sg, cryp->payload_out, AES_BLOCK_SIZE); + if (ret == NO_DMA) + return ret; + + /* Check CTR counter overflow */ + if (is_aes(cryp) && is_ctr(cryp)) { + u32 c; + __be32 iv3; + + memcpy(&iv3, &cryp->req->iv[3 * sizeof(u32)], sizeof(iv3)); + c = be32_to_cpu(iv3); + if ((c + cryp->payload_in) < cryp->payload_in) + return NO_DMA; + } + + /* Workaround */ + if (is_aes(cryp) && is_ctr(cryp) && ret == DMA_NEED_SG_TRUNC) + return NO_DMA; + + 
+static int stm32_cryp_truncate_sg(struct scatterlist **new_sg, size_t *new_sg_len,
+				  struct scatterlist *sg, off_t skip, size_t size)
+{
+	struct scatterlist *cur;
+	int alloc_sg_len;
+
+	*new_sg_len = 0;
+
+	if (!sg || !size) {
+		*new_sg = NULL;
+		return 0;
+	}
+
+	alloc_sg_len = sg_nents_for_len(sg, skip + size);
+	if (alloc_sg_len < 0)
+		return alloc_sg_len;
+
+	/* We allocate too many sg entries, but it is easier */
+	*new_sg = kmalloc_array((size_t)alloc_sg_len, sizeof(struct scatterlist), GFP_KERNEL);
+	if (!*new_sg)
+		return -ENOMEM;
+
+	sg_init_table(*new_sg, (unsigned int)alloc_sg_len);
+
+	cur = *new_sg;
+	while (sg && size) {
+		unsigned int len = sg->length;
+		unsigned int offset = sg->offset;
+
+		if (skip > len) {
+			skip -= len;
+			sg = sg_next(sg);
+			continue;
+		}
+
+		if (skip) {
+			len -= skip;
+			offset += skip;
+			skip = 0;
+		}
+
+		if (size < len)
+			len = size;
+
+		if (len > 0) {
+			(*new_sg_len)++;
+			size -= len;
+			sg_set_page(cur, sg_page(sg), len, offset);
+			if (size == 0)
+				sg_mark_end(cur);
+			cur = sg_next(cur);
+		}
+
+		sg = sg_next(sg);
+	}
+
+	return 0;
+}
+
+static int stm32_cryp_cipher_prepare(struct stm32_cryp *cryp, struct scatterlist *in_sg,
+				     struct scatterlist *out_sg)
+{
+	size_t align_size;
+	int ret;
+
+	cryp->dma_mode = stm32_cryp_dma_check(cryp, in_sg, out_sg);
+
+	scatterwalk_start(&cryp->in_walk, in_sg);
+	scatterwalk_start(&cryp->out_walk, out_sg);
+
+	if (cryp->dma_mode == NO_DMA) {
+		cryp->flags &= ~FLG_IN_OUT_DMA;
+
+		if (is_ctr(cryp))
+			memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr));
+
+	} else if (cryp->dma_mode == DMA_NEED_SG_TRUNC) {
+
+		cryp->flags |= FLG_IN_OUT_DMA;
+
+		align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize);
+		ret = stm32_cryp_truncate_sg(&cryp->in_sg, &cryp->in_sg_len, in_sg, 0, align_size);
+		if (ret)
+			return ret;
+
+		ret = stm32_cryp_truncate_sg(&cryp->out_sg, &cryp->out_sg_len, out_sg, 0,
+					     align_size);
+		if (ret) {
+			kfree(cryp->in_sg);
+			return ret;
+		}
+	} else {
+		cryp->flags |= FLG_IN_OUT_DMA;
+
+		cryp->in_sg = in_sg;
+		cryp->out_sg = out_sg;
+
+		ret = sg_nents_for_len(cryp->in_sg, cryp->payload_in);
+		if (ret < 0)
+			return ret;
+		cryp->in_sg_len = (size_t)ret;
+
+		ret = sg_nents_for_len(out_sg, cryp->payload_out);
+		if (ret < 0)
+			return ret;
+		cryp->out_sg_len = (size_t)ret;
+	}
+
+	return 0;
+}
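stm32_cryp_truncate_sg() above materialises the window [skip, skip + size) of an existing scatterlist so that the DMA engine only ever sees whole blocks. The skip/clamp walk it performs is easier to see on flat buffers; a simplified standalone analogue (hypothetical struct seg in place of struct scatterlist, no page handling):

#include <stdio.h>
#include <stddef.h>

struct seg { const char *buf; size_t len; };

/* Build the window [skip, skip + size) of a segment chain in out[],
 * clamping each segment the same way the driver clamps sg entries. */
static size_t window_segs(const struct seg *in, size_t nsegs,
			  size_t skip, size_t size, struct seg *out)
{
	size_t n = 0;

	for (size_t i = 0; i < nsegs && size; i++) {
		const char *buf = in[i].buf;
		size_t len = in[i].len;

		if (skip >= len) {	/* window starts after this segment */
			skip -= len;
			continue;
		}
		buf += skip;		/* partial skip inside this segment */
		len -= skip;
		skip = 0;

		if (len > size)		/* clamp to the remaining window */
			len = size;

		out[n].buf = buf;
		out[n].len = len;
		n++;
		size -= len;
	}
	return n;			/* number of output segments */
}

int main(void)
{
	struct seg in[] = { { "0123456789", 10 }, { "abcdefghij", 10 } };
	struct seg out[2];
	size_t n = window_segs(in, 2, 6, 8, out);	/* -> "6789", "abcd" */

	for (size_t i = 0; i < n; i++)
		printf("%zu: %.*s\n", i, (int)out[i].len, out[i].buf);
	return 0;
}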
+static int stm32_cryp_aead_prepare(struct stm32_cryp *cryp, struct scatterlist *in_sg,
+				   struct scatterlist *out_sg)
+{
+	size_t align_size;
+	off_t skip;
+	int ret, ret2;
+
+	cryp->header_sg = NULL;
+	cryp->in_sg = NULL;
+	cryp->out_sg = NULL;
+
+	if (!cryp->dma_lch_in || !cryp->dma_lch_out) {
+		cryp->dma_mode = NO_DMA;
+		cryp->flags &= ~(FLG_IN_OUT_DMA | FLG_HEADER_DMA);
+
+		return 0;
+	}
+
+	/* CCM hw_init may already have advanced into the header */
+	skip = cryp->areq->assoclen - cryp->header_in;
+
+	align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize);
+	ret = stm32_cryp_truncate_sg(&cryp->header_sg, &cryp->header_sg_len, in_sg, skip,
+				     align_size);
+	if (ret)
+		return ret;
+
+	ret = stm32_cryp_dma_check_sg(cryp->header_sg, align_size, AES_BLOCK_SIZE);
+	if (ret == NO_DMA) {
+		/* We cannot DMA the header */
+		kfree(cryp->header_sg);
+		cryp->header_sg = NULL;
+
+		cryp->flags &= ~FLG_HEADER_DMA;
+	} else {
+		cryp->flags |= FLG_HEADER_DMA;
+	}
+
+	/* Now skip the whole header to land at the payload start */
+	skip = cryp->areq->assoclen;
+	align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize);
+	ret = stm32_cryp_truncate_sg(&cryp->in_sg, &cryp->in_sg_len, in_sg, skip, align_size);
+	if (ret) {
+		kfree(cryp->header_sg);
+		return ret;
+	}
+
+	/* For the out buffer, align_size is the same as for the in buffer */
+	ret = stm32_cryp_truncate_sg(&cryp->out_sg, &cryp->out_sg_len, out_sg, skip, align_size);
+	if (ret) {
+		kfree(cryp->header_sg);
+		kfree(cryp->in_sg);
+		return ret;
+	}
+
+	ret = stm32_cryp_dma_check_sg(cryp->in_sg, align_size, AES_BLOCK_SIZE);
+	ret2 = stm32_cryp_dma_check_sg(cryp->out_sg, align_size, AES_BLOCK_SIZE);
+	if (ret == NO_DMA || ret2 == NO_DMA) {
+		kfree(cryp->in_sg);
+		cryp->in_sg = NULL;
+
+		kfree(cryp->out_sg);
+		cryp->out_sg = NULL;
+
+		cryp->flags &= ~FLG_IN_OUT_DMA;
+	} else {
+		cryp->flags |= FLG_IN_OUT_DMA;
+	}
+
+	return 0;
+}
+
 static int stm32_cryp_prepare_req(struct skcipher_request *req,
 				  struct aead_request *areq)
 {
 	struct stm32_cryp_ctx *ctx;
 	struct stm32_cryp *cryp;
 	struct stm32_cryp_reqctx *rctx;
-	struct scatterlist *in_sg;
+	struct scatterlist *in_sg, *out_sg;
 	int ret;
 
 	if (!req && !areq)
@@ -1169,8 +1683,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
 	rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
 	rctx->mode &= FLG_MODE_MASK;
 
-	ctx->cryp = cryp;
-
 	cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
 	cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
 	cryp->ctx = ctx;
@@ -1182,6 +1694,15 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
 		cryp->payload_in = req->cryptlen;
 		cryp->payload_out = req->cryptlen;
 		cryp->authsize = 0;
+
+		in_sg = req->src;
+		out_sg = req->dst;
+
+		ret = stm32_cryp_cipher_prepare(cryp, in_sg, out_sg);
+		if (ret)
+			return ret;
+
+		ret = stm32_cryp_hw_init(cryp);
 	} else {
 		/*
 		 * Length of input and output data:
@@ -1211,23 +1732,22 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
 			cryp->header_in = areq->assoclen;
 			cryp->payload_out = cryp->payload_in;
 		}
-	}
 
-	in_sg = req ? req->src : areq->src;
-	scatterwalk_start(&cryp->in_walk, in_sg);
-
-	cryp->out_sg = req ? req->dst : areq->dst;
-	scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+		in_sg = areq->src;
+		out_sg = areq->dst;
 
-	if (is_gcm(cryp) || is_ccm(cryp)) {
+		scatterwalk_start(&cryp->in_walk, in_sg);
+		scatterwalk_start(&cryp->out_walk, out_sg);
 		/* In output, jump after assoc data */
 		scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2);
-	}
 
-	if (is_ctr(cryp))
-		memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr));
+		ret = stm32_cryp_hw_init(cryp);
+		if (ret)
+			return ret;
+
+		ret = stm32_cryp_aead_prepare(cryp, in_sg, out_sg);
+	}
 
-	ret = stm32_cryp_hw_init(cryp);
 	return ret;
 }
 
@@ -1239,12 +1759,24 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
 	struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
 			crypto_skcipher_reqtfm(req));
 	struct stm32_cryp *cryp = ctx->cryp;
+	int ret;
 
 	if (!cryp)
 		return -ENODEV;
 
-	return stm32_cryp_prepare_req(req, NULL) ?:
-	       stm32_cryp_cpu_start(cryp);
+	ret = stm32_cryp_prepare_req(req, NULL);
+	if (ret)
+		return ret;
+
+	if (cryp->flags & FLG_IN_OUT_DMA)
+		ret = stm32_cryp_dma_start(cryp);
+	else
+		ret = stm32_cryp_it_start(cryp);
+
+	if (ret == -ETIMEDOUT)
+		stm32_cryp_finish_req(cryp, ret);
+
+	return ret;
 }
 
 static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
@@ -1262,13 +1794,20 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
 	if (err)
 		return err;
 
-	if (unlikely(!cryp->payload_in && !cryp->header_in)) {
+	if (!stm32_cryp_get_input_text_len(cryp) && !cryp->header_in &&
+	    !(cryp->flags & FLG_HEADER_DMA)) {
 		/* No input data to process: get tag and finish */
 		stm32_cryp_finish_req(cryp, 0);
 		return 0;
 	}
 
-	return stm32_cryp_cpu_start(cryp);
+	if (cryp->flags & FLG_HEADER_DMA)
+		return stm32_cryp_header_dma_start(cryp);
+
+	if (!cryp->header_in && cryp->flags & FLG_IN_OUT_DMA)
+		return stm32_cryp_dma_start(cryp);
+
+	return stm32_cryp_it_start(cryp);
 }
 
 static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
@@ -1665,8 +2204,11 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
 			it_mask &= ~IMSCR_OUT;
 	stm32_cryp_write(cryp, cryp->caps->imsc, it_mask);
 
-	if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
+	if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) {
+		local_bh_disable();
 		stm32_cryp_finish_req(cryp, 0);
+		local_bh_enable();
+	}
 
 	return IRQ_HANDLED;
 }
@@ -1680,13 +2222,72 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
 	return IRQ_WAKE_THREAD;
 }
 
+static int stm32_cryp_dma_init(struct stm32_cryp *cryp)
+{
+	struct dma_slave_config dma_conf;
+	struct dma_chan *chan;
+	int ret;
+
+	memset(&dma_conf, 0, sizeof(dma_conf));
+
+	dma_conf.direction = DMA_MEM_TO_DEV;
+	dma_conf.dst_addr = cryp->phys_base + cryp->caps->din;
+	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma_conf.dst_maxburst = CRYP_DMA_BURST_REG;
+	dma_conf.device_fc = false;
+
+	chan = dma_request_chan(cryp->dev, "in");
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	cryp->dma_lch_in = chan;
+	ret = dmaengine_slave_config(cryp->dma_lch_in, &dma_conf);
+	if (ret) {
+		dma_release_channel(cryp->dma_lch_in);
+		cryp->dma_lch_in = NULL;
+		dev_err(cryp->dev, "Couldn't configure DMA in slave.\n");
+		return ret;
+	}
+
+	memset(&dma_conf, 0, sizeof(dma_conf));
+
+	dma_conf.direction = DMA_DEV_TO_MEM;
+	dma_conf.src_addr = cryp->phys_base + cryp->caps->dout;
+	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma_conf.src_maxburst = CRYP_DMA_BURST_REG;
+	dma_conf.device_fc = false;
+
+	chan = dma_request_chan(cryp->dev, "out");
+	if (IS_ERR(chan)) {
+		dma_release_channel(cryp->dma_lch_in);
+		cryp->dma_lch_in = NULL;
+		return PTR_ERR(chan);
+	}
+
+	cryp->dma_lch_out = chan;
+
+	ret = dmaengine_slave_config(cryp->dma_lch_out, &dma_conf);
+	if (ret) {
+		dma_release_channel(cryp->dma_lch_out);
+		cryp->dma_lch_out = NULL;
+		dev_err(cryp->dev, "Couldn't configure DMA out slave.\n");
+		dma_release_channel(cryp->dma_lch_in);
+		cryp->dma_lch_in = NULL;
+		return ret;
+	}
+
+	init_completion(&cryp->dma_completion);
+
+	return 0;
+}
+
 static struct skcipher_engine_alg crypto_algs[] = {
 {
 	.base = {
 		.base.cra_name = "ecb(aes)",
 		.base.cra_driver_name = "stm32-ecb-aes",
-		.base.cra_priority = 200,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_priority = 300,
+		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.base.cra_blocksize = AES_BLOCK_SIZE,
 		.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
 		.base.cra_alignmask = 0,
@@ -1707,8 +2308,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
 	.base = {
 		.base.cra_name = "cbc(aes)",
 		.base.cra_driver_name = "stm32-cbc-aes",
-		.base.cra_priority = 200,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_priority = 300,
+		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.base.cra_blocksize = AES_BLOCK_SIZE,
 		.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
 		.base.cra_alignmask = 0,
@@ -1730,8 +2331,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
 	.base = {
 		.base.cra_name = "ctr(aes)",
 		.base.cra_driver_name = "stm32-ctr-aes",
-		.base.cra_priority = 200,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_priority = 300,
+		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.base.cra_blocksize = 1,
 		.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
 		.base.cra_alignmask = 0,
@@ -1753,8 +2354,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
 	.base = {
 		.base.cra_name = "ecb(des)",
 		.base.cra_driver_name = "stm32-ecb-des",
-		.base.cra_priority = 200,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_priority = 300,
+		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.base.cra_blocksize = DES_BLOCK_SIZE,
 		.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
 		.base.cra_alignmask = 0,
@@ -1775,8 +2376,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
 	.base = {
 		.base.cra_name = "cbc(des)",
 		.base.cra_driver_name = "stm32-cbc-des",
-		.base.cra_priority = 200,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_priority = 300,
+		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.base.cra_blocksize = DES_BLOCK_SIZE,
 		.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
 		.base.cra_alignmask = 0,
@@ -1798,8 +2399,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
 	.base = {
 		.base.cra_name = "ecb(des3_ede)",
 		.base.cra_driver_name = "stm32-ecb-des3",
-		.base.cra_priority = 200,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_priority = 300,
+		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.base.cra_blocksize = DES_BLOCK_SIZE,
 		.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
 		.base.cra_alignmask = 0,
@@ -1820,8 +2421,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
 	.base = {
 		.base.cra_name = "cbc(des3_ede)",
 		.base.cra_driver_name = "stm32-cbc-des3",
-		.base.cra_priority = 200,
-		.base.cra_flags = CRYPTO_ALG_ASYNC,
+		.base.cra_priority = 300,
+		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.base.cra_blocksize = DES_BLOCK_SIZE,
 		.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
 		.base.cra_alignmask = 0,
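Raising cra_priority from 200 to 300 makes these implementations win the algorithm lookup against lower-priority providers of the same cra_name (the generic C implementations typically register at around 100), while CRYPTO_ALG_KERN_DRIVER_ONLY marks them as reachable only via the kernel crypto API. A sketch of how a kernel-side user would land on the new driver, assuming STM32 hardware with this patch applied (illustrative, not from this series):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/printk.h>

static int pick_cbc_aes(void)
{
	/* Resolves "cbc(aes)" to the highest-cra_priority provider */
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* With this patch applied, expect "stm32-cbc-aes" here */
	pr_info("cbc(aes) backed by %s\n",
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

	crypto_free_skcipher(tfm);
	return 0;
}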
.cra_name = "gcm(aes)", .cra_driver_name = "stm32-gcm-aes", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), .cra_alignmask = 0, @@ -1877,8 +2478,8 @@ static struct aead_engine_alg aead_algs[] = { .base.base = { .cra_name = "ccm(aes)", .cra_driver_name = "stm32-ccm-aes", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), .cra_alignmask = 0, @@ -1901,6 +2502,7 @@ static const struct stm32_cryp_caps ux500_data = { .sr = UX500_CRYP_SR, .din = UX500_CRYP_DIN, .dout = UX500_CRYP_DOUT, + .dmacr = UX500_CRYP_DMACR, .imsc = UX500_CRYP_IMSC, .mis = UX500_CRYP_MIS, .k1l = UX500_CRYP_K1L, @@ -1923,6 +2525,7 @@ static const struct stm32_cryp_caps f7_data = { .sr = CRYP_SR, .din = CRYP_DIN, .dout = CRYP_DOUT, + .dmacr = CRYP_DMACR, .imsc = CRYP_IMSCR, .mis = CRYP_MISR, .k1l = CRYP_K1LR, @@ -1945,6 +2548,7 @@ static const struct stm32_cryp_caps mp1_data = { .sr = CRYP_SR, .din = CRYP_DIN, .dout = CRYP_DOUT, + .dmacr = CRYP_DMACR, .imsc = CRYP_IMSCR, .mis = CRYP_MISR, .k1l = CRYP_K1LR, @@ -1985,6 +2589,8 @@ static int stm32_cryp_probe(struct platform_device *pdev) if (IS_ERR(cryp->regs)) return PTR_ERR(cryp->regs); + cryp->phys_base = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start; + irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -2030,6 +2636,17 @@ static int stm32_cryp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cryp); + ret = stm32_cryp_dma_init(cryp); + switch (ret) { + case 0: + break; + case -ENODEV: + dev_dbg(dev, "DMA mode not available\n"); + break; + default: + goto err_dma; + } + spin_lock(&cryp_list.lock); list_add(&cryp->list, &cryp_list.dev_list); spin_unlock(&cryp_list.lock); @@ -2075,6 +2692,12 @@ err_engine1: spin_lock(&cryp_list.lock); list_del(&cryp->list); spin_unlock(&cryp_list.lock); + + if (cryp->dma_lch_in) + dma_release_channel(cryp->dma_lch_in); + if (cryp->dma_lch_out) + dma_release_channel(cryp->dma_lch_out); +err_dma: err_rst: pm_runtime_disable(dev); pm_runtime_put_noidle(dev); @@ -2101,6 +2724,12 @@ static void stm32_cryp_remove(struct platform_device *pdev) list_del(&cryp->list); spin_unlock(&cryp_list.lock); + if (cryp->dma_lch_in) + dma_release_channel(cryp->dma_lch_in); + + if (cryp->dma_lch_out) + dma_release_channel(cryp->dma_lch_out); + pm_runtime_disable(cryp->dev); pm_runtime_put_noidle(cryp->dev); diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index 9955874b3dc3..f94c0331b148 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -326,7 +326,6 @@ static void tegra_se_remove(struct platform_device *pdev) crypto_engine_stop(se->engine); crypto_engine_exit(se->engine); - iommu_fwspec_free(se->dev); host1x_client_unregister(&se->client); } diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 30cd040aa03b..d0278eb568b9 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -96,11 +96,10 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq) static int virtcrypto_find_vqs(struct virtio_crypto *vi) { - vq_callback_t **callbacks; + struct virtqueue_info *vqs_info; struct virtqueue **vqs; int ret = -ENOMEM; int i, total_vqs; - 
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 30cd040aa03b..d0278eb568b9 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -96,11 +96,10 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq)
 
 static int virtcrypto_find_vqs(struct virtio_crypto *vi)
 {
-	vq_callback_t **callbacks;
+	struct virtqueue_info *vqs_info;
 	struct virtqueue **vqs;
 	int ret = -ENOMEM;
 	int i, total_vqs;
-	const char **names;
 	struct device *dev = &vi->vdev->dev;
 
 	/*
@@ -114,26 +113,23 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
 	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
 	if (!vqs)
 		goto err_vq;
-	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
-	if (!callbacks)
-		goto err_callback;
-	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
-	if (!names)
-		goto err_names;
+	vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
+	if (!vqs_info)
+		goto err_vqs_info;
 
 	/* Parameters for control virtqueue */
-	callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
-	names[total_vqs - 1] = "controlq";
+	vqs_info[total_vqs - 1].callback = virtcrypto_ctrlq_callback;
+	vqs_info[total_vqs - 1].name = "controlq";
 
 	/* Allocate/initialize parameters for data virtqueues */
 	for (i = 0; i < vi->max_data_queues; i++) {
-		callbacks[i] = virtcrypto_dataq_callback;
+		vqs_info[i].callback = virtcrypto_dataq_callback;
 		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
 				"dataq.%d", i);
-		names[i] = vi->data_vq[i].name;
+		vqs_info[i].name = vi->data_vq[i].name;
 	}
 
-	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
+	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
 	if (ret)
 		goto err_find;
 
@@ -153,18 +149,15 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
 				(unsigned long)&vi->data_vq[i]);
 	}
 
-	kfree(names);
-	kfree(callbacks);
+	kfree(vqs_info);
 	kfree(vqs);
 	return 0;
 
err_engine:
err_find:
-	kfree(names);
-err_names:
-	kfree(callbacks);
-err_callback:
+	kfree(vqs_info);
+err_vqs_info:
 	kfree(vqs);
err_vq:
 	return ret;
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index e61405718840..7f0ec6887a39 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -446,4 +446,5 @@ static struct platform_driver zynqmp_aes_driver = {
 };
 module_platform_driver(zynqmp_aes_driver);
 
+MODULE_DESCRIPTION("Xilinx ZynqMP AES Driver");
 MODULE_LICENSE("GPL");
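The one-line zynqmp-aes-gcm.c change appears to be part of the tree-wide cleanup in which modpost, under W=1 builds, warns about modules lacking a MODULE_DESCRIPTION(). The minimal metadata every module is expected to carry looks roughly like this (generic skeleton, not taken from the patch):

#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;	/* nothing to do in this skeleton */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_DESCRIPTION("Demo module showing the expected metadata");
MODULE_LICENSE("GPL");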