Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 21
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 1
-rw-r--r--  drivers/crypto/atmel-aes.c | 6
-rw-r--r--  drivers/crypto/caam/caamalg.c | 6
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 2
-rw-r--r--  drivers/crypto/caam/caamhash.c | 3
-rw-r--r--  drivers/crypto/caam/caampkc.c | 3
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_main.c | 5
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 2
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 259
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 1
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 7
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 2
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 483
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 2
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 12
-rw-r--r--  drivers/crypto/keembay/keembay-ocs-ecc.c | 1
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 4
-rw-r--r--  drivers/crypto/marvell/octeontx2/Makefile | 2
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 1
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c | 108
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h | 20
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 3
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 18
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 315
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h | 7
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c | 5
-rw-r--r--  drivers/crypto/omap-aes.c | 2
-rw-r--r--  drivers/crypto/omap-des.c | 8
-rw-r--r--  drivers/crypto/qat/Kconfig | 1
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c | 154
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h | 2
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_drv.c | 33
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 12
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 15
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 12
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 15
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 47
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_engine.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c | 47
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_common.h | 13
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_strings.h | 3
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 42
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen2_hw_data.c | 105
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen2_hw_data.h | 22
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen2_pfvf.c | 381
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen2_pfvf.h | 29
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_hw_data.c | 69
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_hw_data.h | 17
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_pfvf.c | 148
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_pfvf.h | 17
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 11
-rw-r--r--  drivers/crypto/qat/qat_common/adf_isr.c | 111
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 416
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 93
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_msg.h | 259
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c | 52
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h | 18
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c | 346
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h | 13
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_utils.c | 65
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_utils.h | 31
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c | 167
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h | 23
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c | 368
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h | 17
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 59
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf_isr.c | 106
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h | 4
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_hw.h | 13
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c | 25
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 41
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 44
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 2
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 15
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 1
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qce/aead.c | 2
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/qce/skcipher.c | 2
-rw-r--r--  drivers/crypto/sa2ul.c | 19
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c | 4
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 988
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 6
-rw-r--r--  drivers/crypto/ux500/cryp/cryp.h | 2
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 26
93 files changed, 4072 insertions, 1855 deletions
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 00194d1d9ae6..d8623c7e0d1d 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -106,6 +106,24 @@ static const struct ce_variant ce_a64_variant = {
.trng = CE_ID_NOTSUPP,
};
+static const struct ce_variant ce_d1_variant = {
+ .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
+ },
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ALG_SHA384, CE_ALG_SHA512
+ },
+ .op_mode = { CE_OP_ECB, CE_OP_CBC
+ },
+ .ce_clks = {
+ { "bus", 0, 200000000 },
+ { "mod", 300000000, 0 },
+ { "ram", 0, 400000000 },
+ },
+ .esr = ESR_D1,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ALG_TRNG,
+};
+
static const struct ce_variant ce_r40_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
@@ -192,6 +210,7 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
break;
case ESR_A64:
+ case ESR_D1:
case ESR_H5:
case ESR_R40:
v >>= (flow * 4);
@@ -990,6 +1009,8 @@ static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
.data = &ce_h3_variant },
{ .compatible = "allwinner,sun8i-r40-crypto",
.data = &ce_r40_variant },
+ { .compatible = "allwinner,sun20i-d1-crypto",
+ .data = &ce_d1_variant },
{ .compatible = "allwinner,sun50i-a64-crypto",
.data = &ce_a64_variant },
{ .compatible = "allwinner,sun50i-h5-crypto",
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index cec781d5063c..624a5926f21f 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -94,6 +94,7 @@
#define ESR_R40 2
#define ESR_H5 3
#define ESR_H6 4
+#define ESR_D1 5
#define PRNG_DATA_SIZE (160 / 8)
#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 9391ccc03382..fe0558403191 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -960,6 +960,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
ctx = crypto_tfm_ctx(areq->tfm);
dd->areq = areq;
+ dd->ctx = ctx;
start_async = (areq != new_areq);
dd->is_async = start_async;
@@ -1274,7 +1275,6 @@ static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_start;
return 0;
@@ -1291,7 +1291,6 @@ static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_ctr_start;
return 0;
@@ -1783,7 +1782,6 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_gcm_start;
return 0;
@@ -1927,7 +1925,6 @@ static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
crypto_skcipher_reqsize(ctx->fallback_tfm));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_xts_start;
return 0;
@@ -2154,7 +2151,6 @@ static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
auth_reqsize));
ctx->base.dd = dd;
- ctx->base.dd->ctx = &ctx->base;
ctx->base.start = atmel_aes_authenc_start;
return 0;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 8697ae53b063..d3d8bb0a6990 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1533,6 +1533,9 @@ static int aead_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
aead_unmap(ctx->jrdev, rctx->edesc, req);
kfree(rctx->edesc);
@@ -1762,6 +1765,9 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
skcipher_unmap(ctx->jrdev, rctx->edesc, req);
kfree(rctx->edesc);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 8b8ed77d8715..6753f0e6e55d 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -5470,7 +5470,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
dpaa2_fd_set_flc(&fd, req->flc_dma);
- ppriv = this_cpu_ptr(priv->ppriv);
+ ppriv = raw_cpu_ptr(priv->ppriv);
for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
&fd);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e8a6d8bc43b5..36ef738e4a18 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -765,6 +765,9 @@ static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
ahash_unmap(jrdev, state->edesc, req, 0);
kfree(state->edesc);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index bf6275ffc4aa..886727576710 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -380,6 +380,9 @@ static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
+ if (ret == -ENOSPC && engine->retry_support)
+ return ret;
+
if (ret != -EINPROGRESS) {
rsa_pub_unmap(jrdev, req_ctx->edesc, req);
rsa_io_unmap(jrdev, req_ctx->edesc, req);
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 112b12a32542..c246920e6f54 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -104,17 +104,14 @@ static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
u32 nr_queues)
{
u32 i;
- size_t size;
int ret;
struct pending_queue *queue = NULL;
pqinfo->nr_queues = nr_queues;
pqinfo->qlen = qlen;
- size = (qlen * sizeof(struct pending_entry));
-
for_each_pending_queue(pqinfo, queue, i) {
- queue->head = kzalloc((size), GFP_KERNEL);
+ queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
if (!queue->head) {
ret = -ENOMEM;
goto pending_qfail;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 9ce4b68e9c48..c531d13d971f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -31,7 +31,7 @@
#define MAX_CCPS 32
/* Limit CCP use to a specifed number of queues per device */
-static unsigned int nqueues = 0;
+static unsigned int nqueues;
module_param(nqueues, uint, 0444);
MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)");
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e09925d86bf3..8fd774a10edc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -22,6 +22,7 @@
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
+#include <linux/fs.h>
#include <asm/smp.h>
@@ -43,6 +44,14 @@ static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+static char *init_ex_path;
+module_param(init_ex_path, charp, 0444);
+MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX");
+
+static bool psp_init_on_probe = true;
+module_param(psp_init_on_probe, bool, 0444);
+MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -58,6 +67,14 @@ static int psp_timeout;
#define SEV_ES_TMR_SIZE (1024 * 1024)
static void *sev_es_tmr;
+/* INIT_EX NV Storage:
+ * The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page
+ * allocator to allocate the memory, which will return aligned memory for the
+ * specified allocation order.
+ */
+#define NV_LENGTH (32 * 1024)
+static void *sev_init_ex_buffer;
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -107,6 +124,7 @@ static int sev_cmd_buffer_len(int cmd)
{
switch (cmd) {
case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex);
case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
@@ -141,6 +159,112 @@ static int sev_cmd_buffer_len(int cmd)
return 0;
}
+static void *sev_fw_alloc(unsigned long len)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL, get_order(len));
+ if (!page)
+ return NULL;
+
+ return page_address(page);
+}
+
+static int sev_read_init_ex_file(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct file *fp;
+ ssize_t nread;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (!sev_init_ex_buffer)
+ return -EOPNOTSUPP;
+
+ fp = filp_open(init_ex_path, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ int ret = PTR_ERR(fp);
+
+ dev_err(sev->dev,
+ "SEV: could not open %s for read, error %d\n",
+ init_ex_path, ret);
+ return ret;
+ }
+
+ nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
+ if (nread != NV_LENGTH) {
+ dev_err(sev->dev,
+ "SEV: failed to read %u bytes to non volatile memory area, ret %ld\n",
+ NV_LENGTH, nread);
+ return -EIO;
+ }
+
+ dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
+ filp_close(fp, NULL);
+
+ return 0;
+}
+
+static void sev_write_init_ex_file(void)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct file *fp;
+ loff_t offset = 0;
+ ssize_t nwrite;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (!sev_init_ex_buffer)
+ return;
+
+ fp = filp_open(init_ex_path, O_CREAT | O_WRONLY, 0600);
+ if (IS_ERR(fp)) {
+ dev_err(sev->dev,
+ "SEV: could not open file for write, error %ld\n",
+ PTR_ERR(fp));
+ return;
+ }
+
+ nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
+ vfs_fsync(fp, 0);
+ filp_close(fp, NULL);
+
+ if (nwrite != NV_LENGTH) {
+ dev_err(sev->dev,
+ "SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
+ NV_LENGTH, nwrite);
+ return;
+ }
+
+ dev_dbg(sev->dev, "SEV: write successful to NV file\n");
+}
+
+static void sev_write_init_ex_file_if_required(int cmd_id)
+{
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (!sev_init_ex_buffer)
+ return;
+
+ /*
+ * Only a few platform commands modify the SPI/NV area, but none of the
+ * non-platform commands do. Only INIT(_EX), PLATFORM_RESET, PEK_GEN,
+ * PEK_CERT_IMPORT, and PDH_GEN do.
+ */
+ switch (cmd_id) {
+ case SEV_CMD_FACTORY_RESET:
+ case SEV_CMD_INIT_EX:
+ case SEV_CMD_PDH_GEN:
+ case SEV_CMD_PEK_CERT_IMPORT:
+ case SEV_CMD_PEK_GEN:
+ break;
+ default:
+ return;
+ }
+
+ sev_write_init_ex_file();
+}
+
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct psp_device *psp = psp_master;
@@ -210,6 +334,8 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
cmd, reg & PSP_CMDRESP_ERR_MASK);
ret = -EIO;
+ } else {
+ sev_write_init_ex_file_if_required(cmd);
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
@@ -236,37 +362,85 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret)
return rc;
}
-static int __sev_platform_init_locked(int *error)
+static int __sev_init_locked(int *error)
{
- struct psp_device *psp = psp_master;
struct sev_data_init data;
- struct sev_device *sev;
- int rc = 0;
- if (!psp || !psp->sev_data)
- return -ENODEV;
+ memset(&data, 0, sizeof(data));
+ if (sev_es_tmr) {
+ /*
+ * Do not include the encryption mask on the physical
+ * address of the TMR (firmware should clear it anyway).
+ */
+ data.tmr_address = __pa(sev_es_tmr);
- sev = psp->sev_data;
+ data.flags |= SEV_INIT_FLAGS_SEV_ES;
+ data.tmr_len = SEV_ES_TMR_SIZE;
+ }
- if (sev->state == SEV_STATE_INIT)
- return 0;
+ return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
+}
+
+static int __sev_init_ex_locked(int *error)
+{
+ struct sev_data_init_ex data;
+ int ret;
memset(&data, 0, sizeof(data));
- if (sev_es_tmr) {
- u64 tmr_pa;
+ data.length = sizeof(data);
+ data.nv_address = __psp_pa(sev_init_ex_buffer);
+ data.nv_len = NV_LENGTH;
+
+ ret = sev_read_init_ex_file();
+ if (ret)
+ return ret;
+ if (sev_es_tmr) {
/*
* Do not include the encryption mask on the physical
* address of the TMR (firmware should clear it anyway).
*/
- tmr_pa = __pa(sev_es_tmr);
+ data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_address = tmr_pa;
data.tmr_len = SEV_ES_TMR_SIZE;
}
- rc = __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
+ return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ int rc, psp_ret;
+ int (*init_function)(int *error);
+
+ if (!psp || !psp->sev_data)
+ return -ENODEV;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ init_function = sev_init_ex_buffer ? __sev_init_ex_locked :
+ __sev_init_locked;
+ rc = init_function(&psp_ret);
+ if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
+ /*
+ * Initialization command returned an integrity check failure
+ * status code, meaning that firmware load and validation of SEV
+ * related persistent data has failed. Retrying the
+ * initialization function should succeed by replacing the state
+ * with a reset state.
+ */
+ dev_dbg(sev->dev, "SEV: retrying INIT command");
+ rc = init_function(&psp_ret);
+ }
+ if (error)
+ *error = psp_ret;
+
if (rc)
return rc;
@@ -280,7 +454,10 @@ static int __sev_platform_init_locked(int *error)
dev_dbg(sev->dev, "SEV firmware initialized\n");
- return rc;
+ dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ return 0;
}
int sev_platform_init(int *error)
@@ -1034,6 +1211,12 @@ static void sev_firmware_shutdown(struct sev_device *sev)
get_order(SEV_ES_TMR_SIZE));
sev_es_tmr = NULL;
}
+
+ if (sev_init_ex_buffer) {
+ free_pages((unsigned long)sev_init_ex_buffer,
+ get_order(NV_LENGTH));
+ sev_init_ex_buffer = NULL;
+ }
}
void sev_dev_destroy(struct psp_device *psp)
@@ -1064,7 +1247,6 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct page *tmr_page;
int error, rc;
if (!sev)
@@ -1079,37 +1261,32 @@ void sev_pci_init(void)
sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
+ /* If an init_ex_path is provided rely on INIT_EX for PSP initialization
+ * instead of INIT.
+ */
+ if (init_ex_path) {
+ sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
+ if (!sev_init_ex_buffer) {
+ dev_err(sev->dev,
+ "SEV: INIT_EX NV memory allocation failed\n");
+ goto err;
+ }
+ }
+
/* Obtain the TMR memory area for SEV-ES use */
- tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
- if (tmr_page) {
- sev_es_tmr = page_address(tmr_page);
- } else {
- sev_es_tmr = NULL;
+ sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
+ if (!sev_es_tmr)
dev_warn(sev->dev,
"SEV: TMR allocation failed, SEV-ES support unavailable\n");
- }
-
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
- /*
- * INIT command returned an integrity check failure
- * status code, meaning that firmware load and
- * validation of SEV related persistent data has
- * failed and persistent state has been erased.
- * Retrying INIT command here should succeed.
- */
- dev_dbg(sev->dev, "SEV: retrying INIT command");
- rc = sev_platform_init(&error);
- }
- if (rc) {
- dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
+ if (!psp_init_on_probe)
return;
- }
- dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
- sev->api_minor, sev->build);
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc)
+ dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
+ error, rc);
return;
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 33fb27745d52..887162df50f9 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -101,7 +101,6 @@ void cc_req_mgr_fini(struct cc_drvdata *drvdata)
dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
#ifdef COMP_IN_WQ
- flush_workqueue(req_mgr_h->workq);
destroy_workqueue(req_mgr_h->workq);
#else
/* Kill tasklet */
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a032c192ef1d..97d54c1465c2 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1177,13 +1177,10 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
static void hpre_key_to_big_end(u8 *data, int len)
{
int i, j;
- u8 tmp;
for (i = 0; i < len / 2; i++) {
j = len - i - 1;
- tmp = data[j];
- data[j] = data[i];
- data[i] = tmp;
+ swap(data[j], data[i]);
}
}
@@ -1865,7 +1862,7 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
*/
if (memcmp(ptr, p, ctx->key_sz) == 0) {
dev_err(dev, "gx is p!\n");
- return -EINVAL;
+ goto err;
} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
hpre_curve25519_src_modulo_p(ptr);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 65a641396c07..ebfab3e14499 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -103,7 +103,7 @@
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)
-#define HPRE_SHAPER_TYPE_RATE 128
+#define HPRE_SHAPER_TYPE_RATE 640
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 52d6cca6262e..c5b84a5ea350 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -89,6 +89,10 @@
#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
+#define QM_AEQE_CQN_MASK GENMASK(15, 0)
+#define QM_CQ_OVERFLOW 0
+#define QM_EQ_OVERFLOW 1
+#define QM_CQE_ERROR 2
#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
@@ -122,6 +126,8 @@
#define QM_CQC_VFT 0x1
#define QM_VFT_CFG 0x100060
#define QM_VFT_CFG_OP_ENABLE 0x100054
+#define QM_PM_CTRL 0x100148
+#define QM_IDLE_DISABLE BIT(9)
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
@@ -501,10 +507,30 @@ static const char * const qp_s[] = {
"none", "init", "start", "stop", "close",
};
-static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000,
- 10000, 25000, 50000, 100000};
-static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16,
- 17, 18, 19};
+struct qm_typical_qos_table {
+ u32 start;
+ u32 end;
+ u32 val;
+};
+
+/* the qos step is 100 */
+static struct qm_typical_qos_table shaper_cir_s[] = {
+ {100, 100, 4},
+ {200, 200, 3},
+ {300, 500, 2},
+ {600, 1000, 1},
+ {1100, 100000, 0},
+};
+
+static struct qm_typical_qos_table shaper_cbs_s[] = {
+ {100, 200, 9},
+ {300, 500, 11},
+ {600, 1000, 12},
+ {1100, 10000, 16},
+ {10100, 25000, 17},
+ {25100, 50000, 18},
+ {50100, 100000, 19}
+};
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
@@ -585,6 +611,75 @@ static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
return avail;
}
+static u32 qm_get_hw_error_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+}
+
+static u32 qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ return qm->err_ini->get_dev_hw_err_status(qm);
+}
+
+/* Check if the error causes the master ooo block */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ u32 val, dev_val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = qm_get_hw_error_status(qm);
+ dev_val = qm_get_dev_err_status(qm);
+
+ if (qm->ver < QM_HW_V3)
+ return (val & QM_ECC_MBIT) ||
+ (dev_val & qm->err_info.ecc_2bits_mask);
+
+ return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
+ (dev_val & (~qm->err_info.dev_ce_mask));
+}
+
+static int qm_wait_reset_finish(struct hisi_qm *qm)
+{
+ int delay = 0;
+
+ /* All reset requests need to be queued for processing */
+ while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ /*
+ * PF and VF on host doesnot support resetting at the
+ * same time on Kunpeng920.
+ */
+ if (qm->ver < QM_HW_V3)
+ return qm_wait_reset_finish(pf_qm);
+
+ return qm_wait_reset_finish(qm);
+}
+
+static void qm_reset_bit_clear(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (qm->ver < QM_HW_V3)
+ clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
+
+ clear_bit(QM_RESETTING, &qm->misc_ctl);
+}
+
static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
u64 base, u16 queue, bool op)
{
@@ -707,6 +802,19 @@ static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
qm->ops->qm_db(qm, qn, cmd, index, priority);
}
+static void qm_disable_clock_gate(struct hisi_qm *qm)
+{
+ u32 val;
+
+ /* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl(qm->io_base + QM_PM_CTRL);
+ val |= QM_IDLE_DISABLE;
+ writel(val, qm->io_base + QM_PM_CTRL);
+}
+
static int qm_dev_mem_reset(struct hisi_qm *qm)
{
u32 val;
@@ -899,24 +1007,71 @@ static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
mb();
}
-static irqreturn_t qm_aeq_irq(int irq, void *data)
+static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
+{
+ struct hisi_qp *qp = &qm->qp_array[qp_id];
+
+ qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_stop_qp(qp);
+ qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
+}
+
+static void qm_reset_function(struct hisi_qm *qm)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ struct device *dev = &qm->pdev->dev;
+ int ret;
+
+ if (qm_check_dev_error(pf_qm))
+ return;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ dev_err(dev, "reset function not ready\n");
+ return;
+ }
+
+ ret = hisi_qm_stop(qm, QM_FLR);
+ if (ret) {
+ dev_err(dev, "failed to stop qm when reset function\n");
+ goto clear_bit;
+ }
+
+ ret = hisi_qm_start(qm);
+ if (ret)
+ dev_err(dev, "failed to start qm when reset function\n");
+
+clear_bit:
+ qm_reset_bit_clear(qm);
+}
+
+static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
- u32 type;
-
- atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
- if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
- return IRQ_NONE;
+ u32 type, qp_id;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
- if (type < ARRAY_SIZE(qm_fifo_overflow))
- dev_err(&qm->pdev->dev, "%s overflow\n",
- qm_fifo_overflow[type]);
- else
+ qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+
+ switch (type) {
+ case QM_EQ_OVERFLOW:
+ dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
+ qm_reset_function(qm);
+ return IRQ_HANDLED;
+ case QM_CQ_OVERFLOW:
+ dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
+ qp_id);
+ fallthrough;
+ case QM_CQE_ERROR:
+ qm_disable_qp(qm, qp_id);
+ break;
+ default:
dev_err(&qm->pdev->dev, "unknown error type %u\n",
type);
+ break;
+ }
if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
qm->status.aeqc_phase = !qm->status.aeqc_phase;
@@ -926,13 +1081,24 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
aeqe++;
qm->status.aeq_head++;
}
-
- qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
}
+ qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
+
return IRQ_HANDLED;
}
+static irqreturn_t qm_aeq_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+
+ atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
+ if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
+ return IRQ_NONE;
+
+ return IRQ_WAKE_THREAD;
+}
+
static void qm_irq_unregister(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -988,12 +1154,14 @@ static void qm_init_prefetch(struct hisi_qm *qm)
}
/*
+ * acc_shaper_para_calc() Get the IR value by the qos formula, the return value
+ * is the expected qos calculated.
* the formula:
* IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps
*
- * IR_b * (2 ^ IR_u) * 8
- * IR(Mbps) * 10 ^ -3 = -------------------------
- * Tick * (2 ^ IR_s)
+ * IR_b * (2 ^ IR_u) * 8000
+ * IR(Mbps) = -------------------------
+ * Tick * (2 ^ IR_s)
*/
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
@@ -1003,17 +1171,28 @@ static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
static u32 acc_shaper_calc_cbs_s(u32 ir)
{
+ int table_size = ARRAY_SIZE(shaper_cbs_s);
int i;
- if (ir < typical_qos_val[0])
- return QM_SHAPER_MIN_CBS_S;
+ for (i = 0; i < table_size; i++) {
+ if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
+ return shaper_cbs_s[i].val;
+ }
+
+ return QM_SHAPER_MIN_CBS_S;
+}
+
+static u32 acc_shaper_calc_cir_s(u32 ir)
+{
+ int table_size = ARRAY_SIZE(shaper_cir_s);
+ int i;
- for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) {
- if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i])
- return typical_qos_cbs_s[i - 1];
+ for (i = 0; i < table_size; i++) {
+ if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
+ return shaper_cir_s[i].val;
}
- return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1];
+ return 0;
}
static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
@@ -1022,25 +1201,18 @@ static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
u32 error_rate;
factor->cbs_s = acc_shaper_calc_cbs_s(ir);
+ cir_s = acc_shaper_calc_cir_s(ir);
for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
- for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) {
- /** the formula is changed to:
- * IR_b * (2 ^ IR_u) * DIVISOR_CLK
- * IR(Mbps) = -------------------------
- * 768 * (2 ^ IR_s)
- */
- ir_calc = acc_shaper_para_calc(cir_b, cir_u,
- cir_s);
- error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
- if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
- factor->cir_b = cir_b;
- factor->cir_u = cir_u;
- factor->cir_s = cir_s;
-
- return 0;
- }
+ ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
+
+ error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
+ if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
+ factor->cir_b = cir_b;
+ factor->cir_u = cir_u;
+ factor->cir_s = cir_s;
+ return 0;
}
}
}
@@ -1126,10 +1298,10 @@ static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
+ u32 qos = qm->factor[fun_num].func_qos;
int ret, i;
- qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL;
- ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]);
+ ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
if (ret) {
dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
return ret;
@@ -2082,35 +2254,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
-static u32 qm_get_hw_error_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
-}
-
-static u32 qm_get_dev_err_status(struct hisi_qm *qm)
-{
- return qm->err_ini->get_dev_hw_err_status(qm);
-}
-
-/* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
-{
- u32 val, dev_val;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- val = qm_get_hw_error_status(qm);
- dev_val = qm_get_dev_err_status(qm);
-
- if (qm->ver < QM_HW_V3)
- return (val & QM_ECC_MBIT) ||
- (dev_val & qm->err_info.ecc_2bits_mask);
-
- return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
- (dev_val & (~qm->err_info.dev_ce_mask));
-}
-
static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
struct qm_mailbox mailbox;
@@ -3399,6 +3542,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
}
+ up_write(&qm->qps_lock);
qm_irq_unregister(qm);
hisi_qm_pci_uninit(qm);
@@ -3406,8 +3550,6 @@ void hisi_qm_uninit(struct hisi_qm *qm)
uacce_remove(qm->uacce);
qm->uacce = NULL;
}
-
- up_write(&qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -3473,6 +3615,22 @@ static void qm_init_eq_aeq_status(struct hisi_qm *qm)
status->aeqc_phase = true;
}
+static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
+{
+ /* Clear eq/aeq interrupt source */
+ qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
+ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+
+ writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
+ writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
+}
+
+static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
+{
+ writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
+ writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
+}
+
static int qm_eq_ctx_cfg(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -3556,10 +3714,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
WARN_ON(!qm->qdma.va);
if (qm->fun_type == QM_HW_PF) {
- ret = qm_dev_mem_reset(qm);
- if (ret)
- return ret;
-
ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
if (ret)
return ret;
@@ -3578,9 +3732,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
return ret;
qm_init_prefetch(qm);
-
- writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
- writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
+ qm_enable_eq_aeq_interrupts(qm);
return 0;
}
@@ -3728,10 +3880,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
}
- /* Mask eq and aeq irq */
- writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
- writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
-
+ qm_disable_eq_aeq_interrupts(qm);
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, 0, 0);
if (ret < 0) {
@@ -4231,66 +4380,69 @@ static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
return 0;
}
+static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
+ unsigned long *val,
+ unsigned int *fun_index)
+{
+ char tbuf_bdf[QM_DBG_READ_LEN] = {0};
+ char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
+ u32 tmp1, device, function;
+ int ret, bus;
+
+ ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
+ if (ret != QM_QOS_PARAM_NUM)
+ return -EINVAL;
+
+ ret = qm_qos_value_init(val_buf, val);
+ if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
+ pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
+ return -EINVAL;
+ }
+
+ ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function);
+ if (ret != QM_QOS_BDF_PARAM_NUM) {
+ pci_err(qm->pdev, "input pci bdf value is error!\n");
+ return -EINVAL;
+ }
+
+ *fun_index = PCI_DEVFN(device, function);
+
+ return 0;
+}
+
static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct hisi_qm *qm = filp->private_data;
char tbuf[QM_DBG_READ_LEN];
- int tmp1, bus, device, function;
- char tbuf_bdf[QM_DBG_READ_LEN] = {0};
- char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
unsigned int fun_index;
- unsigned long val = 0;
+ unsigned long val;
int len, ret;
if (qm->fun_type == QM_HW_VF)
return -EINVAL;
- /* Mailbox and reset cannot be operated at the same time */
- if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
- pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
- return -EAGAIN;
- }
-
- if (*pos != 0) {
- ret = 0;
- goto err_get_status;
- }
+ if (*pos != 0)
+ return 0;
- if (count >= QM_DBG_READ_LEN) {
- ret = -ENOSPC;
- goto err_get_status;
- }
+ if (count >= QM_DBG_READ_LEN)
+ return -ENOSPC;
len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
- if (len < 0) {
- ret = len;
- goto err_get_status;
- }
+ if (len < 0)
+ return len;
tbuf[len] = '\0';
- ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf);
- if (ret != QM_QOS_PARAM_NUM) {
- ret = -EINVAL;
- goto err_get_status;
- }
-
- ret = qm_qos_value_init(val_buf, &val);
- if (val == 0 || val > QM_QOS_MAX_VAL || ret) {
- pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
- ret = -EINVAL;
- goto err_get_status;
- }
+ ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
+ if (ret)
+ return ret;
- ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function);
- if (ret != QM_QOS_BDF_PARAM_NUM) {
- pci_err(qm->pdev, "input pci bdf value is error!\n");
- ret = -EINVAL;
- goto err_get_status;
+ /* Mailbox and reset cannot be operated at the same time */
+ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
+ pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
+ return -EAGAIN;
}
- fun_index = device * 8 + function;
-
ret = qm_pm_get_sync(qm);
if (ret) {
ret = -EINVAL;
@@ -4304,6 +4456,8 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
goto err_put_sync;
}
+ pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
+ fun_index, val);
ret = count;
err_put_sync:
@@ -4728,46 +4882,6 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
return ret;
}
-static int qm_wait_reset_finish(struct hisi_qm *qm)
-{
- int delay = 0;
-
- /* All reset requests need to be queued for processing */
- while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
- msleep(++delay);
- if (delay > QM_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int qm_reset_prepare_ready(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
-
- /*
- * PF and VF on host doesnot support resetting at the
- * same time on Kunpeng920.
- */
- if (qm->ver < QM_HW_V3)
- return qm_wait_reset_finish(pf_qm);
-
- return qm_wait_reset_finish(qm);
-}
-
-static void qm_reset_bit_clear(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
-
- if (qm->ver < QM_HW_V3)
- clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
-
- clear_bit(QM_RESETTING, &qm->misc_ctl);
-}
-
static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5053,6 +5167,12 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
if (qm->err_ini->open_axi_master_ooo)
qm->err_ini->open_axi_master_ooo(qm);
+ ret = qm_dev_mem_reset(qm);
+ if (ret) {
+ pci_err(pdev, "failed to reset device memory\n");
+ return ret;
+ }
+
ret = qm_restart(qm);
if (ret) {
pci_err(pdev, "Failed to start QM!\n");
@@ -5267,8 +5387,10 @@ static int qm_irq_register(struct hisi_qm *qm)
return ret;
if (qm->ver > QM_HW_V1) {
- ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, 0, qm->dev_name, qm);
+ ret = request_threaded_irq(pci_irq_vector(pdev,
+ QM_AEQ_EVENT_IRQ_VECTOR),
+ qm_aeq_irq, qm_aeq_thread,
+ 0, qm->dev_name, qm);
if (ret)
goto err_aeq_irq;
@@ -5750,13 +5872,15 @@ err_init_qp_mem:
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret, total_vfs;
+ int ret, total_func, i;
size_t off = 0;
- total_vfs = pci_sriov_get_totalvfs(qm->pdev);
- qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+ qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
if (!qm->factor)
return -ENOMEM;
+ for (i = 0; i < total_func; i++)
+ qm->factor[i].func_qos = QM_QOS_MAX_VAL;
#define QM_INIT_BUF(qm, type, num) do { \
(qm)->type = ((qm)->qdma.va + (off)); \
@@ -5825,6 +5949,15 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_irq_register;
}
+ if (qm->fun_type == QM_HW_PF) {
+ qm_disable_clock_gate(qm);
+ ret = qm_dev_mem_reset(qm);
+ if (ret) {
+ dev_err(dev, "failed to reset device memory\n");
+ goto err_irq_register;
+ }
+ }
+
if (qm->mode == UACCE_MODE_SVA) {
ret = qm_alloc_uacce(qm);
if (ret < 0)
@@ -5982,8 +6115,12 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
qm_cmd_init(qm);
hisi_qm_dev_err_init(qm);
+ qm_disable_clock_gate(qm);
+ ret = qm_dev_mem_reset(qm);
+ if (ret)
+ pci_err(pdev, "failed to reset device memory\n");
- return 0;
+ return ret;
}
/**
@@ -6038,7 +6175,7 @@ int hisi_qm_resume(struct device *dev)
if (ret)
pci_err(pdev, "failed to start qm(%d)\n", ret);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 90551bf38b52..26d3ab1d308b 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -105,7 +105,7 @@
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
-#define SEC_SHAPER_TYPE_RATE 128
+#define SEC_SHAPER_TYPE_RATE 400
struct sec_hw_error {
u32 int_msk;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 873971ef9aee..678f8b58ec42 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -103,8 +103,8 @@
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
-#define HZIP_SHAPER_RATE_COMPRESS 252
-#define HZIP_SHAPER_RATE_DECOMPRESS 229
+#define HZIP_SHAPER_RATE_COMPRESS 750
+#define HZIP_SHAPER_RATE_DECOMPRESS 140
#define HZIP_DELAY_1_US 1
#define HZIP_POLL_TIMEOUT_US 1000
@@ -364,15 +364,16 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
/* user domain configurations */
writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
- writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
if (qm->use_sva && qm->ver == QM_HW_V2) {
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_SGL_RUSER_32_63);
} else {
writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
}
/* let's open all compression/decompression cores */
@@ -829,7 +830,10 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "zlib\ngzip";
+ if (pdev->revision >= QM_HW_V3)
+ qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
+ else
+ qm->algs = "zlib\ngzip";
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c
index 679e6ae295e0..5d0785d3f1b5 100644
--- a/drivers/crypto/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/keembay/keembay-ocs-ecc.c
@@ -930,6 +930,7 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
if (!ecc_dev->engine) {
dev_err(dev, "Could not allocate crypto engine\n");
+ rc = -ENOMEM;
goto list_del;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index c076d0b3ad5f..b681bd2dc6ad 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -94,15 +94,13 @@ static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
u32 num_queues)
{
struct otx_cpt_pending_queue *queue = NULL;
- size_t size;
int ret;
u32 i;
pqinfo->num_queues = num_queues;
- size = (qlen * sizeof(struct otx_cpt_pending_entry));
for_each_pending_queue(pqinfo, queue, i) {
- queue->head = kzalloc((size), GFP_KERNEL);
+ queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
if (!queue->head) {
ret = -ENOMEM;
goto pending_qfail;
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
index c242d22008c3..965297e96954 100644
--- a/drivers/crypto/marvell/octeontx2/Makefile
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
- cn10k_cpt.o
+ cn10k_cpt.o otx2_cpt_devlink.o
rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
otx2_cptvf_algs.o cn10k_cpt.o
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index c5445b05f53c..fb56824cb0a6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/crypto.h>
+#include <net/devlink.h>
#include "otx2_cpt_hw_types.h"
#include "rvu.h"
#include "mbox.h"
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
new file mode 100644
index 000000000000..bb02e0db3615
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Marvell. */
+
+#include "otx2_cpt_devlink.h"
+
+static int otx2_cpt_dl_egrp_create(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ return otx2_cpt_dl_custom_egrp_create(cptpf, ctx);
+}
+
+static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ return otx2_cpt_dl_custom_egrp_delete(cptpf, ctx);
+}
+
+static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+
+ otx2_cpt_print_uc_dbg_info(cptpf);
+
+ return 0;
+}
+
+enum otx2_cpt_dl_param_id {
+ OTX2_CPT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
+ OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+};
+
+static const struct devlink_param otx2_cpt_dl_params[] = {
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
+ "egrp_create", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_create,
+ NULL),
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+ "egrp_delete", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_delete,
+ NULL),
+};
+
+static int otx2_cpt_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, "rvu_cptpf");
+}
+
+static const struct devlink_ops otx2_cpt_devlink_ops = {
+ .info_get = otx2_cpt_devlink_info_get,
+};
+
+int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ struct otx2_cpt_devlink *cpt_dl;
+ struct devlink *dl;
+ int ret;
+
+ dl = devlink_alloc(&otx2_cpt_devlink_ops,
+ sizeof(struct otx2_cpt_devlink), dev);
+ if (!dl) {
+ dev_warn(dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ cpt_dl = devlink_priv(dl);
+ cpt_dl->dl = dl;
+ cpt_dl->cptpf = cptpf;
+ cptpf->dl = dl;
+ ret = devlink_params_register(dl, otx2_cpt_dl_params,
+ ARRAY_SIZE(otx2_cpt_dl_params));
+ if (ret) {
+ dev_err(dev, "devlink params register failed with error %d",
+ ret);
+ devlink_free(dl);
+ return ret;
+ }
+
+ devlink_register(dl);
+
+ return 0;
+}
+
+void otx2_cpt_unregister_dl(struct otx2_cptpf_dev *cptpf)
+{
+ struct devlink *dl = cptpf->dl;
+
+ if (!dl)
+ return;
+
+ devlink_unregister(dl);
+ devlink_params_unregister(dl, otx2_cpt_dl_params,
+ ARRAY_SIZE(otx2_cpt_dl_params));
+ devlink_free(dl);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h
new file mode 100644
index 000000000000..8b7d88c5d519
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef __OTX2_CPT_DEVLINK_H
+#define __OTX2_CPT_DEVLINK_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+
+struct otx2_cpt_devlink {
+ struct devlink *dl;
+ struct otx2_cptpf_dev *cptpf;
+};
+
+/* Devlink APIs */
+int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf);
+void otx2_cpt_unregister_dl(struct otx2_cptpf_dev *cptpf);
+
+#endif /* __OTX2_CPT_DEVLINK_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 5ebba86c65d9..05b2d9c650e1 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -53,6 +53,9 @@ struct otx2_cptpf_dev {
u8 enabled_vfs; /* Number of enabled VFs */
u8 kvf_limits; /* Kernel crypto limits */
bool has_cpt1;
+
+ /* Devlink */
+ struct devlink *dl;
};
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 146a55ac4b9b..1720a5bb7016 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -4,6 +4,7 @@
#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
+#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
@@ -494,12 +495,11 @@ static ssize_t kvf_limits_store(struct device *dev,
{
struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
int lfs_num;
+ int ret;
- if (kstrtoint(buf, 0, &lfs_num)) {
- dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
- lfs_num, num_online_cpus());
- return -EINVAL;
- }
+ ret = kstrtoint(buf, 0, &lfs_num);
+ if (ret)
+ return ret;
if (lfs_num < 1 || lfs_num > num_online_cpus()) {
dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
lfs_num, num_online_cpus());
@@ -767,8 +767,15 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
goto cleanup_eng_grps;
+
+ err = otx2_cpt_register_dl(cptpf);
+ if (err)
+ goto sysfs_grp_del;
+
return 0;
+sysfs_grp_del:
+ sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
@@ -788,6 +795,7 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
return;
cptpf_sriov_disable(pdev);
+ otx2_cpt_unregister_dl(cptpf);
/* Delete sysfs entry created for kernel VF limits */
sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
/* Cleanup engine groups */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index dff34b3ec09e..4c8ebdf671ca 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -29,7 +29,8 @@ static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
bool found = false;
int i;
- if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
+ if (eng_grp->g->engs_num < 0 ||
+ eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
dev_err(dev, "unsupported number of engines %d on octeontx2\n",
eng_grp->g->engs_num);
return bmap;
@@ -1110,18 +1111,19 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
struct pci_dev *pdev = cptpf->pdev;
struct fw_info_t fw_info;
- int ret;
+ int ret = 0;
+ mutex_lock(&eng_grps->lock);
/*
* We don't create engine groups if it was already
* made (when user enabled VFs for the first time)
*/
if (eng_grps->is_grps_created)
- return 0;
+ goto unlock;
ret = cpt_ucode_load_fw(pdev, &fw_info);
if (ret)
- return ret;
+ goto unlock;
/*
* Create engine group with SE engines for kernel
@@ -1186,7 +1188,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
cpt_ucode_release_fw(&fw_info);
if (is_dev_otx2(pdev))
- return 0;
+ goto unlock;
/*
* Configure engine group mask to allow context prefetching
* for the groups.
@@ -1201,12 +1203,15 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
+ mutex_unlock(&eng_grps->lock);
return 0;
delete_eng_grp:
delete_engine_grps(pdev, eng_grps);
release_fw:
cpt_ucode_release_fw(&fw_info);
+unlock:
+ mutex_unlock(&eng_grps->lock);
return ret;
}
@@ -1286,6 +1291,7 @@ void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grp_info *grp;
int i, j;
+ mutex_lock(&eng_grps->lock);
delete_engine_grps(pdev, eng_grps);
/* Release memory */
for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
@@ -1295,6 +1301,7 @@ void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
grp->engs[j].bmap = NULL;
}
}
+ mutex_unlock(&eng_grps->lock);
}
int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
@@ -1303,6 +1310,7 @@ int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grp_info *grp;
int i, j, ret;
+ mutex_init(&eng_grps->lock);
eng_grps->obj = pci_get_drvdata(pdev);
eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
@@ -1349,11 +1357,14 @@ static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
struct fw_info_t fw_info;
int ret;
+ mutex_lock(&eng_grps->lock);
ret = cpt_ucode_load_fw(pdev, &fw_info);
- if (ret)
+ if (ret) {
+ mutex_unlock(&eng_grps->lock);
return ret;
+ }
- uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for AE\n");
ret = -EINVAL;
@@ -1396,12 +1407,14 @@ static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
goto delete_eng_grp;
cpt_ucode_release_fw(&fw_info);
+ mutex_unlock(&eng_grps->lock);
return 0;
delete_eng_grp:
delete_engine_grps(pdev, eng_grps);
release_fw:
cpt_ucode_release_fw(&fw_info);
+ mutex_unlock(&eng_grps->lock);
return ret;
}
@@ -1501,3 +1514,291 @@ delete_grps:
return ret;
}
+
+int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
+ struct device *dev = &cptpf->pdev->dev;
+ char *start, *val, *err_msg, *tmp;
+ int grp_idx = 0, ret = -EINVAL;
+ bool has_se, has_ie, has_ae;
+ struct fw_info_t fw_info;
+ int ucode_idx = 0;
+
+ if (!eng_grps->is_grps_created) {
+ dev_err(dev, "Not allowed before creating the default groups\n");
+ return -EINVAL;
+ }
+ err_msg = "Invalid engine group format";
+ strscpy(tmp_buf, ctx->val.vstr, sizeof(tmp_buf));
+ start = tmp_buf;
+
+ has_se = has_ie = has_ae = false;
+
+ for (;;) {
+ val = strsep(&start, ";");
+ if (!val)
+ break;
+ val = strim(val);
+ if (!*val)
+ continue;
+
+ if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
+ if (has_se || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
+ has_se = true;
+ } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
+ if (has_ae || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
+ has_ae = true;
+ } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
+ if (has_ie || ucode_idx)
+ goto err_print;
+ tmp = strim(strsep(&val, ":"));
+ if (!val)
+ goto err_print;
+ if (strlen(tmp) != 2)
+ goto err_print;
+ if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
+ goto err_print;
+ engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
+ has_ie = true;
+ } else {
+ if (ucode_idx > 1)
+ goto err_print;
+ if (!strlen(val))
+ goto err_print;
+ if (strnstr(val, " ", strlen(val)))
+ goto err_print;
+ ucode_filename[ucode_idx++] = val;
+ }
+ }
+
+ /* Validate input parameters */
+ if (!(grp_idx && ucode_idx))
+ goto err_print;
+
+ if (ucode_idx > 1 && grp_idx < 2)
+ goto err_print;
+
+ if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
+ err_msg = "Error max 2 engine types can be attached";
+ goto err_print;
+ }
+
+ if (grp_idx > 1) {
+ if ((engs[0].type + engs[1].type) !=
+ (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
+ err_msg = "Only combination of SE+IE engines is allowed";
+ goto err_print;
+ }
+ /* Keep SE engines at zero index */
+ if (engs[1].type == OTX2_CPT_SE_TYPES)
+ swap(engs[0], engs[1]);
+ }
+ mutex_lock(&eng_grps->lock);
+
+ if (cptpf->enabled_vfs) {
+ dev_err(dev, "Disable VFs before modifying engine groups\n");
+ ret = -EACCES;
+ goto err_unlock;
+ }
+ INIT_LIST_HEAD(&fw_info.ucodes);
+ ret = load_fw(dev, &fw_info, ucode_filename[0]);
+ if (ret) {
+ dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
+ goto err_unlock;
+ }
+ if (ucode_idx > 1) {
+ ret = load_fw(dev, &fw_info, ucode_filename[1]);
+ if (ret) {
+ dev_err(dev, "Unable to load firmware %s\n",
+ ucode_filename[1]);
+ goto release_fw;
+ }
+ }
+ uc_info[0] = get_ucode(&fw_info, engs[0].type);
+ if (uc_info[0] == NULL) {
+ dev_err(dev, "Unable to find firmware for %s\n",
+ get_eng_type_str(engs[0].type));
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ if (ucode_idx > 1) {
+ uc_info[1] = get_ucode(&fw_info, engs[1].type);
+ if (uc_info[1] == NULL) {
+ dev_err(dev, "Unable to find firmware for %s\n",
+ get_eng_type_str(engs[1].type));
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ }
+ ret = create_engine_group(dev, eng_grps, engs, grp_idx,
+ (void **)uc_info, 1);
+
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+err_unlock:
+ mutex_unlock(&eng_grps->lock);
+ return ret;
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+ return ret;
+}
+
+int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ struct device *dev = &cptpf->pdev->dev;
+ char *tmp, *err_msg;
+ int egrp;
+ int ret;
+
+ err_msg = "Invalid input string format(ex: egrp:0)";
+ if (strncasecmp(ctx->val.vstr, "egrp", 4))
+ goto err_print;
+ tmp = ctx->val.vstr;
+ strsep(&tmp, ":");
+ if (!tmp)
+ goto err_print;
+ if (kstrtoint(tmp, 10, &egrp))
+ goto err_print;
+
+ if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
+ dev_err(dev, "Invalid engine group %d", egrp);
+ return -EINVAL;
+ }
+ if (!eng_grps->grp[egrp].is_enabled) {
+ dev_err(dev, "Error engine_group%d is not configured", egrp);
+ return -EINVAL;
+ }
+ mutex_lock(&eng_grps->lock);
+ ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
+ mutex_unlock(&eng_grps->lock);
+
+ return ret;
+
+err_print:
+ dev_err(dev, "%s\n", err_msg);
+ return -EINVAL;
+}
+
+static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
+ int size, int idx)
+{
+ struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
+ struct otx2_cpt_engs_rsvd *engs;
+ int len, i;
+
+ buf[0] = '\0';
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (idx != -1 && idx != i)
+ continue;
+
+ if (eng_grp->mirror.is_ena)
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ if (i > 0 && idx == -1) {
+ len = strlen(buf);
+ scnprintf(buf + len, size - len, ", ");
+ }
+
+ len = strlen(buf);
+ scnprintf(buf + len, size - len, "%d %s ",
+ mirrored_engs ? engs->count + mirrored_engs->count :
+ engs->count,
+ get_eng_type_str(engs->type));
+ if (mirrored_engs) {
+ len = strlen(buf);
+ scnprintf(buf + len, size - len,
+ "(%d shared with engine_group%d) ",
+ engs->count <= 0 ?
+ engs->count + mirrored_engs->count :
+ mirrored_engs->count,
+ eng_grp->mirror.idx);
+ }
+ }
+}
+
+void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
+{
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ struct otx2_cpt_eng_grp_info *mirrored_grp;
+ char engs_info[2 * OTX2_CPT_NAME_LENGTH];
+ struct otx2_cpt_eng_grp_info *grp;
+ struct otx2_cpt_engs_rsvd *engs;
+ u32 mask[5];
+ int i, j;
+
+ pr_debug("Engine groups global info");
+ pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
+ eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
+ pr_debug("free SE %d", eng_grps->avail.se_cnt);
+ pr_debug("free IE %d", eng_grps->avail.ie_cnt);
+ pr_debug("free AE %d", eng_grps->avail.ae_cnt);
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ pr_debug("engine_group%d, state %s", i,
+ grp->is_enabled ? "enabled" : "disabled");
+ if (grp->is_enabled) {
+ mirrored_grp = &eng_grps->grp[grp->mirror.idx];
+ pr_debug("Ucode0 filename %s, version %s",
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].filename :
+ grp->ucode[0].filename,
+ grp->mirror.is_ena ?
+ mirrored_grp->ucode[0].ver_str :
+ grp->ucode[0].ver_str);
+ if (is_2nd_ucode_used(grp))
+ pr_debug("Ucode1 filename %s, version %s",
+ grp->ucode[1].filename,
+ grp->ucode[1].ver_str);
+ }
+
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ engs = &grp->engs[j];
+ if (engs->type) {
+ get_engs_info(grp, engs_info,
+ 2 * OTX2_CPT_NAME_LENGTH, j);
+ pr_debug("Slot%d: %s", j, engs_info);
+ bitmap_to_arr32(mask, engs->bmap,
+ eng_grps->engs_num);
+ if (is_dev_otx2(cptpf->pdev))
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
+ mask[3], mask[2], mask[1],
+ mask[0]);
+ else
+ pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
+ mask[4], mask[3], mask[2], mask[1],
+ mask[0]);
+ }
+ }
+ }
+}
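
The egrp_create handler above expects a semicolon-separated devlink string value: one or two "type:count" tokens (se, ie or ae) plus one or two microcode file names, e.g. "se:32;ie:16;cpt-se.out;cpt-ie.out" (the firmware names here are illustrative). Below is a minimal userspace sketch of the same strsep()/strim()-style tokenization; it only classifies tokens and skips the extra validation the driver performs (SE+IE-only pairing, two-ucode limit, VF checks).

    #define _DEFAULT_SOURCE
    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for the kernel's strim(): trim leading/trailing whitespace. */
    static char *trim(char *s)
    {
        char *end;

        while (isspace((unsigned char)*s))
            s++;
        end = s + strlen(s);
        while (end > s && isspace((unsigned char)end[-1]))
            *--end = '\0';
        return s;
    }

    int main(void)
    {
        /* Hypothetical devlink parameter value. */
        char buf[] = "se:32; ie:16; cpt-se.out; cpt-ie.out";
        char *start = buf;
        char *tok;

        while ((tok = strsep(&start, ";")) != NULL) {
            char *val = trim(tok);
            char *colon = strchr(val, ':');

            if (!*val)
                continue;
            if (colon) {
                *colon = '\0';
                printf("engine type '%s', count %d\n",
                       trim(val), atoi(trim(colon + 1)));
            } else {
                printf("microcode file '%s'\n", val);
            }
        }
        return 0;
    }

Tokens containing a ':' become engine-type/count pairs; anything else is treated as a firmware file name, mirroring the else branch in otx2_cpt_dl_custom_egrp_create().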
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index fe019ab730b2..8f4d4e5f531a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -143,6 +143,7 @@ struct otx2_cpt_eng_grp_info {
};
struct otx2_cpt_eng_grps {
+ struct mutex lock;
struct otx2_cpt_eng_grp_info grp[OTX2_CPT_MAX_ENGINE_GROUPS];
struct otx2_cpt_engs_available avail;
void *obj; /* device specific data */
@@ -160,5 +161,9 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf);
-
+int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx);
+int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
+ struct devlink_param_gset_ctx *ctx);
+void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf);
#endif /* __OTX2_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 877a948469bd..2748a3327e39 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1682,11 +1682,8 @@ static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = lptr;
struct cpt_device_desc *rdesc = rptr;
- struct cpt_device_desc desc;
- desc = *ldesc;
- *ldesc = *rdesc;
- *rdesc = desc;
+ swap(*ldesc, *rdesc);
}
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
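
For context, swap_func() above is the custom swap callback handed to the kernel's sort() together with a compare callback; the change simply replaces the open-coded three-assignment swap with the swap() macro, with identical behaviour. A small kernel-context sketch of that callback pairing, using a hypothetical element type:

    #include <linux/kernel.h>
    #include <linux/sort.h>

    struct example_desc {
        int prio;
        void *priv;
    };

    static int example_cmp(const void *a, const void *b)
    {
        const struct example_desc *l = a, *r = b;

        return l->prio - r->prio;
    }

    static void example_swap(void *a, void *b, int size)
    {
        struct example_desc *l = a, *r = b;

        swap(*l, *r);           /* same pattern as swap_func() above */
    }

    static void example_sort(struct example_desc *arr, size_t n)
    {
        sort(arr, n, sizeof(*arr), example_cmp, example_swap);
    }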
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9b968ac4ee7b..a196bb8b1701 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1302,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
static int omap_aes_resume(struct device *dev)
{
- pm_runtime_resume_and_get(dev);
+ pm_runtime_get_sync(dev);
return 0;
}
#endif
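
The omap-aes hunk above switches the resume handler back from pm_runtime_resume_and_get() to pm_runtime_get_sync(). The practical difference is the failure path: pm_runtime_get_sync() always leaves the usage counter incremented, whereas pm_runtime_resume_and_get() drops it again on error and its return value is meant to be checked. A kernel-context sketch of the checked pattern, for a path that can propagate the error (names illustrative):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int example_hw_access(struct device *dev)
    {
        int ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
            return ret;     /* usage count already dropped on failure */

        /* ... touch the hardware here ... */

        pm_runtime_put(dev);
        return 0;
    }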
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index be77656864e3..538aff80869f 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -735,7 +735,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -752,7 +752,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -770,7 +770,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -787,7 +787,7 @@ static struct skcipher_alg algs_ecb_cbc[] = {
{
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-omap",
- .base.cra_priority = 100,
+ .base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 77783feb62b2..4b90c0f22b03 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -13,6 +13,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_SHA512
select CRYPTO_LIB_AES
select FW_LOADER
+ select CRC8
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
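
The new "select CRC8" pulls in the kernel's CRC-8 library (lib/crc8.c), which the reworked PF/VF messaging code uses for integrity checks on its messages. A short kernel-context sketch of the linux/crc8.h API; the polynomial below is illustrative and not necessarily the one the QAT code uses:

    #include <linux/crc8.h>

    #define EXAMPLE_CRC8_POLY 0x97      /* illustrative polynomial */

    DECLARE_CRC8_TABLE(example_crc8_table);

    static void example_crc8_setup(void)
    {
        crc8_populate_msb(example_crc8_table, EXAMPLE_CRC8_POLY);
    }

    static u8 example_crc8(u8 *buf, size_t len)
    {
        return crc8(example_crc8_table, buf, len, CRC8_INIT_VALUE);
    }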
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index fd29861526d6..6d10edc40aca 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
+/* Copyright(c) 2020 - 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
+#include <adf_cfg.h>
#include <adf_common_drv.h>
-#include <adf_pf2vf_msg.h>
#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -13,12 +14,18 @@ struct adf_fw_config {
char *obj_name;
};
-static struct adf_fw_config adf_4xxx_fw_config[] = {
+static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
{0xF0, ADF_4XXX_SYM_OBJ},
{0xF, ADF_4XXX_ASYM_OBJ},
{0x100, ADF_4XXX_ADMIN_OBJ},
};
+static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
+ {0xF0, ADF_4XXX_DC_OBJ},
+ {0xF, ADF_4XXX_DC_OBJ},
+ {0x100, ADF_4XXX_ADMIN_OBJ},
+};
+
/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
0x5555555, 0x5555555, 0x5555555, 0x5555555,
@@ -32,6 +39,39 @@ static struct adf_hw_device_class adf_4xxx_class = {
.instances = 0,
};
+enum dev_services {
+ SVC_CY = 0,
+ SVC_DC,
+};
+
+static const char *const dev_cfg_services[] = {
+ [SVC_CY] = ADF_CFG_CY,
+ [SVC_DC] = ADF_CFG_DC,
+};
+
+static int get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ ADF_SERVICES_ENABLED " param not found\n");
+ return ret;
+ }
+
+ ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
+ services);
+ if (ret < 0)
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+ services);
+
+ return ret;
+}
+
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_4XXX_ACCELERATORS_MASK;
@@ -96,23 +136,67 @@ static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_cy, capabilities_dc;
u32 fusectl1;
- u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
- ICP_ACCEL_CAPABILITIES_AES_V2;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE)
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE)
- capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE)
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_HKDF |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_AES_V2;
+
+ /* A set bit in fusectl1 means the feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return capabilities_cy;
+ case SVC_DC:
+ return capabilities_dc;
+ }
- return capabilities;
+ return 0;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
@@ -191,29 +275,36 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
return ret;
}
-static int pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
-{
- return 0;
-}
-
static u32 uof_get_num_objs(void)
{
- return ARRAY_SIZE(adf_4xxx_fw_config);
-}
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
+ ARRAY_SIZE(adf_4xxx_fw_dc_config),
+ "Size mismatch between adf_4xxx_fw_*_config arrays");
-static char *uof_get_name(u32 obj_num)
-{
- return adf_4xxx_fw_config[obj_num].obj_name;
+ return ARRAY_SIZE(adf_4xxx_fw_cy_config);
}
-static u32 uof_get_ae_mask(u32 obj_num)
+static char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- return adf_4xxx_fw_config[obj_num].ae_mask;
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return adf_4xxx_fw_cy_config[obj_num].obj_name;
+ case SVC_DC:
+ return adf_4xxx_fw_dc_config[obj_num].obj_name;
+ }
+
+ return NULL;
}
-static u32 get_vf2pf_sources(void __iomem *pmisc_addr)
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- /* For the moment do not report vf2pf sources */
+ switch (get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ return adf_4xxx_fw_cy_config[obj_num].ae_mask;
+ case SVC_DC:
+ return adf_4xxx_fw_dc_config[obj_num].ae_mask;
+ }
+
return 0;
}
@@ -222,12 +313,14 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
hw_data->num_logical_accel = 1;
hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_enable_error_correction;
@@ -259,12 +352,11 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->enable_pfvf_comms = pfvf_comms_disabled;
- hw_data->get_vf2pf_sources = get_vf2pf_sources;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
}
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
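
get_service_enabled() above resolves the "ServicesEnabled" configuration string to an index into dev_cfg_services[] with match_string(), and the capability and UOF helpers then switch on that index. For reference, a minimal kernel-context sketch of the match_string() idiom (table and names hypothetical); it returns the matching index or -EINVAL:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    enum example_service { EXAMPLE_SVC_CY, EXAMPLE_SVC_DC };

    static const char * const example_services[] = {
        [EXAMPLE_SVC_CY] = "sym;asym",
        [EXAMPLE_SVC_DC] = "dc",
    };

    static int example_lookup_service(const char *cfg)
    {
        return match_string(example_services, ARRAY_SIZE(example_services), cfg);
    }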
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 924bac6feb37..12e4fb9b40ce 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -37,6 +37,7 @@
/* Bank and ring configuration */
#define ADF_4XXX_NUM_RINGS_PER_BANK 2
+#define ADF_4XXX_NUM_BANKS_PER_VF 4
/* Error source registers */
#define ADF_4XXX_ERRSOU0 (0x41A200)
@@ -76,6 +77,7 @@
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin"
+#define ADF_4XXX_DC_OBJ "qat_4xxx_dc.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
index 71ef065914b2..a6c78b9c730b 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c
@@ -29,6 +29,29 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
+static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ /* Default configuration is crypto only for even devices
+ * and compression for odd devices
+ */
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
@@ -227,8 +250,18 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
+ ret = adf_cfg_dev_init(accel_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+ goto out_err;
+ }
+
/* Get accelerator capabilities mask */
hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
/* Find and map all the device's BARS */
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 1fa690219d92..b941fe3713ff 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
-#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -109,6 +109,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
@@ -135,14 +136,9 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
- hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
- hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
- hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 3e69b520e82f..a9fbe57b32ae 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
#include "adf_c3xxxvf_hw_data.h"
static struct adf_hw_device_class c3xxxiov_class = {
@@ -47,11 +48,6 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_VF;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C3XXXIOV_PF2VF_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -71,6 +67,7 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
@@ -86,13 +83,11 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index f5de4ce66014..6b4bf181d15b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -12,7 +12,6 @@
#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
#define ADF_C3XXXIOV_ETR_BAR 0
#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
-#define ADF_C3XXXIOV_PF2VF_OFFSET 0x200
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1df1b868978d..fa18d8009f53 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -171,11 +171,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
- init_completion(&accel_dev->vf.iov_msg_completion);
-
- ret = qat_crypto_dev_config(accel_dev);
- if (ret)
- goto out_err_free_reg;
+ init_completion(&accel_dev->vf.msg_received);
ret = adf_dev_init(accel_dev);
if (ret)
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 0613db077689..b1eac2f81faa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
-#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
#include "icp_qat_hw.h"
@@ -111,6 +111,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
@@ -137,14 +138,9 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
- hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
- hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
- hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 3bee3e467363..0282038fca54 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
#include "adf_c62xvf_hw_data.h"
static struct adf_hw_device_class c62xiov_class = {
@@ -47,11 +48,6 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_VF;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C62XIOV_PF2VF_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -71,6 +67,7 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
@@ -86,13 +83,11 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index 794778c48678..a1a62c003ebf 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -12,7 +12,6 @@
#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
#define ADF_C62XIOV_ETR_BAR 0
#define ADF_C62XIOV_ETR_MAX_BANKS 1
-#define ADF_C62XIOV_PF2VF_OFFSET 0x200
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 8103bd81d617..686ec752d0e9 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -171,11 +171,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
- init_completion(&accel_dev->vf.iov_msg_completion);
-
- ret = qat_crypto_dev_config(accel_dev);
- if (ret)
- goto out_err_free_reg;
+ init_completion(&accel_dev->vf.msg_received);
ret = adf_dev_init(accel_dev);
if (ret)
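
Both VF probe paths now initialize vf.msg_received, the completion the new PF/VF protocol layer uses to pair a VF-to-PF request issued from process context with the PF-to-VF response delivered from the interrupt handler (the old iov_msg_completion played a similar role). A kernel-context sketch of that request/response pattern, with all names hypothetical:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct example_vf {
        struct completion msg_received; /* init_completion() at probe time */
        u32 response;
    };

    /* Interrupt path: a response from the PF has arrived. */
    static void example_vf_handle_response(struct example_vf *vf, u32 msg)
    {
        vf->response = msg;
        complete(&vf->msg_received);
    }

    /* Process context: send a request and wait for the paired response. */
    static int example_vf_send_req(struct example_vf *vf, u32 req, u32 *resp)
    {
        reinit_completion(&vf->msg_received);

        /* ... write @req to the PF/VF CSR and raise the interrupt ... */

        if (!wait_for_completion_timeout(&vf->msg_received,
                                         msecs_to_jiffies(100)))
            return -ETIMEDOUT;

        *resp = vf->response;
        return 0;
    }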
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 9c57abdf56b7..7e191a42a5c7 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -19,5 +19,7 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
- adf_vf2pf_msg.o adf_vf_isr.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
+ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
+ adf_gen2_pfvf.o adf_gen4_pfvf.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 57d9ca08e611..2d4cd7c7cf33 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/ratelimit.h>
#include "adf_cfg_common.h"
+#include "adf_pfvf_msg.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
@@ -147,6 +148,19 @@ struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;
+struct adf_pfvf_ops {
+ int (*enable_comms)(struct adf_accel_dev *accel_dev);
+ u32 (*get_pf2vf_offset)(u32 i);
+ u32 (*get_vf2pf_offset)(u32 i);
+ u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
+ void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+ void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+ int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock);
+ struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver);
+};
+
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
u32 (*get_accel_mask)(struct adf_hw_device_data *self);
@@ -157,7 +171,6 @@ struct adf_hw_device_data {
u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
u32 (*get_num_aes)(struct adf_hw_device_data *self);
u32 (*get_num_accels)(struct adf_hw_device_data *self);
- u32 (*get_pf2vf_offset)(u32 i);
void (*get_arb_info)(struct arb_info *arb_csrs_info);
void (*get_admin_info)(struct admin_info *admin_csrs_info);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
@@ -176,35 +189,34 @@ struct adf_hw_device_data {
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
- int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
- u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
- void (*enable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
- u32 vf_mask);
- void (*disable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
- u32 vf_mask);
+ int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
- char *(*uof_get_name)(u32 obj_num);
+ char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(void);
- u32 (*uof_get_ae_mask)(u32 obj_num);
+ u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
u32 straps;
u32 accel_capabilities_mask;
+ u32 extended_dc_capabilities;
+ u32 clock_frequency;
u32 instance_id;
u16 accel_mask;
u32 ae_mask;
u32 admin_ae_mask;
u16 tx_rings_mask;
+ u16 ring_to_svc_map;
u8 tx_rx_gap;
u8 num_banks;
+ u16 num_banks_per_vf;
u8 num_rings_per_bank;
u8 num_accel;
u8 num_logical_accel;
u8 num_engines;
- u8 min_iov_compat_ver;
};
/* CSR write macro */
@@ -214,14 +226,22 @@ struct adf_hw_device_data {
/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+#define ADF_CFG_NUM_SERVICES 4
+#define ADF_SRV_TYPE_BIT_LEN 3
+#define ADF_SRV_TYPE_MASK 0x7
+
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
GET_HW_DATA(accel_dev)->num_rings_per_bank
+#define GET_SRV_TYPE(accel_dev, idx) \
+ (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
+ & ADF_SRV_TYPE_MASK)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
+#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
struct adf_admin_comms;
@@ -238,6 +258,7 @@ struct adf_accel_vf_info {
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
+ u8 vf_compat_ver;
};
struct adf_accel_dev {
@@ -265,9 +286,9 @@ struct adf_accel_dev {
char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
- struct completion iov_msg_completion;
- u8 compatible;
- u8 pf_version;
+ struct completion msg_received;
+ struct pfvf_message response; /* temp field holding pf2vf response */
+ u8 pf_compat_ver;
} vf;
};
bool is_vf;
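
The new struct adf_pfvf_ops gathers every generation-specific PF/VF transport callback behind GET_PFVF_OPS(); adf_gen2_init_pf_pfvf_ops() and adf_gen4_init_pf_pfvf_ops() fill it in and the common protocol code calls through it. A hedged sketch of how a PF-side caller might use the send_msg hook (function name and lock are illustrative):

    static int example_send_to_vf(struct adf_accel_dev *accel_dev, u32 vf_nr,
                                  struct pfvf_message msg, struct mutex *csr_lock)
    {
        struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
        u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);

        return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset, csr_lock);
    }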
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index ca4eae8cdd0b..4ce2b666929e 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -22,8 +22,12 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
num_objs = hw_device->uof_get_num_objs();
for (i = 0; i < num_objs; i++) {
- obj_name = hw_device->uof_get_name(i);
- ae_mask = hw_device->uof_get_ae_mask(i);
+ obj_name = hw_device->uof_get_name(accel_dev, i);
+ ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
+ if (!obj_name || !ae_mask) {
+ dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
+ goto out_err;
+ }
if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
dev_err(&GET_DEV(accel_dev),
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 43680e178242..498eb6f690e3 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -194,6 +194,35 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
+ u32 *capabilities)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_resp resp;
+ struct icp_qat_fw_init_admin_req req;
+ unsigned long ae_mask;
+ unsigned long ae;
+ int ret;
+
+ /* Target only service accelerator engines */
+ ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
+
+ *capabilities = 0;
+ for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+ ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
+ if (ret)
+ return ret;
+
+ *capabilities |= resp.extended_features;
+ }
+
+ return 0;
+}
+
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
@@ -204,8 +233,16 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
*/
int adf_send_admin_init(struct adf_accel_dev *accel_dev)
{
+ u32 dc_capabilities = 0;
int ret;
+ ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
+ return ret;
+ }
+ accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+
ret = adf_set_fw_constants(accel_dev);
if (ret)
return ret;
@@ -218,9 +255,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *csr = pmisc->virt_addr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
struct admin_info admin_csrs_info;
u32 mailbox_offset, adminmsg_u, adminmsg_l;
void __iomem *mailbox;
@@ -254,13 +289,13 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
hw_data->get_admin_info(&admin_csrs_info);
mailbox_offset = admin_csrs_info.mailbox_offset;
- mailbox = csr + mailbox_offset;
+ mailbox = pmisc_addr + mailbox_offset;
adminmsg_u = admin_csrs_info.admin_msg_ur;
adminmsg_l = admin_csrs_info.admin_msg_lr;
reg_val = (u64)admin->phy_addr;
- ADF_CSR_WR(csr, adminmsg_u, upper_32_bits(reg_val));
- ADF_CSR_WR(csr, adminmsg_l, lower_32_bits(reg_val));
+ ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
+ ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
mutex_init(&admin->lock);
admin->mailbox_addr = mailbox;
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index 575b6f002303..b5b208cbe5a1 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -297,3 +297,4 @@ int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
up_read(&cfg->lock);
return ret;
}
+EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 4fabb70b1f18..6e5de1dab97b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -19,6 +19,19 @@
#define ADF_MAX_DEVICES (32 * 32)
#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
+#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
+#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
+#define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6
+#define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9
+enum adf_cfg_service_type {
+ UNUSED = 0,
+ CRYPTO,
+ COMP,
+ SYM,
+ ASYM,
+ USED
+};
+
enum adf_cfg_val_type {
ADF_DEC,
ADF_HEX,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 09651e1f937a..655248dbf962 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -22,6 +22,9 @@
#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
#define ADF_CY "Cy"
#define ADF_DC "Dc"
+#define ADF_CFG_DC "dc"
+#define ADF_CFG_CY "sym;asym"
+#define ADF_SERVICES_ENABLED "ServicesEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index de94b76a6d2c..76f4f96ec5eb 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#ifndef ADF_DRV_H
#define ADF_DRV_H
@@ -62,9 +62,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
@@ -117,6 +114,7 @@ void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
int qat_crypto_register(void);
int qat_crypto_unregister(void);
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
void qat_alg_callback(void *resp);
@@ -131,6 +129,8 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
+
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
@@ -193,17 +193,14 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
-void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
- u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg);
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
int adf_init_vf_wq(void);
@@ -212,11 +209,6 @@ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
#else
#define adf_sriov_configure NULL
-static inline int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- return 0;
-}
-
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
@@ -229,15 +221,6 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
-static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
- return 0;
-}
-
-static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
-}
-
static inline int adf_init_pf_wq(void)
{
return 0;
@@ -261,4 +244,15 @@ static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
}
#endif
+
+static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *pmisc;
+
+ pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+
+ return pmisc->virt_addr;
+}
+
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 262bdc05dab4..57035b7dd4b2 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -1,57 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+#include "adf_common_drv.h"
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>
-#define ADF_GEN2_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
-
-u32 adf_gen2_get_pf2vf_offset(u32 i)
-{
- return ADF_GEN2_PF2VF_OFFSET(i);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_pf2vf_offset);
-
-u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
-{
- u32 errsou3, errmsk3, vf_int_mask;
-
- /* Get the interrupt sources triggered by VFs */
- errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
- vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
-
- /* To avoid adding duplicate entries to work queue, clear
- * vf_int_mask_sets bits that are already masked in ERRMSK register.
- */
- errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
- vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
-
- return vf_int_mask;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_vf2pf_sources);
-
-void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
- /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
- & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
- }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_enable_vf2pf_interrupts);
-
-void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
- /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
- | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
- }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_disable_vf2pf_interrupts);
-
u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
{
if (!self || !self->accel_mask)
@@ -73,31 +26,29 @@ EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)
- [hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned long accel_mask = hw_data->accel_mask;
unsigned long ae_mask = hw_data->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
unsigned int val, i;
/* Enable Accel Engine error detection & correction */
for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
- val = ADF_CSR_RD(csr, ADF_GEN2_AE_CTX_ENABLES(i));
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_GEN2_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_GEN2_AE_MISC_CONTROL(i));
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
}
/* Enable shared memory error detection & correction */
for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
- val = ADF_CSR_RD(csr, ADF_GEN2_UERRSSMSH(i));
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_GEN2_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_GEN2_CERRSSMSH(i));
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
val |= ADF_GEN2_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_GEN2_CERRSSMSH(i), val);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
@@ -105,15 +56,9 @@ EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_addr;
- struct adf_bar *pmisc;
- int pmisc_id, i;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 reg;
-
- pmisc_id = hw_data->get_misc_bar_id(hw_data);
- pmisc = &GET_BARS(accel_dev)[pmisc_id];
- pmisc_addr = pmisc->virt_addr;
+ int i;
/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
for (i = 0; i < num_a_regs; i++) {
@@ -259,21 +204,33 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
u32 legfuses;
u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
if ((straps | fuses) & ADF_POWERGATE_PKE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ if ((straps | fuses) & ADF_POWERGATE_DC)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
@@ -281,18 +238,12 @@ EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
unsigned long accel_mask = hw_data->accel_mask;
- void __iomem *pmisc_addr;
- struct adf_bar *pmisc;
- int pmisc_id;
u32 i = 0;
- pmisc_id = hw_data->get_misc_bar_id(hw_data);
- pmisc = &GET_BARS(accel_dev)[pmisc_id];
- pmisc_addr = pmisc->virt_addr;
-
/* Configures WDT timers */
for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
/* Enable WDT for sym and dc */
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index c169d704097d..f2e0451b11c0 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -4,6 +4,7 @@
#define ADF_GEN2_HW_DATA_H_
#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
@@ -113,8 +114,16 @@ do { \
(ADF_ARB_REG_SLOT * (index)), value)
/* Power gating */
+#define ADF_POWERGATE_DC BIT(23)
#define ADF_POWERGATE_PKE BIT(24)
+/* Default ring mapping */
+#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
+ (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+ CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+ UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+ COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
/* WDT timers
*
* Timeout is in cycles. Clock speed may vary across products but this
@@ -136,19 +145,6 @@ do { \
#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
#define ADF_GEN2_ERRSSMSH_EN BIT(3)
- /* VF2PF interrupts */
-#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
-#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
-#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
-#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
-
-u32 adf_gen2_get_pf2vf_offset(u32 i);
-u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_bar);
-void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
-void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
-
u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
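
ADF_GEN2_DEFAULT_RING_TO_SRV_MAP above packs one adf_cfg_service_type per ring pair, ADF_SRV_TYPE_BIT_LEN (3) bits each, which is what the GET_SRV_TYPE() macro in adf_accel_devices.h extracts slot by slot. A kernel-context sketch of decoding such a map (helper names hypothetical); for the gen2 default it prints cy, cy, unused, dc:

    static const char *example_srv_name(enum adf_cfg_service_type t)
    {
        switch (t) {
        case CRYPTO:    return "cy";
        case COMP:      return "dc";
        case SYM:       return "sym";
        case ASYM:      return "asym";
        default:        return "unused";
        }
    }

    static void example_dump_ring_map(u16 ring_to_svc_map)
    {
        int i;

        for (i = 0; i < ADF_CFG_NUM_SERVICES; i++) {
            enum adf_cfg_service_type t;

            t = (ring_to_svc_map >> (ADF_SRV_TYPE_BIT_LEN * i)) &
                 ADF_SRV_TYPE_MASK;
            pr_info("ring pair %d -> %s\n", i, example_srv_name(t));
        }
    }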
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
new file mode 100644
index 000000000000..1a9072aac2ca
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen2_pfvf.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_vf_proto.h"
+#include "adf_pfvf_utils.h"
+
+ /* VF2PF interrupts */
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+
+#define ADF_GEN2_PF_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_GEN2_VF_PF2VF_OFFSET 0x200
+
+#define ADF_GEN2_CSR_IN_USE 0x6AC2
+#define ADF_GEN2_CSR_IN_USE_MASK 0xFFFE
+
+enum gen2_csr_pos {
+ ADF_GEN2_CSR_PF2VF_OFFSET = 0,
+ ADF_GEN2_CSR_VF2PF_OFFSET = 16,
+};
+
+#define ADF_PFVF_GEN2_MSGTYPE_SHIFT 2
+#define ADF_PFVF_GEN2_MSGTYPE_MASK 0x0F
+#define ADF_PFVF_GEN2_MSGDATA_SHIFT 6
+#define ADF_PFVF_GEN2_MSGDATA_MASK 0x3FF
+
+static const struct pfvf_csr_format csr_gen2_fmt = {
+ { ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
+ { ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
+};
+
+#define ADF_PFVF_MSG_RETRY_DELAY 5
+#define ADF_PFVF_MSG_MAX_RETRIES 3
+
+static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
+{
+ return ADF_GEN2_PF_PF2VF_OFFSET(i);
+}
+
+static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
+{
+ return ADF_GEN2_VF_PF2VF_OFFSET;
+}
+
+static u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ u32 errsou3, errmsk3, vf_int_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+ /* To avoid adding duplicate entries to the work queue, clear
+ * vf_int_mask bits that are already masked in the ERRMSK register.
+ */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+ return vf_int_mask;
+}
+
+static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+
+static void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+
+static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
+{
+ return ADF_PFVF_INT << offset;
+}
+
+static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
+{
+ return (csr_msg & 0xFFFF) << offset;
+}
+
+static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
+{
+ return (csr_val >> offset) & 0xFFFF;
+}
+
+static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
+{
+ return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
+}
+
+static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+ *msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
+}
+
+static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+ *msg |= (ADF_GEN2_CSR_IN_USE << offset);
+}
+
+static bool is_legacy_user_pfvf_message(u32 msg)
+{
+ return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
+}
+
+static bool is_pf2vf_notification(u8 msg_type)
+{
+ switch (msg_type) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_vf2pf_notification(u8 msg_type)
+{
+ switch (msg_type) {
+ case ADF_VF2PF_MSGTYPE_INIT:
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct pfvf_gen2_params {
+ u32 pfvf_offset;
+ struct mutex *csr_lock; /* lock preventing concurrent access to the CSR */
+ enum gen2_csr_pos local_offset;
+ enum gen2_csr_pos remote_offset;
+ bool (*is_notification_message)(u8 msg_type);
+ u8 compat_ver;
+};
+
+static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg,
+ struct pfvf_gen2_params *params)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ enum gen2_csr_pos remote_offset = params->remote_offset;
+ enum gen2_csr_pos local_offset = params->local_offset;
+ unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
+ struct mutex *lock = params->csr_lock;
+ u32 pfvf_offset = params->pfvf_offset;
+ u32 int_bit;
+ u32 csr_val;
+ u32 csr_msg;
+ int ret;
+
+ /* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
+ * allows us to build and read messages as if they were all 0 based.
+ * However, send and receive share a single 32-bit register, so the
+ * message half must be shifted and/or masked after encoding and
+ * before decoding. Which half to shift depends on the direction.
+ */
+
+ int_bit = gen2_csr_get_int_bit(local_offset);
+
+ csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
+ if (unlikely(!csr_msg))
+ return -EINVAL;
+
+ /* Prepare for CSR format, shifting the wire message in place and
+ * setting the in use pattern
+ */
+ csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
+ gen2_csr_set_in_use(&csr_msg, remote_offset);
+
+ mutex_lock(lock);
+
+start:
+ /* Check if the PFVF CSR is in use by remote function */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (gen2_csr_is_in_use(csr_val, local_offset)) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "PFVF CSR in use by remote function\n");
+ goto retry;
+ }
+
+ /* Attempt to get ownership of the PFVF CSR */
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
+
+ /* Wait for confirmation from remote func it received the message */
+ ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
+ ADF_PFVF_MSG_ACK_DELAY_US,
+ ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+ true, pmisc_addr, pfvf_offset);
+ if (unlikely(ret < 0)) {
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+ csr_val &= ~int_bit;
+ }
+
+ /* For fire-and-forget notifications, the receiver does not clear
+ * the in-use pattern. This is used to detect collisions.
+ */
+ if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
+ /* Collision must have overwritten the message */
+ dev_err(&GET_DEV(accel_dev),
+ "Collision on notification - PFVF CSR overwritten by remote function\n");
+ goto retry;
+ }
+
+ /* If the far side did not clear the in-use pattern, it is either
+ * 1) a notification - the message is left intact to detect collisions
+ * 2) an older protocol (compatibility version < 3) on the far side,
+ * where the sender is responsible for clearing the in-use
+ * pattern after the receiver has acknowledged receipt.
+ * In either case, clear the in-use pattern now.
+ */
+ if (gen2_csr_is_in_use(csr_val, remote_offset)) {
+ gen2_csr_clear_in_use(&csr_val, remote_offset);
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+ }
+
+out:
+ mutex_unlock(lock);
+ return ret;
+
+retry:
+ if (--retries) {
+ msleep(ADF_PFVF_MSG_RETRY_DELAY);
+ goto start;
+ } else {
+ ret = -EBUSY;
+ goto out;
+ }
+}
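A hedged worked example of the sequence above, using the constants defined earlier in this file; it assumes adf_pfvf_csr_msg_of() sets the Message Origin bit in addition to the type/data fields.

	/* Worked example (VF -> PF, ADF_VF2PF_MSGTYPE_INIT = 0x03, data = 0):
	 *   wire msg   = MSGORIGIN_SYSTEM | (0x03 << 2)             = 0x0000000E
	 *   positioned = 0x0E << ADF_GEN2_CSR_VF2PF_OFFSET (16)     = 0x000E0000
	 *   in-use     |= ADF_GEN2_CSR_IN_USE in the PF2VF half     = 0x000E6AC2
	 *   written    |= int_bit (ADF_PFVF_INT << 16)              = 0x000F6AC2
	 */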
+
+static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
+ struct pfvf_gen2_params *params)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ enum gen2_csr_pos remote_offset = params->remote_offset;
+ enum gen2_csr_pos local_offset = params->local_offset;
+ u32 pfvf_offset = params->pfvf_offset;
+ struct pfvf_message msg = { 0 };
+ u32 int_bit;
+ u32 csr_val;
+ u16 csr_msg;
+
+ int_bit = gen2_csr_get_int_bit(local_offset);
+
+ /* Read message */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+ if (!(csr_val & int_bit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+ return msg;
+ }
+
+ /* Extract the message from the CSR */
+ csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
+
+ /* Ignore legacy non-system (non-kernel) messages */
+ if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ignored non-system message (0x%.8x);\n", csr_val);
+ /* This is a legacy message; the far side is responsible for
+ * clearing the in-use pattern, so don't clear it here.
+ */
+ return msg;
+ }
+
+ /* Return the pfvf_message format */
+ msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
+
+ /* The in-use pattern is not cleared for notifications (so that
+ * it can be used for collision detection) or older implementations
+ */
+ if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
+ !params->is_notification_message(msg.type))
+ gen2_csr_clear_in_use(&csr_val, remote_offset);
+
+ /* To ACK, clear the INT bit */
+ csr_val &= ~int_bit;
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+
+ return msg;
+}
+
+static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock)
+{
+ struct pfvf_gen2_params params = {
+ .csr_lock = csr_lock,
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .is_notification_message = is_pf2vf_notification,
+ };
+
+ return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ u32 pfvf_offset, struct mutex *csr_lock)
+{
+ struct pfvf_gen2_params params = {
+ .csr_lock = csr_lock,
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .is_notification_message = is_vf2pf_notification,
+ };
+
+ return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ struct pfvf_gen2_params params = {
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .is_notification_message = is_pf2vf_notification,
+ .compat_ver = compat_ver,
+ };
+
+ return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ struct pfvf_gen2_params params = {
+ .pfvf_offset = pfvf_offset,
+ .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+ .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+ .is_notification_message = is_vf2pf_notification,
+ .compat_ver = compat_ver,
+ };
+
+ return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+ pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ pfvf_ops->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
+ pfvf_ops->send_msg = adf_gen2_pf2vf_send;
+ pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
+
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
+ pfvf_ops->send_msg = adf_gen2_vf2pf_send;
+ pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
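For context, a hedged sketch of how a Gen2 PF device is expected to consume these initializers from its hw_data setup; the enclosing function name is hypothetical and not part of this patch.

	/* Sketch: hooking up the Gen2 PF ops from a device-specific
	 * hw_data init. The function name is illustrative only.
	 */
	static void example_gen2_init_hw_data(struct adf_hw_device_data *hw_data)
	{
		/* other hw_data fields set up here */
		adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	}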
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
new file mode 100644
index 000000000000..a716545a764c
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN2_PFVF_H
+#define ADF_GEN2_PFVF_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+
+#if defined(CONFIG_PCI_IOV)
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+
+static inline void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN2_PFVF_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
index 000528327b29..3148a62938fd 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+#include <linux/iopoll.h>
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
@@ -109,20 +111,13 @@ static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
u32 ssm_wdt_pke_high = 0;
u32 ssm_wdt_pke_low = 0;
u32 ssm_wdt_high = 0;
u32 ssm_wdt_low = 0;
- void __iomem *pmisc_addr;
- struct adf_bar *pmisc;
- int pmisc_id;
-
- pmisc_id = hw_data->get_misc_bar_id(hw_data);
- pmisc = &GET_BARS(accel_dev)[pmisc_id];
- pmisc_addr = pmisc->virt_addr;
/* Convert 64bit WDT timer value into 32bit values for
* mmio write to 32bit CSRs.
@@ -139,3 +134,61 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+ u32 status;
+ int ret;
+
+ /* Write rpresetctl register BIT(0) as 1
+ * Since rpresetctl registers have no RW fields, no need to preserve
+ * values for other bits. Just write directly.
+ */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_RESET);
+
+ /* Read rpresetsts register and wait for rp reset to complete */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US,
+ ADF_RPRESET_POLL_TIMEOUT_US, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (!ret) {
+ /* When rp reset is done, clear rpresetsts */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+ ADF_WQM_CSR_RPRESETSTS_STATUS);
+ }
+
+ return ret;
+}
+
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
+ void __iomem *csr;
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "ring pair reset for bank:%d\n", bank_number);
+
+ csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
+ ret = reset_ring_pair(csr, bank_number);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "ring pair reset failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
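A hedged sketch of the expected consumer: the Gen4 device-specific hw_data init would publish this helper through hw_data->ring_pair_reset, which the PF ring-pair reset message handler added later in this patch checks before acting. The field assignment and the enclosing function name are assumptions.

	/* Sketch: exposing the ring pair reset to the PFVF RP_RESET handler.
	 * The enclosing init function is hypothetical.
	 */
	static void example_gen4_init_hw_data(struct adf_hw_device_data *hw_data)
	{
		hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	}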
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index b8fca1ff7aab..f0f71ca44ca3 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -4,6 +4,7 @@
#define ADF_GEN4_HW_CSR_DATA_H_
#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
@@ -94,6 +95,13 @@ do { \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+/* Default ring mapping */
+#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
+ (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+ SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+ ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+ SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
/* WDT timers
*
* Timeout is in cycles. Clock speed may vary across products but this
@@ -106,6 +114,15 @@ do { \
#define ADF_SSMWDTPKEL_OFFSET 0x58
#define ADF_SSMWDTPKEH_OFFSET 0x60
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US 20
+#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
+#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
new file mode 100644
index 000000000000..8efbedf63bc8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+#define ADF_4XXX_MAX_NUM_VFS 16
+
+#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
+#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
+
+/* VF2PF interrupt source registers */
+#define ADF_4XXX_VM2PF_SOU(i) (0x41A180 + ((i) * 4))
+#define ADF_4XXX_VM2PF_MSK(i) (0x41A1C0 + ((i) * 4))
+#define ADF_4XXX_VM2PF_INT_EN_MSK BIT(0)
+
+#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
+#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
+#define ADF_PFVF_GEN4_MSGDATA_SHIFT 8
+#define ADF_PFVF_GEN4_MSGDATA_MASK 0xFFFFFF
+
+static const struct pfvf_csr_format csr_gen4_fmt = {
+ { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
+ { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
+};
+
+static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
+{
+ return ADF_4XXX_PF2VM_OFFSET(i);
+}
+
+static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
+{
+ return ADF_4XXX_VM2PF_OFFSET(i);
+}
+
+static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ int i;
+ u32 sou, mask;
+ int num_csrs = ADF_4XXX_MAX_NUM_VFS;
+ u32 vf_mask = 0;
+
+ for (i = 0; i < num_csrs; i++) {
+ sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU(i));
+ mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK(i));
+ sou &= ~mask;
+ vf_mask |= sou << i;
+ }
+
+ return vf_mask;
+}
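A brief worked example of the loop above, assuming bit 0 of each VM2PF_SOU register reports the pending interrupt for that VF:

	/* Worked example: only VF2 has an unmasked pending interrupt.
	 *   i = 2: sou = 0x1, mask = 0x0 -> sou & ~mask = 0x1
	 *   vf_mask |= 0x1 << 2          -> vf_mask = BIT(2)
	 */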
+
+static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ int num_csrs = ADF_4XXX_MAX_NUM_VFS;
+ unsigned long mask = vf_mask;
+ unsigned int val;
+ int i;
+
+ for_each_set_bit(i, &mask, num_csrs) {
+ unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
+
+ val = ADF_CSR_RD(pmisc_addr, offset) & ~ADF_4XXX_VM2PF_INT_EN_MSK;
+ ADF_CSR_WR(pmisc_addr, offset, val);
+ }
+}
+
+static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr,
+ u32 vf_mask)
+{
+ int num_csrs = ADF_4XXX_MAX_NUM_VFS;
+ unsigned long mask = vf_mask;
+ unsigned int val;
+ int i;
+
+ for_each_set_bit(i, &mask, num_csrs) {
+ unsigned int offset = ADF_4XXX_VM2PF_MSK(i);
+
+ val = ADF_CSR_RD(pmisc_addr, offset) | ADF_4XXX_VM2PF_INT_EN_MSK;
+ ADF_CSR_WR(pmisc_addr, offset, val);
+ }
+}
+
+static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg, u32 pfvf_offset,
+ struct mutex *csr_lock)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 csr_val;
+ int ret;
+
+ csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
+ if (unlikely(!csr_val))
+ return -EINVAL;
+
+ mutex_lock(csr_lock);
+
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
+
+ /* Wait for confirmation from remote that it received the message */
+ ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
+ ADF_PFVF_MSG_ACK_DELAY_US,
+ ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+ true, pmisc_addr, pfvf_offset);
+ if (ret < 0)
+ dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+
+ mutex_unlock(csr_lock);
+ return ret;
+}
+
+static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
+ u32 pfvf_offset, u8 compat_ver)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 csr_val;
+
+ /* Read message from the CSR */
+ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+
+ /* We can now acknowledge the message reception by clearing the
+ * interrupt bit
+ */
+ ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
+
+ /* Return the pfvf_message format */
+ return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
+}
+
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+ pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
+ pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
+ pfvf_ops->get_vf2pf_sources = adf_gen4_get_vf2pf_sources;
+ pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
+ pfvf_ops->disable_vf2pf_interrupts = adf_gen4_disable_vf2pf_interrupts;
+ pfvf_ops->send_msg = adf_gen4_pfvf_send;
+ pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
new file mode 100644
index 000000000000..17d1b774d4a8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN4_PFVF_H
+#define ADF_GEN4_PFVF_H
+
+#include "adf_accel_devices.h"
+
+#ifdef CONFIG_PCI_IOV
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN4_PFVF_H */
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index e3749e5817d9..2edc63c6b6ca 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -69,7 +69,8 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
- if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+ !accel_dev->is_vf) {
dev_err(&GET_DEV(accel_dev), "Device not configured\n");
return -EFAULT;
}
@@ -117,10 +118,16 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
hw_data->enable_ints(accel_dev);
hw_data->enable_error_correction(accel_dev);
- ret = hw_data->enable_pfvf_comms(accel_dev);
+ ret = hw_data->pfvf_ops.enable_comms(accel_dev);
if (ret)
return ret;
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+ accel_dev->is_vf) {
+ if (qat_crypto_vf_dev_config(accel_dev))
+ return -EFAULT;
+ }
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index 40593c9449a2..4ca482aa69f7 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -54,52 +54,83 @@ static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
return IRQ_HANDLED;
}
-static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+#ifdef CONFIG_PCI_IOV
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
- struct adf_accel_dev *accel_dev = dev_ptr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned long flags;
-#ifdef CONFIG_PCI_IOV
- /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
- if (accel_dev->pf.vf_info) {
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- unsigned long vf_mask;
-
- /* Get the interrupt sources triggered by VFs */
- vf_mask = hw_data->get_vf2pf_sources(pmisc_addr);
-
- if (vf_mask) {
- struct adf_accel_vf_info *vf_info;
- bool irq_handled = false;
- int i;
-
- /* Disable VF2PF interrupts for VFs with pending ints */
- adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
-
- /*
- * Handle VF2PF interrupt unless the VF is malicious and
- * is attempting to flood the host OS with VF2PF interrupts.
- */
- for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
- vf_info = accel_dev->pf.vf_info + i;
-
- if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
- dev_info(&GET_DEV(accel_dev),
- "Too many ints from VF%d\n",
- vf_info->vf_nr + 1);
- continue;
- }
-
- adf_schedule_vf2pf_handler(vf_info);
- irq_handled = true;
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+ GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+ spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+ GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+}
+
+static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ bool irq_handled = false;
+ unsigned long vf_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ vf_mask = GET_PFVF_OPS(accel_dev)->get_vf2pf_sources(pmisc_addr);
+
+ if (vf_mask) {
+ struct adf_accel_vf_info *vf_info;
+ int i;
+
+ /* Disable VF2PF interrupts for VFs with pending ints */
+ adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask);
+
+ /*
+ * Handle VF2PF interrupt unless the VF is malicious and
+ * is attempting to flood the host OS with VF2PF interrupts.
+ */
+ for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
+ vf_info = accel_dev->pf.vf_info + i;
+
+ if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Too many ints from VF%d\n",
+ vf_info->vf_nr);
+ continue;
}
- if (irq_handled)
- return IRQ_HANDLED;
+ adf_schedule_vf2pf_handler(vf_info);
+ irq_handled = true;
}
}
+ return irq_handled;
+}
+#endif /* CONFIG_PCI_IOV */
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+ struct adf_accel_dev *accel_dev = dev_ptr;
+
+#ifdef CONFIG_PCI_IOV
+ /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+ if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
+ return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
deleted file mode 100644
index 59860bdaedb6..000000000000
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ /dev/null
@@ -1,416 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#include <linux/delay.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pf2vf_msg.h"
-
-#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
-#define ADF_PFVF_MSG_ACK_DELAY 2
-#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
-#define ADF_PFVF_MSG_RETRY_DELAY 5
-#define ADF_PFVF_MSG_MAX_RETRIES 3
-#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
- ADF_PFVF_MSG_ACK_MAX_RETRY + \
- ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
-
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- hw_data->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
- spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
- spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
-
- spin_lock(&accel_dev->pf.vf2pf_ints_lock);
- hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
- spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
-}
-
-static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
-{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
- u32 val, pf2vf_offset, count = 0;
- u32 local_in_use_mask, local_in_use_pattern;
- u32 remote_in_use_mask, remote_in_use_pattern;
- struct mutex *lock; /* lock preventing concurrent acces of CSR */
- u32 int_bit;
- int ret = 0;
-
- if (accel_dev->is_vf) {
- pf2vf_offset = hw_data->get_pf2vf_offset(0);
- lock = &accel_dev->vf.vf2pf_lock;
- local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
- local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
- remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
- remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
- int_bit = ADF_VF2PF_INT;
- } else {
- pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
- lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
- local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
- local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
- remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
- remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
- int_bit = ADF_PF2VF_INT;
- }
-
- mutex_lock(lock);
-
- /* Check if the PFVF CSR is in use by remote function */
- val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- if ((val & remote_in_use_mask) == remote_in_use_pattern) {
- dev_dbg(&GET_DEV(accel_dev),
- "PFVF CSR in use by remote function\n");
- ret = -EBUSY;
- goto out;
- }
-
- msg &= ~local_in_use_mask;
- msg |= local_in_use_pattern;
-
- /* Attempt to get ownership of the PFVF CSR */
- ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
-
- /* Wait for confirmation from remote func it received the message */
- do {
- msleep(ADF_PFVF_MSG_ACK_DELAY);
- val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- } while ((val & int_bit) && (count++ < ADF_PFVF_MSG_ACK_MAX_RETRY));
-
- if (val != msg) {
- dev_dbg(&GET_DEV(accel_dev),
- "Collision - PFVF CSR overwritten by remote function\n");
- ret = -EIO;
- goto out;
- }
-
- if (val & int_bit) {
- dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
- val &= ~int_bit;
- ret = -EIO;
- }
-
- /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
- ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
-out:
- mutex_unlock(lock);
- return ret;
-}
-
-/**
- * adf_iov_putmsg() - send PFVF message
- * @accel_dev: Pointer to acceleration device.
- * @msg: Message to send
- * @vf_nr: VF number to which the message will be sent if on PF, ignored
- * otherwise
- *
- * Function sends a message through the PFVF channel
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
-{
- u32 count = 0;
- int ret;
-
- do {
- ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
- if (ret)
- msleep(ADF_PFVF_MSG_RETRY_DELAY);
- } while (ret && (count++ < ADF_PFVF_MSG_MAX_RETRIES));
-
- return ret;
-}
-
-/**
- * adf_send_pf2vf_msg() - send PF to VF message
- * @accel_dev: Pointer to acceleration device
- * @vf_nr: VF number to which the message will be sent
- * @msg: Message to send
- *
- * This function allows the PF to send a message to a specific VF.
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, u32 msg)
-{
- return adf_iov_putmsg(accel_dev, msg, vf_nr);
-}
-
-/**
- * adf_send_vf2pf_msg() - send VF to PF message
- * @accel_dev: Pointer to acceleration device
- * @msg: Message to send
- *
- * This function allows the VF to send a message to the PF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg)
-{
- return adf_iov_putmsg(accel_dev, msg, 0);
-}
-
-/**
- * adf_send_vf2pf_req() - send VF2PF request message
- * @accel_dev: Pointer to acceleration device.
- * @msg: Request message to send
- *
- * This function sends a message that requires a response from the VF to the PF
- * and waits for a reply.
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, u32 msg)
-{
- unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
- int ret;
-
- reinit_completion(&accel_dev->vf.iov_msg_completion);
-
- /* Send request from VF to PF */
- ret = adf_send_vf2pf_msg(accel_dev, msg);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send request msg to PF\n");
- return ret;
- }
-
- /* Wait for response */
- if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
- timeout)) {
- dev_err(&GET_DEV(accel_dev),
- "PFVF request/response message timeout expired\n");
- return -EIO;
- }
-
- return 0;
-}
-
-void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
-{
- struct adf_accel_dev *accel_dev = vf_info->accel_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- int bar_id = hw_data->get_misc_bar_id(hw_data);
- struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
-
- /* Read message from the VF */
- msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
- if (!(msg & ADF_VF2PF_INT)) {
- dev_info(&GET_DEV(accel_dev),
- "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
- goto out;
- }
-
- /* To ACK, clear the VF2PFINT bit */
- msg &= ~ADF_VF2PF_INT;
- ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
-
- if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
- /* Ignore legacy non-system (non-kernel) VF2PF messages */
- goto err;
-
- switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
- case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
- {
- u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
-
- resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
- (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
- ADF_PF2VF_MSGTYPE_SHIFT) |
- (ADF_PFVF_COMPAT_THIS_VERSION <<
- ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
-
- dev_dbg(&GET_DEV(accel_dev),
- "Compatibility Version Request from VF%d vers=%u\n",
- vf_nr + 1, vf_compat_ver);
-
- if (vf_compat_ver < hw_data->min_iov_compat_ver) {
- dev_err(&GET_DEV(accel_dev),
- "VF (vers %d) incompatible with PF (vers %d)\n",
- vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
- dev_err(&GET_DEV(accel_dev),
- "VF (vers %d) compat with PF (vers %d) unkn.\n",
- vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- } else {
- dev_dbg(&GET_DEV(accel_dev),
- "VF (vers %d) compatible with PF (vers %d)\n",
- vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
- resp |= ADF_PF2VF_VF_COMPATIBLE <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- }
- }
- break;
- case ADF_VF2PF_MSGTYPE_VERSION_REQ:
- dev_dbg(&GET_DEV(accel_dev),
- "Legacy VersionRequest received from VF%d 0x%x\n",
- vf_nr + 1, msg);
- resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
- (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
- ADF_PF2VF_MSGTYPE_SHIFT) |
- (ADF_PFVF_COMPAT_THIS_VERSION <<
- ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
- resp |= ADF_PF2VF_VF_COMPATIBLE <<
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- /* Set legacy major and minor version num */
- resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
- 1 << ADF_PF2VF_MINORVERSION_SHIFT;
- break;
- case ADF_VF2PF_MSGTYPE_INIT:
- {
- dev_dbg(&GET_DEV(accel_dev),
- "Init message received from VF%d 0x%x\n",
- vf_nr + 1, msg);
- vf_info->init = true;
- }
- break;
- case ADF_VF2PF_MSGTYPE_SHUTDOWN:
- {
- dev_dbg(&GET_DEV(accel_dev),
- "Shutdown message received from VF%d 0x%x\n",
- vf_nr + 1, msg);
- vf_info->init = false;
- }
- break;
- default:
- goto err;
- }
-
- if (resp && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
- dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
-
-out:
- /* re-enable interrupt on PF from this VF */
- adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
-
- return;
-err:
- dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
- vf_nr + 1, msg);
-}
-
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
-{
- struct adf_accel_vf_info *vf;
- u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
- (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
- int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
-
- for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send restarting msg to VF%d\n", i);
- }
-}
-
-static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 msg = 0;
- int ret;
-
- msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
- msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
- msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
- BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
-
- ret = adf_send_vf2pf_req(accel_dev, msg);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Compatibility Version Request.\n");
- return ret;
- }
-
- /* Response from PF received, check compatibility */
- switch (accel_dev->vf.compatible) {
- case ADF_PF2VF_VF_COMPATIBLE:
- break;
- case ADF_PF2VF_VF_COMPAT_UNKNOWN:
- /* VF is newer than PF and decides whether it is compatible */
- if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
- accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
- break;
- }
- fallthrough;
- case ADF_PF2VF_VF_INCOMPATIBLE:
- dev_err(&GET_DEV(accel_dev),
- "PF (vers %d) and VF (vers %d) are not compatible\n",
- accel_dev->vf.pf_version,
- ADF_PFVF_COMPAT_THIS_VERSION);
- return -EINVAL;
- default:
- dev_err(&GET_DEV(accel_dev),
- "Invalid response from PF; assume not compatible\n");
- return -EINVAL;
- }
- return ret;
-}
-
-/**
- * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
- *
- * @accel_dev: Pointer to acceleration device virtual function.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
-{
- adf_enable_pf2vf_interrupts(accel_dev);
- return adf_vf2pf_request_version(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
-
-/**
- * adf_enable_pf2vf_comms() - Function enables communication from pf to vf
- *
- * @accel_dev: Pointer to acceleration device virtual function.
- *
- * This function carries out the necessary steps to setup and start the PFVF
- * communication channel, if any.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
deleted file mode 100644
index a7d8f8367345..000000000000
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_PF2VF_MSG_H
-#define ADF_PF2VF_MSG_H
-
-/*
- * PF<->VF Messaging
- * The PF has an array of 32-bit PF2VF registers, one for each VF. The
- * PF can access all these registers; each VF can access only the one
- * register associated with that particular VF.
- *
- * The register functionally is split into two parts:
- * The bottom half is for PF->VF messages. In particular when the first
- * bit of this register (bit 0) gets set an interrupt will be triggered
- * in the respective VF.
- * The top half is for VF->PF messages. In particular when the first bit
- * of this half of register (bit 16) gets set an interrupt will be triggered
- * in the PF.
- *
- * The remaining bits within this register are available to encode messages.
- * and implement a collision control mechanism to prevent concurrent use of
- * the PF2VF register by both the PF and VF.
- *
- * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
- * _______________________________________________
- * | | | | | | | | | | | | | | | | |
- * +-----------------------------------------------+
- * \___________________________/ \_________/ ^ ^
- * ^ ^ | |
- * | | | VF2PF Int
- * | | Message Origin
- * | Message Type
- * Message-specific Data/Reserved
- *
- * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
- * _______________________________________________
- * | | | | | | | | | | | | | | | | |
- * +-----------------------------------------------+
- * \___________________________/ \_________/ ^ ^
- * ^ ^ | |
- * | | | PF2VF Int
- * | | Message Origin
- * | Message Type
- * Message-specific Data/Reserved
- *
- * Message Origin (Should always be 1)
- * A legacy out-of-tree QAT driver allowed for a set of messages not supported
- * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
- *
- * When a PF or VF attempts to send a message in the lower or upper 16 bits,
- * respectively, the other 16 bits are written to first with a defined
- * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
- */
-
-#define ADF_PFVF_COMPAT_THIS_VERSION 0x1 /* PF<->VF compat */
-
-/* PF->VF messages */
-#define ADF_PF2VF_INT BIT(0)
-#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
-#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C
-#define ADF_PF2VF_MSGTYPE_SHIFT 2
-#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
-#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
-#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
-#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
-
-/* PF->VF Version Response */
-#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0
-#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
-#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000
-#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
-#define ADF_PF2VF_MINORVERSION_SHIFT 6
-#define ADF_PF2VF_MAJORVERSION_SHIFT 10
-#define ADF_PF2VF_VF_COMPATIBLE 1
-#define ADF_PF2VF_VF_INCOMPATIBLE 2
-#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
-
-/* VF->PF messages */
-#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2
-#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
-#define ADF_VF2PF_INT BIT(16)
-#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
-#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000
-#define ADF_VF2PF_MSGTYPE_SHIFT 18
-#define ADF_VF2PF_MSGTYPE_INIT 0x3
-#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
-#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
-#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
-
-/* VF->PF Compatible Version Request */
-#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
-
-#endif /* ADF_IOV_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
new file mode 100644
index 000000000000..9c37a2661392
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#ifndef ADF_PFVF_MSG_H
+#define ADF_PFVF_MSG_H
+
+#include <linux/bits.h>
+
+/*
+ * PF<->VF Gen2 Messaging format
+ *
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionally is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | VF2PF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | | | | | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \___________________________/ \_________/ ^ ^
+ * ^ ^ | |
+ * | | | PF2VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see function
+ * adf_gen2_pfvf_send() in adf_gen2_pfvf.c).
+ *
+ *
+ * PF<->VF Gen4 Messaging format
+ *
+ * As in the Gen2 messaging format, 32-bit registers are used for
+ * communication between the PF and VFs. However, each VF and the PF share a
+ * pair of 32-bit registers to avoid collisions: one for PF to VF messages
+ * and one for VF to PF messages.
+ *
+ * Both the Interrupt bit and the Message Origin bit retain the same position
+ * and meaning, although non-system messages are now deprecated and not
+ * expected.
+ *
+ * 31 30 9 8 7 6 5 4 3 2 1 0
+ * _______________________________________________
+ * | | | . . . | | | | | | | | | | |
+ * +-----------------------------------------------+
+ * \_____________________/ \_______________/ ^ ^
+ * ^ ^ | |
+ * | | | PF/VF Int
+ * | | Message Origin
+ * | Message Type
+ * Message-specific Data/Reserved
+ *
+ * For both formats, the message reception is acknowledged by lowering the
+ * interrupt bit on the register where the message was sent.
+ */
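To make the Gen4 layout concrete, a hedged worked example read off the diagram above, with field positions matching the Gen4 shift/mask constants in adf_gen4_pfvf.c; whether the generic packer also sets the Message Origin bit is assumed here. A VF requesting a reset of ring pair 1 (ADF_VF2PF_MSGTYPE_RP_RESET, 0x10) would encode as:

	/* Worked example, Gen4 VF->PF ring pair reset request for bank 1:
	 *   type 0x10 << 2 (Message Type field)          = 0x00000040
	 *   data 0x01 << 8 (Message-specific Data field)  = 0x00000100
	 *   plus MSGORIGIN_SYSTEM (bit 1)                 = 0x00000002
	 *   CSR written with the interrupt bit (bit 0)    = 0x00000143
	 */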
+
+/* PFVF message common bits */
+#define ADF_PFVF_INT BIT(0)
+#define ADF_PFVF_MSGORIGIN_SYSTEM BIT(1)
+
+/* Different generations have different CSR layouts; this struct is used
+ * to abstract those differences away.
+ */
+struct pfvf_message {
+ u8 type;
+ u32 data;
+};
+
+/* PF->VF messages */
+enum pf2vf_msgtype {
+ ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
+ ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
+ ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+/* Values from 0x10 are Gen4 specific; the message type field is only 4 bits wide on Gen2 devices. */
+ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
+};
+
+/* VF->PF messages */
+enum vf2pf_msgtype {
+ ADF_VF2PF_MSGTYPE_INIT = 0x03,
+ ADF_VF2PF_MSGTYPE_SHUTDOWN = 0x04,
+ ADF_VF2PF_MSGTYPE_VERSION_REQ = 0x05,
+ ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ = 0x06,
+ ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
+ ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
+ ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+/* Values from 0x10 are Gen4 specific; the message type field is only 4 bits wide on Gen2 devices. */
+ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
+};
+
+/* VF/PF compatibility version. */
+enum pfvf_compatibility_version {
+ /* Support for extended capabilities */
+ ADF_PFVF_COMPAT_CAPABILITIES = 0x02,
+ /* In-use pattern cleared by receiver */
+ ADF_PFVF_COMPAT_FAST_ACK = 0x03,
+ /* Ring to service mapping support for non-standard mappings */
+ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+ /* Reference to the latest version */
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+};
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK GENMASK(7, 0)
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK GENMASK(9, 8)
+
+enum pf2vf_compat_response {
+ ADF_PF2VF_VF_COMPATIBLE = 0x01,
+ ADF_PF2VF_VF_INCOMPATIBLE = 0x02,
+ ADF_PF2VF_VF_COMPAT_UNKNOWN = 0x03,
+};
+
+enum ring_reset_result {
+ RPRESET_SUCCESS = 0x00,
+ RPRESET_NOT_SUPPORTED = 0x01,
+ RPRESET_INVAL_BANK = 0x02,
+ RPRESET_TIMEOUT = 0x03,
+};
+
+#define ADF_VF2PF_RNG_RESET_RP_MASK GENMASK(1, 0)
+#define ADF_VF2PF_RNG_RESET_RSVD_MASK GENMASK(25, 2)
+
+/* PF->VF Block Responses */
+#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK GENMASK(1, 0)
+#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK GENMASK(9, 2)
+
+enum pf2vf_blkmsg_resp_type {
+ ADF_PF2VF_BLKMSG_RESP_TYPE_DATA = 0x00,
+ ADF_PF2VF_BLKMSG_RESP_TYPE_CRC = 0x01,
+ ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR = 0x02,
+};
+
+/* PF->VF Block Error Code */
+enum pf2vf_blkmsg_error {
+ ADF_PF2VF_INVALID_BLOCK_TYPE = 0x00,
+ ADF_PF2VF_INVALID_BYTE_NUM_REQ = 0x01,
+ ADF_PF2VF_PAYLOAD_TRUNCATED = 0x02,
+ ADF_PF2VF_UNSPECIFIED_ERROR = 0x03,
+};
+
+/* VF->PF Block Requests */
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK GENMASK(1, 0)
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK GENMASK(8, 2)
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK GENMASK(2, 0)
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK GENMASK(8, 3)
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK GENMASK(3, 0)
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK GENMASK(8, 4)
+#define ADF_VF2PF_BLOCK_CRC_REQ_MASK BIT(9)
+
+/* VF->PF Block Request Types
+ * 0..15 - 32 byte message
+ * 16..23 - 64 byte message
+ * 24..27 - 128 byte message
+ */
+enum vf2pf_blkmsg_req_type {
+ ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY = 0x02,
+ ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP = 0x03,
+};
+
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
+
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
+ ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
+
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
+ (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
+ ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
+
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
+ FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
+
+struct pfvf_blkmsg_header {
+ u8 version;
+ u8 payload_size;
+} __packed;
+
+#define ADF_PFVF_BLKMSG_HEADER_SIZE (sizeof(struct pfvf_blkmsg_header))
+#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg) (sizeof(blkmsg) - \
+ ADF_PFVF_BLKMSG_HEADER_SIZE)
+#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg) (ADF_PFVF_BLKMSG_HEADER_SIZE + \
+ (blkmsg)->hdr.payload_size)
+#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE 128
+
+/* PF->VF Block message header bytes */
+#define ADF_PFVF_BLKMSG_VER_BYTE 0
+#define ADF_PFVF_BLKMSG_LEN_BYTE 1
+
+/* PF/VF Capabilities message values */
+enum blkmsg_capabilities_versions {
+ ADF_PFVF_CAPABILITIES_V1_VERSION = 0x01,
+ ADF_PFVF_CAPABILITIES_V2_VERSION = 0x02,
+ ADF_PFVF_CAPABILITIES_V3_VERSION = 0x03,
+};
+
+struct capabilities_v1 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+} __packed;
+
+struct capabilities_v2 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+ u32 capabilities;
+} __packed;
+
+struct capabilities_v3 {
+ struct pfvf_blkmsg_header hdr;
+ u32 ext_dc_caps;
+ u32 capabilities;
+ u32 frequency;
+} __packed;
+
+/* PF/VF Ring to service mapping values */
+enum blkmsg_ring_to_svc_versions {
+ ADF_PFVF_RING_TO_SVC_VERSION = 0x01,
+};
+
+struct ring_to_svc_map_v1 {
+ struct pfvf_blkmsg_header hdr;
+ u16 map;
+} __packed;
+
+#endif /* ADF_PFVF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
new file mode 100644
index 000000000000..14c069f0d71a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_vf_info *vf;
+ struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
+ int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+ for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+ if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send restarting msg to VF%d\n", i);
+ }
+}
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct capabilities_v2 caps_msg;
+
+ caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
+ caps_msg.capabilities = hw_data->accel_capabilities_mask;
+
+ caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
+ caps_msg.hdr.payload_size =
+ ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
+
+ memcpy(buffer, &caps_msg, sizeof(caps_msg));
+
+ return 0;
+}
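A quick size check of the response assembled above; since the structs are __packed, the arithmetic is exact.

	/* Size check for the capabilities_v2 response built above:
	 *   sizeof(struct pfvf_blkmsg_header) = 2 bytes (version + payload_size)
	 *   sizeof(struct capabilities_v2)    = 2 + 4 + 4 = 10 bytes
	 *   hdr.payload_size                  = 10 - 2    = 8 bytes
	 */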
+
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat)
+{
+ struct ring_to_svc_map_v1 rts_map_msg;
+
+ rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
+ rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
+ rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
+
+ memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
new file mode 100644
index 000000000000..e8982d1ac896
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_MSG_H
+#define ADF_PFVF_PF_MSG_H
+
+#include "adf_accel_devices.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+
+typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+ u8 *buffer, u8 compat);
+
+#endif /* ADF_PFVF_PF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
new file mode 100644
index 000000000000..588352de1ef0
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
+
+static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
+ NULL, /* no message type defined for value 0 */
+ NULL, /* no message type defined for value 1 */
+ adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
+ adf_pf_ring_to_svc_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
+};
+
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: VF number to which the message will be sent
+ * @msg: Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
+
+ return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+ &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
+}
+
+/**
+ * adf_recv_vf2pf_msg() - receive a VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: Number of the VF from where the message will be received
+ *
+ * This function allows the PF to receive a message from a specific VF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
+
+ return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
+}
+
+static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
+{
+ if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
+ return NULL;
+
+ return pf2vf_blkmsg_providers[type];
+}
+
+/* Byte pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
+{
+ return blkmsg[index];
+}
+
+/* CRC pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
+{
+ /* count is 0-based, turn it into a length */
+ return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
+}
+
+static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
+ u8 type, u8 byte, u8 max_size, u8 *data,
+ pf2vf_blkmsg_data_getter_fn data_getter)
+{
+ u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ adf_pf2vf_blkmsg_provider provider;
+ u8 msg_size;
+
+ provider = get_blkmsg_response_provider(type);
+
+ if (unlikely(!provider)) {
+ pr_err("QAT: No registered provider for message %d\n", type);
+ *data = ADF_PF2VF_INVALID_BLOCK_TYPE;
+ return -EINVAL;
+ }
+
+ if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
+ pr_err("QAT: unknown error from provider for message %d\n", type);
+ *data = ADF_PF2VF_UNSPECIFIED_ERROR;
+ return -EINVAL;
+ }
+
+ msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
+
+ if (unlikely(msg_size >= max_size)) {
+ pr_err("QAT: Invalid size %d provided for message type %d\n",
+ msg_size, type);
+ *data = ADF_PF2VF_PAYLOAD_TRUNCATED;
+ return -EINVAL;
+ }
+
+ if (unlikely(byte >= msg_size)) {
+ pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
+ byte, msg_size);
+ *data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
+ return -EINVAL;
+ }
+
+ *data = data_getter(blkmsg, byte);
+ return 0;
+}
+
+static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
+ struct pfvf_message req)
+{
+ u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
+ struct pfvf_message resp = { 0 };
+ u8 resp_data = 0;
+ u8 blk_type;
+ u8 blk_byte;
+ u8 byte_max;
+
+ switch (req.type) {
+ case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
+ + ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
+ blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+ break;
+ case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
+ + ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
+ blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+ break;
+ case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+ blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
+ blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
+ byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+ break;
+ }
+
+ /* Is this a request for CRC or data? */
+ if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
+ dev_dbg(&GET_DEV(vf_info->accel_dev),
+ "BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
+ blk_type, blk_byte, vf_info->vf_nr);
+
+ if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+ byte_max, &resp_data,
+ adf_pf2vf_blkmsg_get_crc))
+ resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
+ } else {
+ dev_dbg(&GET_DEV(vf_info->accel_dev),
+ "BlockMsg of type %d for data byte %d received from VF%d\n",
+ blk_type, blk_byte, vf_info->vf_nr);
+
+ if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+ byte_max, &resp_data,
+ adf_pf2vf_blkmsg_get_byte))
+ resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
+ }
+
+ resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
+ resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
+ FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
+
+ return resp;
+}
+
+static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
+ struct pfvf_message req)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct pfvf_message resp = {
+ .type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
+ .data = RPRESET_SUCCESS
+ };
+ u32 bank_number;
+ u32 rsvd_field;
+
+ bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
+ rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring Pair Reset Message received from VF%d for bank 0x%x\n",
+ vf_nr, bank_number);
+
+ if (!hw_data->ring_pair_reset || rsvd_field) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring Pair Reset for VF%d is not supported\n", vf_nr);
+ resp.data = RPRESET_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (bank_number >= hw_data->num_banks_per_vf) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid bank number (0x%x) from VF%d for Ring Reset\n",
+ bank_number, vf_nr);
+ resp.data = RPRESET_INVAL_BANK;
+ goto out;
+ }
+
+ /* Convert the VF provided value to PF bank number */
+ bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
+ if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring pair reset for VF%d failure\n", vf_nr);
+ resp.data = RPRESET_TIMEOUT;
+ goto out;
+ }
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Ring pair reset for VF%d successfully\n", vf_nr);
+
+out:
+ return resp;
+}
+
+static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
+ struct pfvf_message msg, struct pfvf_message *resp)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+
+ switch (msg.type) {
+ case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+ {
+ u8 vf_compat_ver = msg.data;
+ u8 compat;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
+ vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
+
+ if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+ compat = ADF_PF2VF_VF_COMPATIBLE;
+ else
+ compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
+
+ vf_info->vf_compat_ver = vf_compat_ver;
+
+ resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+ resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
+ ADF_PFVF_COMPAT_THIS_VERSION) |
+ FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+ {
+ u8 compat;
+
+ dev_dbg(&GET_DEV(accel_dev),
+ "Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
+ vf_nr);
+
+ /* legacy driver, VF compat_ver is 0 */
+ vf_info->vf_compat_ver = 0;
+
+ /* PF always newer than legacy VF */
+ compat = ADF_PF2VF_VF_COMPATIBLE;
+
+ /* Set legacy major and minor version to the latest, 1.1 */
+ resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+ resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
+ FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_INIT:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Init message received from VF%d\n", vf_nr);
+ vf_info->init = true;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+ {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Shutdown message received from VF%d\n", vf_nr);
+ vf_info->init = false;
+ }
+ break;
+ case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+ case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+ case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+ *resp = handle_blkmsg_req(vf_info, msg);
+ break;
+ case ADF_VF2PF_MSGTYPE_RP_RESET:
+ *resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
+ break;
+ default:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
+ vf_nr, msg.type, msg.data);
+ return -ENOMSG;
+ }
+
+ return 0;
+}
+
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct pfvf_message req;
+ struct pfvf_message resp = {0};
+
+ req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
+ if (!req.type) /* Legacy or no message */
+ return true;
+
+ if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
+ return false;
+
+ if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send response to VF%d\n", vf_nr);
+
+ return true;
+}
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from pf to vf
+ *
+ * @accel_dev: Pointer to acceleration device (physical function).
+ *
+ * This function carries out the necessary steps to setup and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+ adf_pfvf_crc_init();
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
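
As a rough illustration of what handle_blkmsg_req() above hands back to the VF, the standalone sketch below packs one response word from a response type and a single payload byte. It is a minimal userspace example only; the field positions and widths are assumptions made for the sketch, not the values defined in adf_pfvf_msg.h.

    #include <stdint.h>
    #include <stdio.h>

    #define RESP_TYPE_SHIFT 0
    #define RESP_TYPE_MASK  0x3u    /* assumed 2-bit response type field */
    #define RESP_DATA_SHIFT 2
    #define RESP_DATA_MASK  0xFFu   /* assumed 8-bit response data field */

    enum resp_type { RESP_DATA = 0, RESP_CRC = 1, RESP_ERROR = 2 };

    /* Pack a block message response: which kind of byte it is, plus the byte */
    static uint16_t pack_blkmsg_resp(enum resp_type type, uint8_t byte)
    {
            return (uint16_t)((type & RESP_TYPE_MASK) << RESP_TYPE_SHIFT) |
                   (uint16_t)((byte & RESP_DATA_MASK) << RESP_DATA_SHIFT);
    }

    int main(void)
    {
            /* PF answering a data request with payload byte 0x5A */
            printf("resp = 0x%04X\n", pack_blkmsg_resp(RESP_DATA, 0x5A));
            return 0;
    }
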
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
new file mode 100644
index 000000000000..165d266d023d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_PROTO_H
+#define ADF_PFVF_PF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg);
+
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_PF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
new file mode 100644
index 000000000000..c5f6d77d4bb8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/crc8.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+
+/* CRC Calculation */
+DECLARE_CRC8_TABLE(pfvf_crc8_table);
+#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
+
+void adf_pfvf_crc_init(void)
+{
+ crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
+}
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
+{
+ return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
+}
+
+static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
+ u32 value, const struct pfvf_field_format *fmt)
+{
+ if (unlikely((value & fmt->mask) != value)) {
+ dev_err(&GET_DEV(accel_dev),
+ "PFVF message value 0x%X out of range, %u max allowed\n",
+ value, fmt->mask);
+ return false;
+ }
+
+ *csr_msg |= value << fmt->offset;
+
+ return true;
+}
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg,
+ const struct pfvf_csr_format *fmt)
+{
+ u32 csr_msg = 0;
+
+ if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
+ !set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
+ return 0;
+
+ return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
+}
+
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
+ const struct pfvf_csr_format *fmt)
+{
+ struct pfvf_message msg = { 0 };
+
+ msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
+ msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
+
+ if (unlikely(!msg.type))
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid PFVF msg with no type received\n");
+
+ return msg;
+}
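
The CRC used by the block message transport above is the kernel's table-driven CRC-8 with polynomial 0x97 and an all-ones init value. Below is a minimal bitwise userspace sketch of the same checksum, useful for cross-checking values outside the driver; it is illustrative code, not part of the patch.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* MSB-first CRC-8, polynomial 0x97, init 0xFF (the parameters used above) */
    static uint8_t blkmsg_crc8(const uint8_t *buf, size_t len)
    {
            uint8_t crc = 0xFF;

            while (len--) {
                    crc ^= *buf++;
                    for (int bit = 0; bit < 8; bit++)
                            crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x97)
                                               : (uint8_t)(crc << 1);
            }
            return crc;
    }

    int main(void)
    {
            /* version byte, length byte, two payload bytes */
            const uint8_t blkmsg[] = { 0x01, 0x02, 0xAA, 0x55 };

            printf("blkmsg crc = 0x%02X\n", blkmsg_crc8(blkmsg, sizeof(blkmsg)));
            return 0;
    }
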
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
new file mode 100644
index 000000000000..2be048e2287b
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_UTILS_H
+#define ADF_PFVF_UTILS_H
+
+#include <linux/types.h>
+#include "adf_pfvf_msg.h"
+
+/* How long to wait for far side to acknowledge receipt */
+#define ADF_PFVF_MSG_ACK_DELAY_US 4
+#define ADF_PFVF_MSG_ACK_MAX_DELAY_US (1 * USEC_PER_SEC)
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
+void adf_pfvf_crc_init(void);
+
+struct pfvf_field_format {
+ u8 offset;
+ u32 mask;
+};
+
+struct pfvf_csr_format {
+ struct pfvf_field_format type;
+ struct pfvf_field_format data;
+};
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ const struct pfvf_csr_format *fmt);
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
+ const struct pfvf_csr_format *fmt);
+
+#endif /* ADF_PFVF_UTILS_H */
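
adf_pfvf_csr_msg_of() and adf_pfvf_message_of(), declared above, translate between a logical (type, data) pair and the raw CSR word through a per-generation format descriptor. The standalone sketch below shows that round trip; the offsets and masks are invented for the example and do not correspond to any real device layout.

    #include <stdint.h>
    #include <stdio.h>

    struct field_format { uint8_t offset; uint32_t mask; };
    struct csr_format { struct field_format type; struct field_format data; };

    /* Hypothetical layout: 4-bit type at bit 2, 10-bit data at bit 6 */
    static const struct csr_format fmt = {
            .type = { .offset = 2, .mask = 0xF },
            .data = { .offset = 6, .mask = 0x3FF },
    };

    static uint32_t csr_msg_of(uint8_t type, uint32_t data)
    {
            if ((type & fmt.type.mask) != type || (data & fmt.data.mask) != data)
                    return 0;       /* field out of range for this format */

            return ((uint32_t)type << fmt.type.offset) | (data << fmt.data.offset);
    }

    static void message_of(uint32_t csr, uint8_t *type, uint32_t *data)
    {
            *type = (csr >> fmt.type.offset) & fmt.type.mask;
            *data = (csr >> fmt.data.offset) & fmt.data.mask;
    }

    int main(void)
    {
            uint32_t csr = csr_msg_of(0x3, 0x12A);
            uint32_t data;
            uint8_t type;

            message_of(csr, &type, &data);
            printf("csr=0x%08X type=0x%X data=0x%X\n", csr, type, data);
            return 0;
    }
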
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
new file mode 100644
index 000000000000..14b222691c9c
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+/**
+ * adf_vf2pf_notify_init() - send init msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to a PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
+
+ if (adf_send_vf2pf_msg(accel_dev, msg)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
+
+/**
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to a PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
+
+ if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+ if (adf_send_vf2pf_msg(accel_dev, msg))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
+
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+ u8 pf_version;
+ int compat;
+ int ret;
+ struct pfvf_message resp;
+ struct pfvf_message msg = {
+ .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
+ .data = ADF_PFVF_COMPAT_THIS_VERSION,
+ };
+
+ BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+ ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Compatibility Version Request.\n");
+ return ret;
+ }
+
+ pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
+ compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
+
+ /* Response from PF received, check compatibility */
+ switch (compat) {
+ case ADF_PF2VF_VF_COMPATIBLE:
+ break;
+ case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+ /* VF is newer than PF - compatible for now */
+ break;
+ case ADF_PF2VF_VF_INCOMPATIBLE:
+ dev_err(&GET_DEV(accel_dev),
+ "PF (vers %d) and VF (vers %d) are not compatible\n",
+ pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
+ return -EINVAL;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid response from PF; assume not compatible\n");
+ return -EINVAL;
+ }
+
+ accel_dev->vf.pf_compat_ver = pf_version;
+ return 0;
+}
+
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct capabilities_v3 cap_msg = { { 0 }, };
+ unsigned int len = sizeof(cap_msg);
+
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
+ /* The PF is too old to support the extended capabilities */
+ return 0;
+
+ if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
+ (u8 *)&cap_msg, &len)) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to get block message response\n");
+ return -EFAULT;
+ }
+
+ switch (cap_msg.hdr.version) {
+ default:
+ /* Newer version received, handle only the known parts */
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V3_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v3)))
+ hw_data->clock_frequency = cap_msg.frequency;
+ else
+ dev_info(&GET_DEV(accel_dev), "Could not get frequency");
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V2_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v2)))
+ hw_data->accel_capabilities_mask = cap_msg.capabilities;
+ else
+ dev_info(&GET_DEV(accel_dev), "Could not get capabilities");
+ fallthrough;
+ case ADF_PFVF_CAPABILITIES_V1_VERSION:
+ if (likely(len >= sizeof(struct capabilities_v1))) {
+ hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
+ } else {
+ dev_err(&GET_DEV(accel_dev),
+ "Capabilities message truncated to %d bytes\n", len);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
+{
+ struct ring_to_svc_map_v1 rts_map_msg = { { 0 }, };
+ unsigned int len = sizeof(rts_map_msg);
+
+ if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
+ /* Use already set default mappings */
+ return 0;
+
+ if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
+ (u8 *)&rts_map_msg, &len)) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to get block message response\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
+ dev_err(&GET_DEV(accel_dev),
+ "RING_TO_SVC message truncated to %d bytes\n", len);
+ return -EFAULT;
+ }
+
+ /* Only v1 at present */
+ accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
+ return 0;
+}
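
adf_vf2pf_get_capabilities() above trusts only as many versioned fields as the returned length actually covers, falling through from the newest known layout to the oldest. The standalone sketch below reproduces that pattern; the struct layouts are assumptions made for the example, not the driver's wire format.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assumed example layouts, each version appending one field */
    struct caps_v1 { uint8_t version; uint8_t len; uint16_t ext_dc_caps; };
    struct caps_v2 { struct caps_v1 v1; uint32_t capabilities; };
    struct caps_v3 { struct caps_v2 v2; uint32_t frequency; };

    static void parse_caps(const uint8_t *buf, unsigned int len)
    {
            struct caps_v3 msg;

            memset(&msg, 0, sizeof(msg));
            memcpy(&msg, buf, len < sizeof(msg) ? len : sizeof(msg));

            switch (msg.v2.v1.version) {
            default:        /* newer than we know: use only the known parts */
            case 3:
                    if (len >= sizeof(struct caps_v3))
                            printf("frequency: %u\n", (unsigned int)msg.frequency);
                    /* fall through */
            case 2:
                    if (len >= sizeof(struct caps_v2))
                            printf("capabilities: 0x%x\n",
                                   (unsigned int)msg.v2.capabilities);
                    /* fall through */
            case 1:
                    if (len >= sizeof(struct caps_v1))
                            printf("ext dc caps: 0x%x\n", msg.v2.v1.ext_dc_caps);
            }
    }

    int main(void)
    {
            uint8_t raw[sizeof(struct caps_v3)] = { 3, sizeof(struct caps_v3) - 2 };

            parse_caps(raw, sizeof(raw));   /* full-length v3 message */
            parse_caps(raw, 4);             /* truncated: only v1 fields usable */
            return 0;
    }
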
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
new file mode 100644
index 000000000000..71bc0e3f1d93
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_MSG_H
+#define ADF_PFVF_VF_MSG_H
+
+#if defined(CONFIG_PCI_IOV)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+
+#endif /* ADF_PFVF_VF_MSG_H */
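
adf_vf2pf_request_version(), declared above, drives the version handshake: each side treats a peer that is no newer than itself as compatible and reports "unknown" for a newer peer, deferring the decision to that peer. A tiny standalone sketch of that decision (the extra rule by which a PF may reject a too-old VF is omitted):

    #include <stdio.h>

    enum compat { COMPATIBLE, INCOMPATIBLE, COMPAT_UNKNOWN };

    /* Local side's verdict on the protocol version reported by its peer */
    static enum compat check_compat(unsigned int self_ver, unsigned int peer_ver)
    {
            return peer_ver <= self_ver ? COMPATIBLE : COMPAT_UNKNOWN;
    }

    int main(void)
    {
            printf("PF v3 vs VF v2 -> %d\n", check_compat(3, 2)); /* COMPATIBLE */
            printf("PF v3 vs VF v5 -> %d\n", check_compat(3, 5)); /* COMPAT_UNKNOWN */
            return 0;
    }
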
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
new file mode 100644
index 000000000000..1015155b6374
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_PFVF_MSG_ACK_DELAY 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
+
+/* How many times to retry if there is no response */
+#define ADF_PFVF_MSG_RESP_RETRIES 5
+#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
+ ADF_PFVF_MSG_ACK_MAX_RETRY + \
+ ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @msg: Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
+
+ return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+ &accel_dev->vf.vf2pf_lock);
+}
+
+/**
+ * adf_recv_pf2vf_msg() - receive a PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ *
+ * This function allows the VF to receive a message from the PF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+ struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+ u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
+
+ return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Request message to send
+ * @resp: Returned PF response
+ *
+ * This function sends a request message from the VF to the PF and waits
+ * for the corresponding response.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ struct pfvf_message *resp)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+ unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
+ int ret;
+
+ reinit_completion(&accel_dev->vf.msg_received);
+
+ /* Send request from VF to PF */
+ do {
+ ret = adf_send_vf2pf_msg(accel_dev, msg);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send request msg to PF\n");
+ return ret;
+ }
+
+ /* Wait for response, if it times out retry */
+ ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
+ timeout);
+ if (ret) {
+ if (likely(resp))
+ *resp = accel_dev->vf.response;
+
+ /* Once copied, set to an invalid value */
+ accel_dev->vf.response.type = 0;
+
+ return 0;
+ }
+
+ dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
+ } while (--retries);
+
+ return -EIO;
+}
+
+static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
+ u8 *type, u8 *data)
+{
+ struct pfvf_message req = { 0 };
+ struct pfvf_message resp = { 0 };
+ u8 blk_type;
+ u8 blk_byte;
+ u8 msg_type;
+ u8 max_data;
+ int err;
+
+ /* Convert the block type to {small, medium, large} size category */
+ if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
+ blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+ } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
+ *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
+ blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+ } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
+ msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
+ blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
+ *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
+ blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
+ max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+ } else {
+ dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (*data > max_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid byte %s %u for message type %u\n",
+ crc ? "count" : "index", *data, *type);
+ return -EINVAL;
+ }
+
+ /* Build the block message */
+ req.type = msg_type;
+ req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
+
+ err = adf_send_vf2pf_req(accel_dev, req, &resp);
+ if (err)
+ return err;
+
+ *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
+ *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
+
+ return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
+ u8 index, u8 *data)
+{
+ int ret;
+
+ ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
+ if (ret < 0)
+ return ret;
+
+ if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unexpected BLKMSG response type %u, byte 0x%x\n",
+ type, index);
+ return -EFAULT;
+ }
+
+ *data = index;
+ return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
+ u8 bytes, u8 *crc)
+{
+ int ret;
+
+ /* The count of bytes refers to a length, however shift it to a 0-based
+ * count to avoid overflows. Thus, a request for 0 bytes is technically
+ * valid.
+ */
+ --bytes;
+
+ ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
+ if (ret < 0)
+ return ret;
+
+ if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
+ type, bytes);
+ return -EFAULT;
+ }
+
+ *crc = bytes;
+ return 0;
+}
+
+/**
+ * adf_send_vf2pf_blkmsg_req() - retrieve block message
+ * @accel_dev: Pointer to acceleration VF device.
+ * @type: The block message type, see adf_pfvf_msg.h for allowed values
+ * @buffer: buffer where the received data will be placed
+ * @buffer_len: buffer length on input, number of bytes written on output
+ *
+ * Request a message of type 'type' over the block message transport.
+ * This function sends the required number of block message requests and
+ * returns the overall content back to the caller through the provided buffer.
+ * The buffer should be large enough to contain the requested message type,
+ * otherwise the response will be truncated.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+ u8 *buffer, unsigned int *buffer_len)
+{
+ unsigned int index;
+ unsigned int msg_len;
+ int ret;
+ u8 remote_crc;
+ u8 local_crc;
+
+ if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
+ dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Buffer size too small for a block message\n");
+ return -EINVAL;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+ ADF_PFVF_BLKMSG_VER_BYTE,
+ &buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid version 0 received for block request %u", type);
+ return -EFAULT;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+ ADF_PFVF_BLKMSG_LEN_BYTE,
+ &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid size 0 received for block request %u", type);
+ return -EFAULT;
+ }
+
+ /* We need to pick the minimum since there is no way to request a
+ * specific version. As a consequence any scenario is possible:
+ * - PF has a newer (longer) version which doesn't fit in the buffer
+ * - VF expects a newer (longer) version, so we must not ask for
+ * bytes in excess
+ * - PF and VF share the same version, no problem
+ */
+ msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
+ msg_len = min(*buffer_len, msg_len);
+
+ /* Get the payload */
+ for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
+ ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
+ &buffer[index]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
+ if (unlikely(ret))
+ return ret;
+
+ local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
+ if (unlikely(local_crc != remote_crc)) {
+ dev_err(&GET_DEV(accel_dev),
+ "CRC error on msg type %d. Local %02X, remote %02X\n",
+ type, local_crc, remote_crc);
+ return -EIO;
+ }
+
+ *buffer_len = msg_len;
+ return 0;
+}
+
+static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
+ struct pfvf_message msg)
+{
+ switch (msg.type) {
+ case ADF_PF2VF_MSGTYPE_RESTARTING:
+ dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
+
+ adf_pf2vf_handle_pf_restarting(accel_dev);
+ return false;
+ case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+ case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
+ case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
+ dev_dbg(&GET_DEV(accel_dev),
+ "Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
+ msg.type, msg.data);
+ accel_dev->vf.response = msg;
+ complete(&accel_dev->vf.msg_received);
+ return true;
+ default:
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
+ msg.type, msg.data);
+ }
+
+ return false;
+}
+
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg;
+
+ msg = adf_recv_pf2vf_msg(accel_dev);
+ if (msg.type) /* Valid message received */
+ return adf_handle_pf2vf_msg(accel_dev, msg);
+
+ /* No replies for PF->VF messages at present */
+
+ return true;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ adf_pfvf_crc_init();
+ adf_enable_pf2vf_interrupts(accel_dev);
+
+ ret = adf_vf2pf_request_version(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_vf2pf_get_capabilities(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_vf2pf_get_ring_to_svc(accel_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
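
Putting the pieces together, the sketch below mimics the retrieval loop of adf_send_vf2pf_blkmsg_req() against an in-memory stand-in for the PF: header bytes first, then the payload, then a CRC comparison. Retries, timeouts and the 0-based CRC count adjustment are left out, and the CRC parameters match the earlier sketch.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HDR_SIZE 2      /* version byte + length byte */

    static uint8_t crc8_97(const uint8_t *buf, size_t len)
    {
            uint8_t crc = 0xFF;

            while (len--) {
                    crc ^= *buf++;
                    for (int bit = 0; bit < 8; bit++)
                            crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x97)
                                               : (uint8_t)(crc << 1);
            }
            return crc;
    }

    /* Stand-in for the PF: serves single bytes of one fixed block message */
    static const uint8_t pf_msg[] = { 0x01, 0x04, 0xDE, 0xAD, 0xBE, 0xEF };

    static uint8_t pf_get_byte(unsigned int index) { return pf_msg[index]; }
    static uint8_t pf_get_crc(unsigned int count)  { return crc8_97(pf_msg, count); }

    int main(void)
    {
            uint8_t buf[sizeof(pf_msg)];
            unsigned int msg_len, i;

            buf[0] = pf_get_byte(0);                /* version */
            buf[1] = pf_get_byte(1);                /* payload length */
            msg_len = HDR_SIZE + buf[1];

            for (i = HDR_SIZE; i < msg_len; i++)    /* payload, byte by byte */
                    buf[i] = pf_get_byte(i);

            if (pf_get_crc(msg_len) != crc8_97(buf, msg_len)) {
                    fprintf(stderr, "CRC mismatch\n");
                    return 1;
            }

            printf("received %u bytes, CRC OK\n", msg_len);
            return 0;
    }
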
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
new file mode 100644
index 000000000000..f6ee9b38c0e1
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_PROTO_H
+#define ADF_PFVF_VF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg);
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+ struct pfvf_message *resp);
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+ u8 *buffer, unsigned int *buffer_len);
+
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_VF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 90ec057f9183..b960bca1f9d2 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
-#include "adf_pf2vf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+
+#define ADF_VF2PF_RATELIMIT_INTERVAL 8
+#define ADF_VF2PF_RATELIMIT_BURST 130
static struct workqueue_struct *pf2vf_resp_wq;
@@ -19,8 +22,16 @@ static void adf_iov_send_resp(struct work_struct *work)
{
struct adf_pf2vf_resp *pf2vf_resp =
container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+ struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
+ struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+ u32 vf_nr = vf_info->vf_nr;
+ bool ret;
+
+ ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
+ if (ret)
+ /* re-enable interrupt on PF from this VF */
+ adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
- adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
kfree(pf2vf_resp);
}
@@ -50,11 +61,12 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
/* This ptr will be populated when VFs will be created */
vf_info->accel_dev = accel_dev;
vf_info->vf_nr = i;
+ vf_info->vf_compat_ver = 0;
mutex_init(&vf_info->pf2vf_lock);
ratelimit_state_init(&vf_info->vf2pf_ratelimit,
- DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
+ ADF_VF2PF_RATELIMIT_INTERVAL,
+ ADF_VF2PF_RATELIMIT_BURST);
}
/* Set Valid bits in AE Thread to PCIe Function Mapping */
@@ -62,7 +74,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
hw_data->configure_iov_threads(accel_dev, true);
/* Enable VF to PF interrupts for all VFs */
- if (hw_data->get_pf2vf_offset)
+ if (hw_data->pfvf_ops.get_pf2vf_offset)
adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
/*
@@ -92,13 +104,13 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
return;
- if (hw_data->get_pf2vf_offset)
+ if (hw_data->pfvf_ops.get_pf2vf_offset)
adf_pf2vf_notify_restarting(accel_dev);
pci_disable_sriov(accel_to_pci_dev(accel_dev));
/* Disable VF to PF interrupts */
- if (hw_data->get_pf2vf_offset)
+ if (hw_data->pfvf_ops.get_pf2vf_offset)
adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0));
/* Clear Valid bits in AE Thread to PCIe Function Mapping */
@@ -114,6 +126,32 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);
+static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+ if (!ret) {
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED,
+ services, ADF_STR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* adf_sriov_configure() - Enable SRIOV for the device
* @pdev: Pointer to PCI device.
@@ -153,8 +191,9 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- adf_dev_stop(accel_dev);
- adf_dev_shutdown(accel_dev);
+ ret = adf_sriov_prepare_restart(accel_dev);
+ if (ret)
+ return ret;
}
if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
deleted file mode 100644
index 8d11bb24cea0..000000000000
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pf2vf_msg.h"
-
-/**
- * adf_vf2pf_notify_init() - send init msg to PF
- * @accel_dev: Pointer to acceleration VF device.
- *
- * Function sends an init message from the VF to a PF
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_send_vf2pf_msg(accel_dev, msg)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
-
-/**
- * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
- * @accel_dev: Pointer to acceleration VF device.
- *
- * Function sends a shutdown message from the VF to a PF
- *
- * Return: void
- */
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
- if (adf_send_vf2pf_msg(accel_dev, msg))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index db5e7abbe5f3..86c3bd0c9c2b 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -15,7 +15,6 @@
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
-#include "adf_pf2vf_msg.h"
#define ADF_VINTSOU_OFFSET 0x204
#define ADF_VINTMSK_OFFSET 0x208
@@ -31,22 +30,16 @@ struct adf_vf_stop_data {
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
- ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x0);
+ ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
}
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
- struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- void __iomem *pmisc_bar_addr =
- pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
- ADF_CSR_WR(pmisc_bar_addr, ADF_VINTMSK_OFFSET, 0x2);
+ ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
@@ -85,78 +78,37 @@ static void adf_dev_stop_async(struct work_struct *work)
kfree(stop_data);
}
-static void adf_pf2vf_bh_handler(void *data)
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
- struct adf_accel_dev *accel_dev = data;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *pmisc_bar_addr = pmisc->virt_addr;
- u32 msg;
-
- /* Read the message from PF */
- msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
- if (!(msg & ADF_PF2VF_INT)) {
- dev_info(&GET_DEV(accel_dev),
- "Spurious PF2VF interrupt, msg %X. Ignored\n", msg);
- goto out;
- }
+ struct adf_vf_stop_data *stop_data;
- if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
- /* Ignore legacy non-system (non-kernel) PF2VF messages */
- goto err;
-
- switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
- case ADF_PF2VF_MSGTYPE_RESTARTING: {
- struct adf_vf_stop_data *stop_data;
-
- dev_dbg(&GET_DEV(accel_dev),
- "Restarting msg received from PF 0x%x\n", msg);
-
- clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
- stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
- if (!stop_data) {
- dev_err(&GET_DEV(accel_dev),
- "Couldn't schedule stop for vf_%d\n",
- accel_dev->accel_id);
- return;
- }
- stop_data->accel_dev = accel_dev;
- INIT_WORK(&stop_data->work, adf_dev_stop_async);
- queue_work(adf_vf_stop_wq, &stop_data->work);
- /* To ack, clear the PF2VFINT bit */
- msg &= ~ADF_PF2VF_INT;
- ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
- return;
- }
- case ADF_PF2VF_MSGTYPE_VERSION_RESP:
- dev_dbg(&GET_DEV(accel_dev),
- "Version resp received from PF 0x%x\n", msg);
- accel_dev->vf.pf_version =
- (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
- ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
- accel_dev->vf.compatible =
- (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
- ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
- complete(&accel_dev->vf.iov_msg_completion);
- break;
- default:
- goto err;
+ clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+ if (!stop_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't schedule stop for vf_%d\n",
+ accel_dev->accel_id);
+ return -ENOMEM;
}
+ stop_data->accel_dev = accel_dev;
+ INIT_WORK(&stop_data->work, adf_dev_stop_async);
+ queue_work(adf_vf_stop_wq, &stop_data->work);
- /* To ack, clear the PF2VFINT bit */
- msg &= ~ADF_PF2VF_INT;
- ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+ return 0;
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+ struct adf_accel_dev *accel_dev = data;
+ bool ret;
+
+ ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
+ if (ret)
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
-out:
- /* Re-enable PF2VF interrupts */
- adf_enable_pf2vf_interrupts(accel_dev);
return;
-err:
- dev_err(&GET_DEV(accel_dev),
- "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
- msg);
+
}
static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
index f05ad17fbdd6..afe59a7684ac 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -14,7 +14,8 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_COUNTERS_GET = 5,
ICP_QAT_FW_LOOPBACK = 6,
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
- ICP_QAT_FW_HEARTBEAT_GET = 8
+ ICP_QAT_FW_HEARTBEAT_GET = 8,
+ ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
};
enum icp_qat_fw_init_admin_resp_status {
@@ -52,6 +53,7 @@ struct icp_qat_fw_init_admin_resp {
__u16 version_minor_num;
__u16 version_major_num;
};
+ __u32 extended_features;
};
__u64 opaque_data;
union {
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index e39e8a2d51a7..433304cad2ed 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -91,7 +91,18 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
- /* Bits 10-25 are currently reserved */
+ /* Bits 10-11 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
+ ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
+ /* Bit 14 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
+ /* Bits 18-21 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
};
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index ece6776fbd53..7234c4940fae 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -8,6 +8,7 @@
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
+#include "adf_gen2_hw_data.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"
@@ -105,6 +106,30 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
/**
+ * qat_crypto_vf_dev_config()
+ * create dev config required to create crypto inst.
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates device configuration required to create
+ * asym, sym or crypto instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
+{
+ u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
+
+ if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
+ dev_err(&GET_DEV(accel_dev),
+ "Unsupported ring/service mapping present on PF");
+ return -EFAULT;
+ }
+
+ return qat_crypto_dev_config(accel_dev);
+}
+
+/**
* qat_crypto_dev_config() - create dev config required to create crypto inst.
*
* @accel_dev: Pointer to acceleration device.
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 12ca6b8764aa..4bfd8f3566f7 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -684,8 +684,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
{
struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *misc_bar =
- &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
unsigned int max_en_ae_id = 0;
struct adf_bar *sram_bar;
unsigned int csr_val = 0;
@@ -715,18 +714,12 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
handle->chip_info->fcu_loaded_ae_pos = 0;
- handle->hal_cap_g_ctl_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_CAP_OFFSET_4XXX);
- handle->hal_cap_ae_xfer_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_AE_OFFSET_4XXX);
- handle->hal_ep_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_EP_OFFSET_4XXX);
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
- + LOCAL_TO_XFER_REG_OFFSET);
+ + LOCAL_TO_XFER_REG_OFFSET);
break;
case PCI_DEVICE_ID_INTEL_QAT_C62X:
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
@@ -749,15 +742,9 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
- handle->hal_cap_g_ctl_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_CAP_OFFSET);
- handle->hal_cap_ae_xfer_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_AE_OFFSET);
- handle->hal_ep_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_EP_OFFSET);
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ LOCAL_TO_XFER_REG_OFFSET);
@@ -782,15 +769,9 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->fcu_dram_addr_lo = 0;
handle->chip_info->fcu_loaded_ae_csr = 0;
handle->chip_info->fcu_loaded_ae_pos = 0;
- handle->hal_cap_g_ctl_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_CAP_OFFSET);
- handle->hal_cap_ae_xfer_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_AE_OFFSET);
- handle->hal_ep_csr_addr_v =
- (void __iomem *)((uintptr_t)misc_bar->virt_addr +
- ICP_QAT_EP_OFFSET);
+ handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+ handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+ handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
handle->hal_cap_ae_local_csr_addr_v =
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+ LOCAL_TO_XFER_REG_OFFSET);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 8e2e1554dcf6..09599fe4d2f3 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"
@@ -69,6 +69,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
return capabilities;
}
@@ -114,14 +116,19 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
{
- u32 errsou5, errmsk5, vf_int_mask;
+ u32 errsou3, errmsk3, errsou5, errmsk5, vf_int_mask;
- vf_int_mask = adf_gen2_get_vf2pf_sources(pmisc_bar);
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU3);
+ vf_int_mask = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3);
- /* Get the interrupt sources triggered by VFs, but to avoid duplicates
- * in the work queue, clear vf_int_mask_sets bits that are already
- * masked in ERRMSK register.
+ /* To avoid adding duplicate entries to work queue, clear
+ * vf_int_mask_sets bits that are already masked in ERRMSK register.
*/
+ errmsk3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK3);
+ vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3);
+
+ /* Do the same for ERRSOU5 */
errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
@@ -133,7 +140,11 @@ static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
- adf_gen2_enable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
/* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
if (vf_mask >> 16) {
@@ -147,7 +158,11 @@ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
- adf_gen2_disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
if (vf_mask >> 16) {
@@ -176,6 +191,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
hw_data->enable_error_correction = adf_gen2_enable_error_correction;
@@ -201,14 +217,12 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_sbr;
- hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
- hw_data->get_vf2pf_sources = get_vf2pf_sources;
- hw_data->enable_vf2pf_interrupts = enable_vf2pf_interrupts;
- hw_data->disable_vf2pf_interrupts = disable_vf2pf_interrupts;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
+ adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ hw_data->pfvf_ops.get_vf2pf_sources = get_vf2pf_sources;
+ hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_vf2pf_interrupts = disable_vf2pf_interrupts;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 0af34dd8708a..aa17272a1507 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -25,6 +25,8 @@
#define ADF_DH895XCC_SMIA1_MASK 0x1
/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 7c6ed6bc8abf..31c14d7e1c11 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
-#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
#include "adf_dh895xccvf_hw_data.h"
static struct adf_hw_device_class dh895xcciov_class = {
@@ -47,11 +48,6 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
return DEV_SKU_VF;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_DH895XCCIOV_PF2VF_OFFSET;
-}
-
static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -71,6 +67,7 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_vf_isr_resource_alloc;
hw_data->free_irq = adf_vf_isr_resource_free;
hw_data->enable_error_correction = adf_vf_void_noop;
@@ -86,13 +83,11 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_sku = get_sku;
hw_data->enable_ints = adf_vf_void_noop;
- hw_data->enable_pfvf_comms = adf_enable_vf2pf_comms;
- hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
hw_data->dev_class->instances++;
adf_devmgr_update_class_index(hw_data);
+ adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
index 306ebb71a408..6973fa967bc8 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -12,7 +12,6 @@
#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
#define ADF_DH895XCCIOV_ETR_BAR 0
#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
-#define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 99d90f3ea2b7..18756b2e1c91 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -171,11 +171,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
/* Completion for VF2PF request/response message exchange */
- init_completion(&accel_dev->vf.iov_msg_completion);
-
- ret = qat_crypto_dev_config(accel_dev);
- if (ret)
- goto out_err_free_reg;
+ init_completion(&accel_dev->vf.msg_received);
ret = adf_dev_init(accel_dev);
if (ret)
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 290e2446a2f3..97a530171f07 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -802,8 +802,8 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
ret = crypto_register_aead(alg);
if (ret) {
- kfree(tmpl);
dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ kfree(tmpl);
return ret;
}
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 8e6fcf2c21cc..59159f5e64e5 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -498,8 +498,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
ret = crypto_register_ahash(alg);
if (ret) {
- kfree(tmpl);
dev_err(qce->dev, "%s registration failed\n", base->cra_name);
+ kfree(tmpl);
return ret;
}
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 8ff10928f581..3d27cd5210ef 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -484,8 +484,8 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
ret = crypto_register_skcipher(alg);
if (ret) {
- kfree(tmpl);
dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ kfree(tmpl);
return ret;
}
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index bcbc38dc6ae8..51b58e57153f 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -8,6 +8,7 @@
* Vitaly Andrianov
* Tero Kristo
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -646,8 +647,8 @@ static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
cmdl[upd_info->enc_offset.index] &=
~SA_CMDL_SOP_BYPASS_LEN_MASK;
cmdl[upd_info->enc_offset.index] |=
- ((u32)req->enc_offset <<
- __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+ FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
+ req->enc_offset);
if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
__be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
@@ -666,8 +667,8 @@ static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
cmdl[upd_info->auth_offset.index] &=
~SA_CMDL_SOP_BYPASS_LEN_MASK;
cmdl[upd_info->auth_offset.index] |=
- ((u32)req->auth_offset <<
- __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+ FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
+ req->auth_offset);
if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
req->auth_iv,
@@ -689,16 +690,16 @@ void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
u8 hash_size, u32 *swinfo)
{
swinfo[0] = sc_id;
- swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
+ swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
if (likely(cmdl_present))
- swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
- __ffs(SA_SW0_CMDL_INFO_MASK));
- swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
+ swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
+ cmdl_offset | SA_SW0_CMDL_PRESENT);
+ swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
- swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
+ swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
}
/* Dump the security context */
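The sa2ul hunks replace open-coded "value << __ffs(mask)" packing with FIELD_PREP() from the newly included <linux/bitfield.h>. For a contiguous mask the two are equivalent, but FIELD_PREP() also checks (at compile time, for constant values) that the value fits the field. A stand-alone sketch; the mask below is made up for illustration:

/* Minimal sketch, assuming an arbitrary 16-bit field at bits 23:8. */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_FIELD_MASK	GENMASK(23, 8)

static u32 example_pack_old(u32 val)
{
	/* Open-coded form that the patch removes. */
	return val << __ffs(EXAMPLE_FIELD_MASK);
}

static u32 example_pack_new(u32 val)
{
	/* FIELD_PREP() shifts val into the field described by the mask. */
	return FIELD_PREP(EXAMPLE_FIELD_MASK, val);
}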
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 75867c0b0017..be1bf39a317d 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -279,7 +279,7 @@ static struct shash_alg algs[] = {
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32",
- .cra_driver_name = DRIVER_NAME,
+ .cra_driver_name = "stm32-crc32-crc32",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
@@ -301,7 +301,7 @@ static struct shash_alg algs[] = {
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32c",
- .cra_driver_name = DRIVER_NAME,
+ .cra_driver_name = "stm32-crc32-crc32c",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 7389a0536ff0..59ef541123ae 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -37,7 +37,6 @@
/* Mode mask = bits [15..0] */
#define FLG_MODE_MASK GENMASK(15, 0)
/* Bit [31..16] status */
-#define FLG_CCM_PADDED_WA BIT(16)
/* Registers */
#define CRYP_CR 0x00000000
@@ -105,8 +104,6 @@
/* Misc */
#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
#define GCM_CTR_INIT 2
-#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
-#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
#define CRYP_AUTOSUSPEND_DELAY 50
struct stm32_cryp_caps {
@@ -144,26 +141,16 @@ struct stm32_cryp {
size_t authsize;
size_t hw_blocksize;
- size_t total_in;
- size_t total_in_save;
- size_t total_out;
- size_t total_out_save;
+ size_t payload_in;
+ size_t header_in;
+ size_t payload_out;
- struct scatterlist *in_sg;
struct scatterlist *out_sg;
- struct scatterlist *out_sg_save;
-
- struct scatterlist in_sgl;
- struct scatterlist out_sgl;
- bool sgs_copied;
-
- int in_sg_len;
- int out_sg_len;
struct scatter_walk in_walk;
struct scatter_walk out_walk;
- u32 last_ctr[4];
+ __be32 last_ctr[4];
u32 gcm_ctr;
};
@@ -245,6 +232,11 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
!(status & SR_BUSY), 10, 100000);
}
+static inline void stm32_cryp_enable(struct stm32_cryp *cryp)
+{
+ writel_relaxed(readl_relaxed(cryp->regs + CRYP_CR) | CR_CRYPEN, cryp->regs + CRYP_CR);
+}
+
static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
{
u32 status;
@@ -262,6 +254,7 @@ static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
}
static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err);
static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
{
@@ -283,103 +276,6 @@ static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
return cryp;
}
-static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
- size_t align)
-{
- int len = 0;
-
- if (!total)
- return 0;
-
- if (!IS_ALIGNED(total, align))
- return -EINVAL;
-
- while (sg) {
- if (!IS_ALIGNED(sg->offset, sizeof(u32)))
- return -EINVAL;
-
- if (!IS_ALIGNED(sg->length, align))
- return -EINVAL;
-
- len += sg->length;
- sg = sg_next(sg);
- }
-
- if (len != total)
- return -EINVAL;
-
- return 0;
-}
-
-static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
-{
- int ret;
-
- ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
- cryp->hw_blocksize);
- if (ret)
- return ret;
-
- ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
- cryp->hw_blocksize);
-
- return ret;
-}
-
-static void sg_copy_buf(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out)
-{
- struct scatter_walk walk;
-
- if (!nbytes)
- return;
-
- scatterwalk_start(&walk, sg);
- scatterwalk_advance(&walk, start);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
-}
-
-static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
-{
- void *buf_in, *buf_out;
- int pages, total_in, total_out;
-
- if (!stm32_cryp_check_io_aligned(cryp)) {
- cryp->sgs_copied = 0;
- return 0;
- }
-
- total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
- pages = total_in ? get_order(total_in) : 1;
- buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
-
- total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
- pages = total_out ? get_order(total_out) : 1;
- buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
-
- if (!buf_in || !buf_out) {
- dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
- cryp->sgs_copied = 0;
- return -EFAULT;
- }
-
- sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
-
- sg_init_one(&cryp->in_sgl, buf_in, total_in);
- cryp->in_sg = &cryp->in_sgl;
- cryp->in_sg_len = 1;
-
- sg_init_one(&cryp->out_sgl, buf_out, total_out);
- cryp->out_sg_save = cryp->out_sg;
- cryp->out_sg = &cryp->out_sgl;
- cryp->out_sg_len = 1;
-
- cryp->sgs_copied = 1;
-
- return 0;
-}
-
static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv)
{
if (!iv)
@@ -481,16 +377,99 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
/* Wait for end of processing */
ret = stm32_cryp_wait_enable(cryp);
- if (ret)
+ if (ret) {
dev_err(cryp->dev, "Timeout (gcm init)\n");
+ return ret;
+ }
- return ret;
+ /* Prepare next phase */
+ if (cryp->areq->assoclen) {
+ cfg |= CR_PH_HEADER;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else if (stm32_cryp_get_input_text_len(cryp)) {
+ cfg |= CR_PH_PAYLOAD;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ }
+
+ return 0;
+}
+
+static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp)
+{
+ u32 cfg;
+ int err;
+
+ /* Check if whole header written */
+ if (!cryp->header_in) {
+ /* Wait for completion */
+ err = stm32_cryp_wait_busy(cryp);
+ if (err) {
+ dev_err(cryp->dev, "Timeout (gcm/ccm header)\n");
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp, err);
+ return;
+ }
+
+ if (stm32_cryp_get_input_text_len(cryp)) {
+ /* Phase 3 : payload */
+ cfg = stm32_cryp_read(cryp, CRYP_CR);
+ cfg &= ~CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ cfg &= ~CR_PH_MASK;
+ cfg |= CR_PH_PAYLOAD | CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ /*
+ * Phase 4 : tag.
+ * Nothing to read, nothing to write: the caller has to
+ * end the request
+ */
+ }
+ }
+}
+
+static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
+{
+ unsigned int i;
+ size_t written;
+ size_t len;
+ u32 alen = cryp->areq->assoclen;
+ u32 block[AES_BLOCK_32] = {0};
+ u8 *b8 = (u8 *)block;
+
+ if (alen <= 65280) {
+ /* Write first u32 of B1 */
+ b8[0] = (alen >> 8) & 0xFF;
+ b8[1] = alen & 0xFF;
+ len = 2;
+ } else {
+ /* Build the two first u32 of B1 */
+ b8[0] = 0xFF;
+ b8[1] = 0xFE;
+ b8[2] = (alen & 0xFF000000) >> 24;
+ b8[3] = (alen & 0x00FF0000) >> 16;
+ b8[4] = (alen & 0x0000FF00) >> 8;
+ b8[5] = alen & 0x000000FF;
+ len = 6;
+ }
+
+ written = min_t(size_t, AES_BLOCK_SIZE - len, alen);
+
+ scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0);
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
+
+ cryp->header_in -= written;
+
+ stm32_crypt_gcmccm_end_header(cryp);
}
static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
{
int ret;
- u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ u32 iv_32[AES_BLOCK_32], b0_32[AES_BLOCK_32];
+ u8 *iv = (u8 *)iv_32, *b0 = (u8 *)b0_32;
__be32 *bd;
u32 *d;
unsigned int i, textlen;
@@ -531,10 +510,24 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
/* Wait for end of processing */
ret = stm32_cryp_wait_enable(cryp);
- if (ret)
+ if (ret) {
dev_err(cryp->dev, "Timeout (ccm init)\n");
+ return ret;
+ }
- return ret;
+ /* Prepare next phase */
+ if (cryp->areq->assoclen) {
+ cfg |= CR_PH_HEADER | CR_CRYPEN;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* Write first (special) block (may move to next phase [payload]) */
+ stm32_cryp_write_ccm_first_header(cryp);
+ } else if (stm32_cryp_get_input_text_len(cryp)) {
+ cfg |= CR_PH_PAYLOAD;
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ }
+
+ return 0;
}
static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
@@ -542,14 +535,11 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
- pm_runtime_resume_and_get(cryp->dev);
+ pm_runtime_get_sync(cryp->dev);
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- /* Set key */
- stm32_cryp_hw_write_key(cryp);
-
/* Set configuration */
cfg = CR_DATA8 | CR_FFLUSH;
@@ -575,23 +565,36 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
/* AES ECB/CBC decrypt: run key preparation first */
if (is_decrypt(cryp) &&
((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
- stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+ /* Configure in key preparation mode */
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP);
+ /* Set key only after full configuration done */
+ stm32_cryp_hw_write_key(cryp);
+
+ /* Start prepare key */
+ stm32_cryp_enable(cryp);
/* Wait for end of processing */
ret = stm32_cryp_wait_busy(cryp);
if (ret) {
dev_err(cryp->dev, "Timeout (key preparation)\n");
return ret;
}
- }
- cfg |= hw_mode;
+ cfg |= hw_mode | CR_DEC_NOT_ENC;
- if (is_decrypt(cryp))
- cfg |= CR_DEC_NOT_ENC;
+ /* Apply updated config (Decrypt + algo) and flush */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+ } else {
+ cfg |= hw_mode;
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
- /* Apply config and flush (valid when CRYPEN = 0) */
- stm32_cryp_write(cryp, CRYP_CR, cfg);
+ /* Apply config and flush */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ /* Set key only after configuration done */
+ stm32_cryp_hw_write_key(cryp);
+ }
switch (hw_mode) {
case CR_AES_GCM:
@@ -605,16 +608,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
if (ret)
return ret;
- /* Phase 2 : header (authenticated data) */
- if (cryp->areq->assoclen) {
- cfg |= CR_PH_HEADER;
- } else if (stm32_cryp_get_input_text_len(cryp)) {
- cfg |= CR_PH_PAYLOAD;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
- } else {
- cfg |= CR_PH_INIT;
- }
-
break;
case CR_DES_CBC:
@@ -629,11 +622,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
}
/* Enable now */
- cfg |= CR_CRYPEN;
-
- stm32_cryp_write(cryp, CRYP_CR, cfg);
-
- cryp->flags &= ~FLG_CCM_PADDED_WA;
+ stm32_cryp_enable(cryp);
return 0;
}
@@ -644,28 +633,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
/* Phase 4 : output tag */
err = stm32_cryp_read_auth_tag(cryp);
- if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
+ if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp))))
stm32_cryp_get_iv(cryp);
- if (cryp->sgs_copied) {
- void *buf_in, *buf_out;
- int pages, len;
-
- buf_in = sg_virt(&cryp->in_sgl);
- buf_out = sg_virt(&cryp->out_sgl);
-
- sg_copy_buf(buf_out, cryp->out_sg_save, 0,
- cryp->total_out_save, 1);
-
- len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
- pages = len ? get_order(len) : 1;
- free_pages((unsigned long)buf_in, pages);
-
- len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
- pages = len ? get_order(len) : 1;
- free_pages((unsigned long)buf_out, pages);
- }
-
pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
@@ -674,8 +644,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
else
crypto_finalize_skcipher_request(cryp->engine, cryp->req,
err);
-
- memset(cryp->ctx->key, 0, cryp->ctx->keylen);
}
static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -801,7 +769,20 @@ static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
@@ -825,31 +806,61 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
}
static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
}
static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
}
static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
}
static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
}
static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
}
@@ -863,53 +874,122 @@ static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
}
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
{
+ int err;
+
+ err = crypto_ccm_check_iv(req->iv);
+ if (err)
+ return err;
+
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
}
static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
{
+ int err;
+
+ err = crypto_ccm_check_iv(req->iv);
+ if (err)
+ return err;
+
return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
}
static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
}
static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
}
static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
}
static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
}
static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
}
static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
}
static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
}
static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req)
{
+ if (req->cryptlen % DES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
@@ -919,6 +999,7 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
struct stm32_cryp_ctx *ctx;
struct stm32_cryp *cryp;
struct stm32_cryp_reqctx *rctx;
+ struct scatterlist *in_sg;
int ret;
if (!req && !areq)
@@ -944,76 +1025,55 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
if (req) {
cryp->req = req;
cryp->areq = NULL;
- cryp->total_in = req->cryptlen;
- cryp->total_out = cryp->total_in;
+ cryp->header_in = 0;
+ cryp->payload_in = req->cryptlen;
+ cryp->payload_out = req->cryptlen;
+ cryp->authsize = 0;
} else {
/*
* Length of input and output data:
* Encryption case:
- * INPUT = AssocData || PlainText
+ * INPUT = AssocData || PlainText
* <- assoclen -> <- cryptlen ->
- * <------- total_in ----------->
*
- * OUTPUT = AssocData || CipherText || AuthTag
- * <- assoclen -> <- cryptlen -> <- authsize ->
- * <---------------- total_out ----------------->
+ * OUTPUT = AssocData || CipherText || AuthTag
+ * <- assoclen -> <-- cryptlen --> <- authsize ->
*
* Decryption case:
- * INPUT = AssocData || CipherText || AuthTag
- * <- assoclen -> <--------- cryptlen --------->
- * <- authsize ->
- * <---------------- total_in ------------------>
+ * INPUT = AssocData || CipherText || AuthTag
+ * <- assoclen ---> <---------- cryptlen ---------->
*
- * OUTPUT = AssocData || PlainText
- * <- assoclen -> <- crypten - authsize ->
- * <---------- total_out ----------------->
+ * OUTPUT = AssocData || PlainText
+ * <- assoclen -> <- cryptlen - authsize ->
*/
cryp->areq = areq;
cryp->req = NULL;
cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
- cryp->total_in = areq->assoclen + areq->cryptlen;
- if (is_encrypt(cryp))
- /* Append auth tag to output */
- cryp->total_out = cryp->total_in + cryp->authsize;
- else
- /* No auth tag in output */
- cryp->total_out = cryp->total_in - cryp->authsize;
+ if (is_encrypt(cryp)) {
+ cryp->payload_in = areq->cryptlen;
+ cryp->header_in = areq->assoclen;
+ cryp->payload_out = areq->cryptlen;
+ } else {
+ cryp->payload_in = areq->cryptlen - cryp->authsize;
+ cryp->header_in = areq->assoclen;
+ cryp->payload_out = cryp->payload_in;
+ }
}
- cryp->total_in_save = cryp->total_in;
- cryp->total_out_save = cryp->total_out;
+ in_sg = req ? req->src : areq->src;
+ scatterwalk_start(&cryp->in_walk, in_sg);
- cryp->in_sg = req ? req->src : areq->src;
cryp->out_sg = req ? req->dst : areq->dst;
- cryp->out_sg_save = cryp->out_sg;
-
- cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
- if (cryp->in_sg_len < 0) {
- dev_err(cryp->dev, "Cannot get in_sg_len\n");
- ret = cryp->in_sg_len;
- return ret;
- }
-
- cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
- if (cryp->out_sg_len < 0) {
- dev_err(cryp->dev, "Cannot get out_sg_len\n");
- ret = cryp->out_sg_len;
- return ret;
- }
-
- ret = stm32_cryp_copy_sgs(cryp);
- if (ret)
- return ret;
-
- scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
if (is_gcm(cryp) || is_ccm(cryp)) {
/* In output, jump after assoc data */
- scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen);
- cryp->total_out -= cryp->areq->assoclen;
+ scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2);
}
+ if (is_ctr(cryp))
+ memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr));
+
ret = stm32_cryp_hw_init(cryp);
return ret;
}
@@ -1061,8 +1121,7 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
if (!cryp)
return -ENODEV;
- if (unlikely(!cryp->areq->assoclen &&
- !stm32_cryp_get_input_text_len(cryp))) {
+ if (unlikely(!cryp->payload_in && !cryp->header_in)) {
/* No input data to process: get tag and finish */
stm32_cryp_finish_req(cryp, 0);
return 0;
@@ -1071,43 +1130,10 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
return stm32_cryp_cpu_start(cryp);
}
-static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
- unsigned int n)
-{
- scatterwalk_advance(&cryp->out_walk, n);
-
- if (unlikely(cryp->out_sg->length == _walked_out)) {
- cryp->out_sg = sg_next(cryp->out_sg);
- if (cryp->out_sg) {
- scatterwalk_start(&cryp->out_walk, cryp->out_sg);
- return (sg_virt(cryp->out_sg) + _walked_out);
- }
- }
-
- return (u32 *)((u8 *)dst + n);
-}
-
-static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
- unsigned int n)
-{
- scatterwalk_advance(&cryp->in_walk, n);
-
- if (unlikely(cryp->in_sg->length == _walked_in)) {
- cryp->in_sg = sg_next(cryp->in_sg);
- if (cryp->in_sg) {
- scatterwalk_start(&cryp->in_walk, cryp->in_sg);
- return (sg_virt(cryp->in_sg) + _walked_in);
- }
- }
-
- return (u32 *)((u8 *)src + n);
-}
-
static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
{
- u32 cfg, size_bit, *dst, d32;
- u8 *d8;
- unsigned int i, j;
+ u32 cfg, size_bit;
+ unsigned int i;
int ret = 0;
/* Update Config */
@@ -1130,7 +1156,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
- cryp->areq->cryptlen - AES_BLOCK_SIZE;
+ cryp->areq->cryptlen - cryp->authsize;
size_bit *= 8;
if (cryp->caps->swap_final)
size_bit = (__force u32)cpu_to_be32(size_bit);
@@ -1139,11 +1165,9 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
} else {
/* CCM: write CTR0 */
- u8 iv[AES_BLOCK_SIZE];
- u32 *iv32 = (u32 *)iv;
- __be32 *biv;
-
- biv = (void *)iv;
+ u32 iv32[AES_BLOCK_32];
+ u8 *iv = (u8 *)iv32;
+ __be32 *biv = (__be32 *)iv32;
memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
@@ -1165,39 +1189,18 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
}
if (is_encrypt(cryp)) {
+ u32 out_tag[AES_BLOCK_32];
+
/* Get and write tag */
- dst = sg_virt(cryp->out_sg) + _walked_out;
+ for (i = 0; i < AES_BLOCK_32; i++)
+ out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
- for (i = 0; i < AES_BLOCK_32; i++) {
- if (cryp->total_out >= sizeof(u32)) {
- /* Read a full u32 */
- *dst = stm32_cryp_read(cryp, CRYP_DOUT);
-
- dst = stm32_cryp_next_out(cryp, dst,
- sizeof(u32));
- cryp->total_out -= sizeof(u32);
- } else if (!cryp->total_out) {
- /* Empty fifo out (data from input padding) */
- stm32_cryp_read(cryp, CRYP_DOUT);
- } else {
- /* Read less than an u32 */
- d32 = stm32_cryp_read(cryp, CRYP_DOUT);
- d8 = (u8 *)&d32;
-
- for (j = 0; j < cryp->total_out; j++) {
- *((u8 *)dst) = *(d8++);
- dst = stm32_cryp_next_out(cryp, dst, 1);
- }
- cryp->total_out = 0;
- }
- }
+ scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1);
} else {
/* Get and check tag */
u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
- scatterwalk_map_and_copy(in_tag, cryp->in_sg,
- cryp->total_in_save - cryp->authsize,
- cryp->authsize, 0);
+ scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0);
for (i = 0; i < AES_BLOCK_32; i++)
out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
@@ -1217,115 +1220,59 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
{
u32 cr;
- if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
- cryp->last_ctr[3] = 0;
- cryp->last_ctr[2]++;
- if (!cryp->last_ctr[2]) {
- cryp->last_ctr[1]++;
- if (!cryp->last_ctr[1])
- cryp->last_ctr[0]++;
- }
+ if (unlikely(cryp->last_ctr[3] == cpu_to_be32(0xFFFFFFFF))) {
+ /*
+ * In this case, we need to manually increment the ctr counter,
+ * as the HW doesn't handle the u32 carry.
+ */
+ crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr));
cr = stm32_cryp_read(cryp, CRYP_CR);
stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
- stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->last_ctr);
+ stm32_cryp_hw_write_iv(cryp, cryp->last_ctr);
stm32_cryp_write(cryp, CRYP_CR, cr);
}
- cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
- cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
- cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
- cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+ /* The IV registers are BE */
+ cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
+ cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+ cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
+ cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
}
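With last_ctr now stored as __be32, the driver lets crypto_inc() ripple the carry instead of hand-rolling it per word: crypto_inc() treats its buffer as one big-endian integer of the given size. A hedged sketch of that call on a 16-byte CTR block:

/* Minimal sketch, assuming a 4-word big-endian counter as in the hunk above. */
#include <crypto/algapi.h>	/* crypto_inc() */
#include <linux/types.h>

static void example_bump_ctr(__be32 ctr[4])
{
	/*
	 * Increment the buffer as a single big-endian integer; the carry
	 * propagates across all 16 bytes, covering exactly the u32-wrap
	 * case the hardware does not handle.
	 */
	crypto_inc((u8 *)ctr, 4 * sizeof(__be32));
}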
-static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
{
- unsigned int i, j;
- u32 d32, *dst;
- u8 *d8;
- size_t tag_size;
-
- /* Do no read tag now (if any) */
- if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
- tag_size = cryp->authsize;
- else
- tag_size = 0;
-
- dst = sg_virt(cryp->out_sg) + _walked_out;
+ unsigned int i;
+ u32 block[AES_BLOCK_32];
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_out - tag_size >= sizeof(u32))) {
- /* Read a full u32 */
- *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
- dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
- cryp->total_out -= sizeof(u32);
- } else if (cryp->total_out == tag_size) {
- /* Empty fifo out (data from input padding) */
- d32 = stm32_cryp_read(cryp, CRYP_DOUT);
- } else {
- /* Read less than an u32 */
- d32 = stm32_cryp_read(cryp, CRYP_DOUT);
- d8 = (u8 *)&d32;
-
- for (j = 0; j < cryp->total_out - tag_size; j++) {
- *((u8 *)dst) = *(d8++);
- dst = stm32_cryp_next_out(cryp, dst, 1);
- }
- cryp->total_out = tag_size;
- }
- }
-
- return !(cryp->total_out - tag_size) || !cryp->total_in;
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out), 1);
+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out);
}
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
- unsigned int i, j;
- u32 *src;
- u8 d8[4];
- size_t tag_size;
-
- /* Do no write tag (if any) */
- if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
- tag_size = cryp->authsize;
- else
- tag_size = 0;
-
- src = sg_virt(cryp->in_sg) + _walked_in;
+ unsigned int i;
+ u32 block[AES_BLOCK_32] = {0};
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
- if (likely(cryp->total_in - tag_size >= sizeof(u32))) {
- /* Write a full u32 */
- stm32_cryp_write(cryp, CRYP_DIN, *src);
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_in), 0);
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
- src = stm32_cryp_next_in(cryp, src, sizeof(u32));
- cryp->total_in -= sizeof(u32);
- } else if (cryp->total_in == tag_size) {
- /* Write padding data */
- stm32_cryp_write(cryp, CRYP_DIN, 0);
- } else {
- /* Write less than an u32 */
- memset(d8, 0, sizeof(u32));
- for (j = 0; j < cryp->total_in - tag_size; j++) {
- d8[j] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
- }
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- cryp->total_in = tag_size;
- }
- }
+ cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
}
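The FIFO fill and drain paths now bounce each block through a small on-stack array and let scatterwalk_copychunks() do the scatterlist traversal, which is why the hand-rolled sg walking, alignment checks, and copy-sg fallback above could be removed. A hedged, stand-alone sketch of the per-block input pattern (names are illustrative):

/* Minimal sketch, assuming a 16-byte block pulled from an ongoing scatter walk. */
#include <crypto/scatterwalk.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_BLOCK_WORDS	4	/* one AES block as u32 words */

static size_t example_pull_block(struct scatter_walk *in_walk,
				 u32 block[EXAMPLE_BLOCK_WORDS],
				 size_t payload_left)
{
	size_t len = min_t(size_t, EXAMPLE_BLOCK_WORDS * sizeof(u32),
			   payload_left);

	memset(block, 0, EXAMPLE_BLOCK_WORDS * sizeof(u32));	/* zero padding */
	scatterwalk_copychunks(block, in_walk, len, 0);		/* 0 = read */
	return len;	/* caller subtracts this from payload_left */
}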
static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
{
int err;
- u32 cfg, tmp[AES_BLOCK_32];
- size_t total_in_ori = cryp->total_in;
- struct scatterlist *out_sg_ori = cryp->out_sg;
+ u32 cfg, block[AES_BLOCK_32] = {0};
unsigned int i;
/* 'Special workaround' procedure described in the datasheet */
@@ -1350,18 +1297,25 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
/* b) pad and write the last block */
stm32_cryp_irq_write_block(cryp);
- cryp->total_in = total_in_ori;
+ /* Wait for end of processing */
err = stm32_cryp_wait_output(cryp);
if (err) {
- dev_err(cryp->dev, "Timeout (write gcm header)\n");
+ dev_err(cryp->dev, "Timeout (write gcm last data)\n");
return stm32_cryp_finish_req(cryp, err);
}
/* c) get and store encrypted data */
- stm32_cryp_irq_read_data(cryp);
- scatterwalk_map_and_copy(tmp, out_sg_ori,
- cryp->total_in_save - total_in_ori,
- total_in_ori, 0);
+ /*
+ * Same code as stm32_cryp_irq_read_data(), but we want to store
+ * the block value
+ */
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out), 1);
+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out);
/* d) change mode back to AES GCM */
cfg &= ~CR_ALGO_MASK;
@@ -1374,19 +1328,13 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
/* f) write padded data */
- for (i = 0; i < AES_BLOCK_32; i++) {
- if (cryp->total_in)
- stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
- else
- stm32_cryp_write(cryp, CRYP_DIN, 0);
-
- cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
- }
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
/* g) Empty fifo out */
err = stm32_cryp_wait_output(cryp);
if (err) {
- dev_err(cryp->dev, "Timeout (write gcm header)\n");
+ dev_err(cryp->dev, "Timeout (write gcm padded data)\n");
return stm32_cryp_finish_req(cryp, err);
}
@@ -1399,16 +1347,14 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
{
- u32 cfg, payload_bytes;
+ u32 cfg;
/* disable ip, set NPBLB and re-enable ip */
cfg = stm32_cryp_read(cryp, CRYP_CR);
cfg &= ~CR_CRYPEN;
stm32_cryp_write(cryp, CRYP_CR, cfg);
- payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
- cryp->total_in;
- cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
+ cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT;
cfg |= CR_CRYPEN;
stm32_cryp_write(cryp, CRYP_CR, cfg);
}
@@ -1417,13 +1363,11 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
{
int err = 0;
u32 cfg, iv1tmp;
- u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
- size_t last_total_out, total_in_ori = cryp->total_in;
- struct scatterlist *out_sg_ori = cryp->out_sg;
+ u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32];
+ u32 block[AES_BLOCK_32] = {0};
unsigned int i;
/* 'Special workaround' procedure described in the datasheet */
- cryp->flags |= FLG_CCM_PADDED_WA;
/* a) disable ip */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -1453,7 +1397,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
/* b) pad and write the last block */
stm32_cryp_irq_write_block(cryp);
- cryp->total_in = total_in_ori;
+ /* Wait for end of processing */
err = stm32_cryp_wait_output(cryp);
if (err) {
dev_err(cryp->dev, "Timeout (wite ccm padded data)\n");
@@ -1461,13 +1405,16 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
}
/* c) get and store decrypted data */
- last_total_out = cryp->total_out;
- stm32_cryp_irq_read_data(cryp);
+ /*
+ * Same code as stm32_cryp_irq_read_data(), but we want to store
+ * the block value
+ */
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
+ block[i] = stm32_cryp_read(cryp, CRYP_DOUT);
- memset(tmp, 0, sizeof(tmp));
- scatterwalk_map_and_copy(tmp, out_sg_ori,
- cryp->total_out_save - last_total_out,
- last_total_out, 0);
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
+ cryp->payload_out), 1);
+ cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out);
/* d) Load again CRYP_CSGCMCCMxR */
for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
@@ -1484,10 +1431,10 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, CRYP_CR, cfg);
/* g) XOR and write padded data */
- for (i = 0; i < ARRAY_SIZE(tmp); i++) {
- tmp[i] ^= cstmp1[i];
- tmp[i] ^= cstmp2[i];
- stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
+ for (i = 0; i < ARRAY_SIZE(block); i++) {
+ block[i] ^= cstmp1[i];
+ block[i] ^= cstmp2[i];
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
}
/* h) wait for completion */
@@ -1501,30 +1448,34 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
{
- if (unlikely(!cryp->total_in)) {
+ if (unlikely(!cryp->payload_in)) {
dev_warn(cryp->dev, "No more data to process\n");
return;
}
- if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
+ if (unlikely(cryp->payload_in < AES_BLOCK_SIZE &&
(stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
is_encrypt(cryp))) {
/* Padding for AES GCM encryption */
- if (cryp->caps->padding_wa)
+ if (cryp->caps->padding_wa) {
/* Special case 1 */
- return stm32_cryp_irq_write_gcm_padded_data(cryp);
+ stm32_cryp_irq_write_gcm_padded_data(cryp);
+ return;
+ }
/* Setting padding bytes (NBBLB) */
stm32_cryp_irq_set_npblb(cryp);
}
- if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
+ if (unlikely((cryp->payload_in < AES_BLOCK_SIZE) &&
(stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
is_decrypt(cryp))) {
/* Padding for AES CCM decryption */
- if (cryp->caps->padding_wa)
+ if (cryp->caps->padding_wa) {
/* Special case 2 */
- return stm32_cryp_irq_write_ccm_padded_data(cryp);
+ stm32_cryp_irq_write_ccm_padded_data(cryp);
+ return;
+ }
/* Setting padding bytes (NBBLB) */
stm32_cryp_irq_set_npblb(cryp);
@@ -1536,192 +1487,60 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
stm32_cryp_irq_write_block(cryp);
}
-static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
+static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
{
- int err;
- unsigned int i, j;
- u32 cfg, *src;
-
- src = sg_virt(cryp->in_sg) + _walked_in;
-
- for (i = 0; i < AES_BLOCK_32; i++) {
- stm32_cryp_write(cryp, CRYP_DIN, *src);
-
- src = stm32_cryp_next_in(cryp, src, sizeof(u32));
- cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
-
- /* Check if whole header written */
- if ((cryp->total_in_save - cryp->total_in) ==
- cryp->areq->assoclen) {
- /* Write padding if needed */
- for (j = i + 1; j < AES_BLOCK_32; j++)
- stm32_cryp_write(cryp, CRYP_DIN, 0);
-
- /* Wait for completion */
- err = stm32_cryp_wait_busy(cryp);
- if (err) {
- dev_err(cryp->dev, "Timeout (gcm header)\n");
- return stm32_cryp_finish_req(cryp, err);
- }
-
- if (stm32_cryp_get_input_text_len(cryp)) {
- /* Phase 3 : payload */
- cfg = stm32_cryp_read(cryp, CRYP_CR);
- cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
-
- cfg &= ~CR_PH_MASK;
- cfg |= CR_PH_PAYLOAD;
- cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
- } else {
- /* Phase 4 : tag */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp, 0);
- }
-
- break;
- }
-
- if (!cryp->total_in)
- break;
- }
-}
+ unsigned int i;
+ u32 block[AES_BLOCK_32] = {0};
+ size_t written;
-static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
-{
- int err;
- unsigned int i = 0, j, k;
- u32 alen, cfg, *src;
- u8 d8[4];
-
- src = sg_virt(cryp->in_sg) + _walked_in;
- alen = cryp->areq->assoclen;
-
- if (!_walked_in) {
- if (cryp->areq->assoclen <= 65280) {
- /* Write first u32 of B1 */
- d8[0] = (alen >> 8) & 0xFF;
- d8[1] = alen & 0xFF;
- d8[2] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
- d8[3] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- i++;
-
- cryp->total_in -= min_t(size_t, 2, cryp->total_in);
- } else {
- /* Build the two first u32 of B1 */
- d8[0] = 0xFF;
- d8[1] = 0xFE;
- d8[2] = alen & 0xFF000000;
- d8[3] = alen & 0x00FF0000;
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- i++;
-
- d8[0] = alen & 0x0000FF00;
- d8[1] = alen & 0x000000FF;
- d8[2] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
- d8[3] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
-
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
- i++;
-
- cryp->total_in -= min_t(size_t, 2, cryp->total_in);
- }
- }
+ written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
- /* Write next u32 */
- for (; i < AES_BLOCK_32; i++) {
- /* Build an u32 */
- memset(d8, 0, sizeof(u32));
- for (k = 0; k < sizeof(u32); k++) {
- d8[k] = *((u8 *)src);
- src = stm32_cryp_next_in(cryp, src, 1);
-
- cryp->total_in -= min_t(size_t, 1, cryp->total_in);
- if ((cryp->total_in_save - cryp->total_in) == alen)
- break;
- }
+ scatterwalk_copychunks(block, &cryp->in_walk, written, 0);
+ for (i = 0; i < AES_BLOCK_32; i++)
+ stm32_cryp_write(cryp, CRYP_DIN, block[i]);
- stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
-
- if ((cryp->total_in_save - cryp->total_in) == alen) {
- /* Write padding if needed */
- for (j = i + 1; j < AES_BLOCK_32; j++)
- stm32_cryp_write(cryp, CRYP_DIN, 0);
-
- /* Wait for completion */
- err = stm32_cryp_wait_busy(cryp);
- if (err) {
- dev_err(cryp->dev, "Timeout (ccm header)\n");
- return stm32_cryp_finish_req(cryp, err);
- }
-
- if (stm32_cryp_get_input_text_len(cryp)) {
- /* Phase 3 : payload */
- cfg = stm32_cryp_read(cryp, CRYP_CR);
- cfg &= ~CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
-
- cfg &= ~CR_PH_MASK;
- cfg |= CR_PH_PAYLOAD;
- cfg |= CR_CRYPEN;
- stm32_cryp_write(cryp, CRYP_CR, cfg);
- } else {
- /* Phase 4 : tag */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp, 0);
- }
+ cryp->header_in -= written;
- break;
- }
- }
+ stm32_crypt_gcmccm_end_header(cryp);
}
static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
{
struct stm32_cryp *cryp = arg;
u32 ph;
+ u32 it_mask = stm32_cryp_read(cryp, CRYP_IMSCR);
if (cryp->irq_status & MISR_OUT)
/* Output FIFO IRQ: read data */
- if (unlikely(stm32_cryp_irq_read_data(cryp))) {
- /* All bytes processed, finish */
- stm32_cryp_write(cryp, CRYP_IMSCR, 0);
- stm32_cryp_finish_req(cryp, 0);
- return IRQ_HANDLED;
- }
+ stm32_cryp_irq_read_data(cryp);
if (cryp->irq_status & MISR_IN) {
- if (is_gcm(cryp)) {
- ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
- if (unlikely(ph == CR_PH_HEADER))
- /* Write Header */
- stm32_cryp_irq_write_gcm_header(cryp);
- else
- /* Input FIFO IRQ: write data */
- stm32_cryp_irq_write_data(cryp);
- cryp->gcm_ctr++;
- } else if (is_ccm(cryp)) {
+ if (is_gcm(cryp) || is_ccm(cryp)) {
ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
if (unlikely(ph == CR_PH_HEADER))
/* Write Header */
- stm32_cryp_irq_write_ccm_header(cryp);
+ stm32_cryp_irq_write_gcmccm_header(cryp);
else
/* Input FIFO IRQ: write data */
stm32_cryp_irq_write_data(cryp);
+ if (is_gcm(cryp))
+ cryp->gcm_ctr++;
} else {
/* Input FIFO IRQ: write data */
stm32_cryp_irq_write_data(cryp);
}
}
+ /* Mask useless interrupts */
+ if (!cryp->payload_in && !cryp->header_in)
+ it_mask &= ~IMSCR_IN;
+ if (!cryp->payload_out)
+ it_mask &= ~IMSCR_OUT;
+ stm32_cryp_write(cryp, CRYP_IMSCR, it_mask);
+
+ if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
+ stm32_cryp_finish_req(cryp, 0);
+
return IRQ_HANDLED;
}
@@ -1742,7 +1561,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1759,7 +1578,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1777,7 +1596,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1795,7 +1614,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1812,7 +1631,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1830,7 +1649,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1847,7 +1666,7 @@ static struct skcipher_alg crypto_algs[] = {
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0xf,
+ .base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
.init = stm32_cryp_init_tfm,
@@ -1877,7 +1696,7 @@ static struct aead_alg aead_algs[] = {
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
+ .cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
},
@@ -1897,7 +1716,7 @@ static struct aead_alg aead_algs[] = {
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .cra_alignmask = 0xf,
+ .cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
},
@@ -1955,7 +1774,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
cryp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(cryp->clk)) {
- dev_err(dev, "Could not get clock\n");
+ dev_err_probe(dev, PTR_ERR(cryp->clk), "Could not get clock\n");
+
return PTR_ERR(cryp->clk);
}
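dev_err_probe() is useful here because it logs loudly for real errors but only at debug level for -EPROBE_DEFER (and records the deferral reason), and it returns the error code it was given. A hedged sketch of the usual one-statement idiom this enables:

/* Minimal sketch, assuming the same devm_clk_get() lookup as the hunk above. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		/* Logs (quietly for -EPROBE_DEFER) and returns the error. */
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "Could not get clock\n");
	return 0;
}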
@@ -1973,7 +1793,11 @@ static int stm32_cryp_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
rst = devm_reset_control_get(dev, NULL);
- if (!IS_ERR(rst)) {
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ if (ret == -EPROBE_DEFER)
+ goto err_rst;
+ } else {
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
@@ -2024,9 +1848,7 @@ err_engine1:
spin_lock(&cryp_list.lock);
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
-
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
+err_rst:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 389de9e3302d..d33006d43f76 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
- pm_runtime_resume_and_get(hdev->dev);
+ pm_runtime_get_sync(hdev->dev);
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
- pm_runtime_resume_and_get(hdev->dev);
+ pm_runtime_get_sync(hdev->dev);
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
- pm_runtime_resume_and_get(hdev->dev);
+ pm_runtime_get_sync(hdev->dev);
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
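The stm32-hash hunks switch back from pm_runtime_resume_and_get() to pm_runtime_get_sync(). Whatever the motivation here, the functional difference is narrow: both resume the device and raise the usage count, but pm_runtime_resume_and_get() drops the reference itself when resume fails, while pm_runtime_get_sync() leaves it raised, which keeps get/put pairing balanced for callers that ignore the return value (as these do). A hedged sketch of the two error-handling shapes:

/* Minimal sketch contrasting the two helpers; error handling is illustrative. */
#include <linux/pm_runtime.h>

static int example_get_sync(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0)
		pm_runtime_put_noidle(dev);	/* usage count is still raised */
	return ret < 0 ? ret : 0;
}

static int example_resume_and_get(struct device *dev)
{
	/* On failure the reference has already been dropped internally. */
	return pm_runtime_resume_and_get(dev);
}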
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index db5713d7c940..59e1557a620a 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -224,6 +224,7 @@ struct cryp_dma {
* @phybase: Pointer to physical memory location of the cryp device.
* @dev: Pointer to the devices dev structure.
* @clk: Pointer to the device's clock control.
+ * @irq: IRQ number
* @pwr_regulator: Pointer to the device's power control.
* @power_status: Current status of the power.
* @ctx_lock: Lock for current_ctx.
@@ -239,6 +240,7 @@ struct cryp_device_data {
phys_addr_t phybase;
struct device *dev;
struct clk *clk;
+ int irq;
struct regulator *pwr_regulator;
int power_status;
spinlock_t ctx_lock;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 30cdd5253929..97277b7150cb 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1257,7 +1257,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
{
int ret;
struct resource *res;
- struct resource *res_irq;
struct cryp_device_data *device_data;
struct cryp_protection_config prot = {
.privilege_access = CRYP_STATE_ENABLE
@@ -1341,15 +1340,13 @@ static int ux500_cryp_probe(struct platform_device *pdev)
goto out_power;
}
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq) {
- dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
- __func__);
- ret = -ENODEV;
+ device_data->irq = platform_get_irq(pdev, 0);
+ if (device_data->irq <= 0) {
+ ret = device_data->irq ? device_data->irq : -ENXIO;
goto out_power;
}
- ret = devm_request_irq(&pdev->dev, res_irq->start,
+ ret = devm_request_irq(&pdev->dev, device_data->irq,
cryp_interrupt_handler, 0, "cryp1", device_data);
if (ret) {
dev_err(dev, "[%s]: Unable to request IRQ", __func__);
@@ -1489,7 +1486,6 @@ static int ux500_cryp_suspend(struct device *dev)
int ret;
struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
- struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
dev_dbg(dev, "[%s]", __func__);
@@ -1501,11 +1497,7 @@ static int ux500_cryp_suspend(struct device *dev)
return -ENOMEM;
}
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
- dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__);
- else
- disable_irq(res_irq->start);
+ disable_irq(device_data->irq);
spin_lock(&device_data->ctx_lock);
if (!device_data->current_ctx)
@@ -1532,7 +1524,6 @@ static int ux500_cryp_resume(struct device *dev)
int ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
- struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
dev_dbg(dev, "[%s]", __func__);
@@ -1556,11 +1547,8 @@ static int ux500_cryp_resume(struct device *dev)
if (ret)
dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
- else {
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res_irq)
- enable_irq(res_irq->start);
- }
+ else
+ enable_irq(device_data->irq);
return ret;
}
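The ux500 changes replace the IORESOURCE_IRQ lookup with platform_get_irq(), which also covers interrupts that exist only as DT/ACPI mappings, and cache the number so suspend/resume can call disable_irq()/enable_irq() directly. A hedged probe-time sketch mirroring the hunk's <= 0 check (names are illustrative):

/* Minimal sketch, assuming a single platform IRQ requested with devm. */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe_irq(struct platform_device *pdev, int *irq_out)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0)			/* negative errno, or 0 on some platforms */
		return irq ? irq : -ENXIO;

	*irq_out = irq;			/* cached for disable_irq()/enable_irq() */
	return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
				"example", pdev);
}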