author      Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 02:23:56 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 02:23:56 +0100
commit      31caf8b2a847214be856f843e251fc2ed2cd1075 (patch)
tree        245257387cfcae352fae27b1ca824daab55472ba /drivers/crypto/marvell
parent      Merge tag 'tpmdd-next-v5.12-rc1-v2' of git://git.kernel.org/pub/scm/linux/ker... (diff)
parent      hwrng: timeriomem - Use device-managed registration API (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Restrict crypto_cipher to internal API users only.

  Algorithms:
   - Add x86 aesni acceleration for cts.
   - Improve x86 aesni acceleration for xts.
   - Remove x86 acceleration of some uncommon algorithms.
   - Remove RIPE-MD, Tiger and Salsa20.
   - Remove tnepres.
   - Add ARM acceleration for BLAKE2s and BLAKE2b.

  Drivers:
   - Add Keem Bay OCS HCU driver.
   - Add Marvell OcteonTX2 CPT PF driver.
   - Remove PicoXcell driver.
   - Remove mediatek driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (154 commits)
  hwrng: timeriomem - Use device-managed registration API
  crypto: hisilicon/qm - fix printing format issue
  crypto: hisilicon/qm - do not reset hardware when CE happens
  crypto: hisilicon/qm - update irqflag
  crypto: hisilicon/qm - fix the value of 'QM_SQC_VFT_BASE_MASK_V2'
  crypto: hisilicon/qm - fix request missing error
  crypto: hisilicon/qm - removing driver after reset
  crypto: octeontx2 - fix -Wpointer-bool-conversion warning
  crypto: hisilicon/hpre - enable Elliptic curve cryptography
  crypto: hisilicon - PASID fixed on Kunpeng 930
  crypto: hisilicon/qm - fix use of 'dma_map_single'
  crypto: hisilicon/hpre - tiny fix
  crypto: hisilicon/hpre - adapt the number of clusters
  crypto: cpt - remove casting dma_alloc_coherent
  crypto: keembay-ocs-aes - Fix 'q' assignment during CCM B0 generation
  crypto: xor - Fix typo of optimization
  hwrng: optee - Use device-managed registration API
  crypto: arm64/crc-t10dif - move NEON yield to C code
  crypto: arm64/aes-ce-mac - simplify NEON yield
  crypto: arm64/aes-neonbs - remove NEON yield calls
  ...
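The crypto_cipher restriction only affects other kernel subsystems that were calling the bare single-block cipher interface directly; general in-kernel users are expected to go through higher-level interfaces such as skcipher. A minimal sketch of that interface, for illustration only (it is not taken from this merge; the algorithm name, buffers and error handling are assumptions):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: one synchronous CBC-AES encryption via the skcipher API. */
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* Run the request synchronously and wait for completion. */
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}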
Diffstat (limited to 'drivers/crypto/marvell')
-rw-r--r--  drivers/crypto/marvell/Kconfig                              15
-rw-r--r--  drivers/crypto/marvell/Makefile                              1
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c                          10
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.h                          31
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c                        34
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c                          59
-rw-r--r--  drivers/crypto/marvell/cesa/tdma.c                          52
-rw-r--r--  drivers/crypto/marvell/octeontx2/Makefile                   10
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h         137
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h       464
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c    202
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h         197
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.c              428
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.h              353
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf.h               61
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c         713
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c         356
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c       1415
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h        162
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf.h               29
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c        1758
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h         178
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c         410
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c         167
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c       541
25 files changed, 7746 insertions, 37 deletions
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 13063384f958..9125199f1702 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -35,3 +35,18 @@ config CRYPTO_DEV_OCTEONTX_CPT
To compile this driver as module, choose M here:
the modules will be called octeontx-cpt and octeontx-cptvf
+
+config CRYPTO_DEV_OCTEONTX2_CPT
+ tristate "Marvell OcteonTX2 CPT driver"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ depends on CRYPTO_LIB_AES
+ depends on NET_VENDOR_MARVELL
+ select OCTEONTX2_MBOX
+ select CRYPTO_DEV_MARVELL
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
+ help
+ This driver allows you to utilize the Marvell Cryptographic
+ Accelerator Unit(CPT) found in OcteonTX2 series of processors.
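For reference, a configuration fragment that builds the new driver as modules might look as follows (illustrative only; it assumes the PCI_MSI, 64BIT and NET_VENDOR_MARVELL dependencies above are already satisfied in the target config):

CONFIG_CRYPTO_DEV_MARVELL=m
CONFIG_CRYPTO_DEV_OCTEONTX2_CPT=m

With CRYPTO_DEV_OCTEONTX2_CPT=m the build produces the octeontx2-cpt (PF) and octeontx2-cptvf (VF) modules listed in the Makefile below.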
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
index 6c6a1519b0f1..39db6d9c0aaf 100644
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
obj-$(CONFIG_CRYPTO_DEV_OCTEONTX_CPT) += octeontx/
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2/
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 06211858bf2e..f14aac532f53 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -381,10 +381,10 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
engine->pool = of_gen_pool_get(cesa->dev->of_node,
"marvell,crypto-srams", idx);
if (engine->pool) {
- engine->sram = gen_pool_dma_alloc(engine->pool,
- cesa->sram_size,
- &engine->sram_dma);
- if (engine->sram)
+ engine->sram_pool = gen_pool_dma_alloc(engine->pool,
+ cesa->sram_size,
+ &engine->sram_dma);
+ if (engine->sram_pool)
return 0;
engine->pool = NULL;
@@ -422,7 +422,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_engine *engine = &cesa->engines[idx];
if (engine->pool)
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
cesa->sram_size);
else
dma_unmap_resource(cesa->dev, engine->sram_dma,
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index fa56b45620c7..c1007f2ba79c 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -428,6 +428,7 @@ struct mv_cesa_dev {
* @id: engine id
* @regs: engine registers
* @sram: SRAM memory region
+ * @sram_pool: SRAM memory region from pool
* @sram_dma: DMA address of the SRAM memory region
* @lock: engine lock
* @req: current crypto request
@@ -448,7 +449,10 @@ struct mv_cesa_dev {
struct mv_cesa_engine {
int id;
void __iomem *regs;
- void __iomem *sram;
+ union {
+ void __iomem *sram;
+ void *sram_pool;
+ };
dma_addr_t sram_dma;
spinlock_t lock;
struct crypto_async_request *req;
@@ -867,6 +871,31 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
struct mv_cesa_sg_dma_iter *sgiter,
gfp_t gfp_flags);
+size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl, unsigned int nents,
+ unsigned int sram_off, size_t buflen, off_t skip,
+ bool to_sram);
+
+static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl,
+ unsigned int nents,
+ unsigned int sram_off,
+ size_t buflen, off_t skip)
+{
+ return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
+ true);
+}
+
+static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl,
+ unsigned int nents,
+ unsigned int sram_off,
+ size_t buflen, off_t skip)
+{
+ return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
+ false);
+}
+
/* Algorithm definitions */
extern struct ahash_alg mv_md5_alg;
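The union above lets the same engine field describe either an MMIO-mapped SRAM window (sram, accessed with the __iomem helpers) or a genpool-backed buffer (sram_pool, ordinary kernel memory). A minimal sketch of the resulting dispatch pattern, which the cipher.c and hash.c hunks below spell out inline (the helper name is hypothetical, not part of the commit):

static inline void example_cesa_copy_to_sram(struct mv_cesa_engine *engine,
					     unsigned int off,
					     const void *src, size_t len)
{
	if (engine->pool)
		/* Pool-backed SRAM is plain memory: a regular memcpy is fine. */
		memcpy(engine->sram_pool + off, src, len);
	else
		/* MMIO-mapped SRAM must go through the __iomem accessors. */
		memcpy_toio(engine->sram + off, src, len);
}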
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index b4a6ff9dd6d5..b739d3b873dc 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -89,22 +89,29 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op);
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
+ else
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
- len = sg_pcopy_to_buffer(req->src, creq->src_nents,
- engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- len, sreq->offset);
+ len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
+ CESA_SA_DATA_SRAM_OFFSET, len,
+ sreq->offset);
sreq->size = len;
mv_cesa_set_crypt_op_len(&sreq->op, len);
/* FIXME: only update enc_len field */
if (!sreq->skip_ctx) {
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
+ else
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
sreq->skip_ctx = true;
- } else {
+ } else if (engine->pool)
+ memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
+ else
memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
- }
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
@@ -121,9 +128,9 @@ static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
- len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
- engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- sreq->size, sreq->offset);
+ len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
+ CESA_SA_DATA_SRAM_OFFSET, sreq->size,
+ sreq->offset);
sreq->offset += len;
if (sreq->offset < req->cryptlen)
@@ -214,11 +221,14 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
basereq = &creq->base;
memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
ivsize);
- } else {
+ } else if (engine->pool)
+ memcpy(skreq->iv,
+ engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize);
+ else
memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
- }
}
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 8cf9fd518d86..c72b0672fc71 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -168,7 +168,12 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
int i;
mv_cesa_adjust_op(engine, &creq->op_tmpl);
- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+ if (engine->pool)
+ memcpy(engine->sram_pool, &creq->op_tmpl,
+ sizeof(creq->op_tmpl));
+ else
+ memcpy_toio(engine->sram, &creq->op_tmpl,
+ sizeof(creq->op_tmpl));
if (!sreq->offset) {
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
@@ -177,9 +182,14 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
engine->regs + CESA_IVDIG(i));
}
- if (creq->cache_ptr)
- memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
- creq->cache, creq->cache_ptr);
+ if (creq->cache_ptr) {
+ if (engine->pool)
+ memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, creq->cache_ptr);
+ else
+ memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, creq->cache_ptr);
+ }
len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
@@ -190,12 +200,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
}
if (len - creq->cache_ptr)
- sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
- engine->sram +
- CESA_SA_DATA_SRAM_OFFSET +
- creq->cache_ptr,
- len - creq->cache_ptr,
- sreq->offset);
+ sreq->offset += mv_cesa_sg_copy_to_sram(
+ engine, req->src, creq->src_nents,
+ CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
+ len - creq->cache_ptr, sreq->offset);
op = &creq->op_tmpl;
@@ -220,16 +228,28 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
len &= CESA_HASH_BLOCK_SIZE_MSK;
new_cache_ptr = 64 - trailerlen;
- memcpy_fromio(creq->cache,
- engine->sram +
- CESA_SA_DATA_SRAM_OFFSET + len,
- new_cache_ptr);
+ if (engine->pool)
+ memcpy(creq->cache,
+ engine->sram_pool +
+ CESA_SA_DATA_SRAM_OFFSET + len,
+ new_cache_ptr);
+ else
+ memcpy_fromio(creq->cache,
+ engine->sram +
+ CESA_SA_DATA_SRAM_OFFSET +
+ len,
+ new_cache_ptr);
} else {
i = mv_cesa_ahash_pad_req(creq, creq->cache);
len += i;
- memcpy_toio(engine->sram + len +
- CESA_SA_DATA_SRAM_OFFSET,
- creq->cache, i);
+ if (engine->pool)
+ memcpy(engine->sram_pool + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
+ else
+ memcpy_toio(engine->sram + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
}
if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
@@ -243,7 +263,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
/* FIXME: only update enc_len field */
- memcpy_toio(engine->sram, op, sizeof(*op));
+ if (engine->pool)
+ memcpy(engine->sram_pool, op, sizeof(*op));
+ else
+ memcpy_toio(engine->sram, op, sizeof(*op));
if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 5d9c48fb72b2..f0b5537038c2 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -177,7 +177,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
/*
* Save the last request in error to engine->req, so that the core
- * knows which request was fautly
+ * knows which request was faulty
*/
if (res) {
spin_lock_bh(&engine->lock);
@@ -350,3 +350,53 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
return 0;
}
+
+size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
+ struct scatterlist *sgl, unsigned int nents,
+ unsigned int sram_off, size_t buflen, off_t skip,
+ bool to_sram)
+{
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ struct sg_mapping_iter miter;
+ unsigned int offset = 0;
+
+ if (to_sram)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ if (!sg_miter_skip(&miter, skip))
+ return 0;
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_sram) {
+ if (engine->pool)
+ memcpy(engine->sram_pool + sram_off + offset,
+ miter.addr, len);
+ else
+ memcpy_toio(engine->sram + sram_off + offset,
+ miter.addr, len);
+ } else {
+ if (engine->pool)
+ memcpy(miter.addr,
+ engine->sram_pool + sram_off + offset,
+ len);
+ else
+ memcpy_fromio(miter.addr,
+ engine->sram + sram_off + offset,
+ len);
+ }
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+
+ return offset;
+}
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..b9c6201019e0
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2-cpt.o octeontx2-cptvf.o
+
+octeontx2-cpt-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+ otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o
+octeontx2-cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+ otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+ otx2_cptvf_algs.o
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
new file mode 100644
index 000000000000..3518fac29834
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_COMMON_H
+#define __OTX2_CPT_COMMON_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include "otx2_cpt_hw_types.h"
+#include "rvu.h"
+#include "mbox.h"
+
+#define OTX2_CPT_MAX_VFS_NUM 128
+#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
+ (((blk) << 20) | ((slot) << 12) | (offs))
+#define OTX2_CPT_RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
+#define OTX2_CPT_NAME_LENGTH 64
+#define OTX2_CPT_DMA_MINALIGN 128
+
+#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES
+
+enum otx2_cpt_eng_type {
+ OTX2_CPT_AE_TYPES = 1,
+ OTX2_CPT_SE_TYPES = 2,
+ OTX2_CPT_IE_TYPES = 3,
+ OTX2_CPT_MAX_ENG_TYPES,
+};
+
+/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
+#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
+#define MBOX_MSG_GET_CAPS 0xBFD
+#define MBOX_MSG_GET_KVF_LIMITS 0xBFC
+
+/*
+ * Message request and response to get engine group number
+ * which has attached a given type of engines (SE, AE, IE)
+ * These messages are only used between CPT PF <=> CPT VF
+ */
+struct otx2_cpt_egrp_num_msg {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+};
+
+struct otx2_cpt_egrp_num_rsp {
+ struct mbox_msghdr hdr;
+ u8 eng_type;
+ u8 eng_grp_num;
+};
+
+/*
+ * Message request and response to get kernel crypto limits
+ * These messages are only used between CPT PF <-> CPT VF
+ */
+struct otx2_cpt_kvf_limits_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_kvf_limits_rsp {
+ struct mbox_msghdr hdr;
+ u8 kvf_limits;
+};
+
+/* CPT HW capabilities */
+union otx2_cpt_eng_caps {
+ u64 u;
+ struct {
+ u64 reserved_0_4:5;
+ u64 mul:1;
+ u64 sha1_sha2:1;
+ u64 chacha20:1;
+ u64 zuc_snow3g:1;
+ u64 sha3:1;
+ u64 aes:1;
+ u64 kasumi:1;
+ u64 des:1;
+ u64 crc:1;
+ u64 reserved_14_63:50;
+ };
+};
+
+/*
+ * Message request and response to get HW capabilities for each
+ * engine type (SE, IE, AE).
+ * These messages are only used between CPT PF <=> CPT VF
+ */
+struct otx2_cpt_caps_msg {
+ struct mbox_msghdr hdr;
+};
+
+struct otx2_cpt_caps_rsp {
+ struct mbox_msghdr hdr;
+ u16 cpt_pf_drv_version;
+ u8 cpt_revision;
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+};
+
+static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs, u64 val)
+{
+ writeq_relaxed(val, reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
+
+static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
+ u64 offs)
+{
+ return readq_relaxed(reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
+}
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
+ struct pci_dev *pdev);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
+ struct pci_dev *pdev, u64 reg, u64 *val);
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val);
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val);
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val);
+struct otx2_cptlfs_info;
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPT_COMMON_H */
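As a worked illustration of the address layout used by the accessors above (not part of the commit): OTX2_CPT_RVU_FUNC_ADDR_S() places the RVU block address at bit 20 and up, the LF slot at bit 12, and the register offset in the low 12 bits, so a per-slot register read reduces to a single readq:

/* Hypothetical wrapper: read OTX2_CPT_LF_DONE (offset 0x50) for LF @slot,
 * i.e. readq_relaxed(reg_base + (BLKADDR_CPT0 << 20) + (slot << 12) + 0x50).
 */
static inline u64 example_read_lf_done(void __iomem *reg_base, int slot)
{
	return otx2_cpt_read64(reg_base, BLKADDR_CPT0, slot, OTX2_CPT_LF_DONE);
}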
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
new file mode 100644
index 000000000000..ecafc42f37a2
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_HW_TYPES_H
+#define __OTX2_CPT_HW_TYPES_H
+
+#include <linux/types.h>
+
+/* Device IDs */
+#define OTX2_CPT_PCI_PF_DEVICE_ID 0xA0FD
+#define OTX2_CPT_PCI_VF_DEVICE_ID 0xA0FE
+
+/* Mailbox interrupts offset */
+#define OTX2_CPT_PF_MBOX_INT 6
+#define OTX2_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
+
+/* Maximum supported microcode groups */
+#define OTX2_CPT_MAX_ENGINE_GROUPS 8
+
+/* CPT instruction size in bytes */
+#define OTX2_CPT_INST_SIZE 64
+/*
+ * CPT VF MSIX vectors and their offsets
+ */
+#define OTX2_CPT_VF_MSIX_VECTORS 1
+#define OTX2_CPT_VF_INTR_MBOX_MASK BIT(0)
+
+/* CPT LF MSIX vectors */
+#define OTX2_CPT_LF_MSIX_VECTORS 2
+
+/* OcteonTX2 CPT PF registers */
+#define OTX2_CPT_PF_CONSTANTS (0x0)
+#define OTX2_CPT_PF_RESET (0x100)
+#define OTX2_CPT_PF_DIAG (0x120)
+#define OTX2_CPT_PF_BIST_STATUS (0x160)
+#define OTX2_CPT_PF_ECC0_CTL (0x200)
+#define OTX2_CPT_PF_ECC0_FLIP (0x210)
+#define OTX2_CPT_PF_ECC0_INT (0x220)
+#define OTX2_CPT_PF_ECC0_INT_W1S (0x230)
+#define OTX2_CPT_PF_ECC0_ENA_W1S (0x240)
+#define OTX2_CPT_PF_ECC0_ENA_W1C (0x250)
+#define OTX2_CPT_PF_MBOX_INTX(b) (0x400 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_INT_W1SX(b) (0x420 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1CX(b) (0x440 | (b) << 3)
+#define OTX2_CPT_PF_MBOX_ENA_W1SX(b) (0x460 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INT (0x500)
+#define OTX2_CPT_PF_EXEC_INT_W1S (0x520)
+#define OTX2_CPT_PF_EXEC_ENA_W1C (0x540)
+#define OTX2_CPT_PF_EXEC_ENA_W1S (0x560)
+#define OTX2_CPT_PF_GX_EN(b) (0x600 | (b) << 3)
+#define OTX2_CPT_PF_EXEC_INFO (0x700)
+#define OTX2_CPT_PF_EXEC_BUSY (0x800)
+#define OTX2_CPT_PF_EXEC_INFO0 (0x900)
+#define OTX2_CPT_PF_EXEC_INFO1 (0x910)
+#define OTX2_CPT_PF_INST_REQ_PC (0x10000)
+#define OTX2_CPT_PF_INST_LATENCY_PC (0x10020)
+#define OTX2_CPT_PF_RD_REQ_PC (0x10040)
+#define OTX2_CPT_PF_RD_LATENCY_PC (0x10060)
+#define OTX2_CPT_PF_RD_UC_PC (0x10080)
+#define OTX2_CPT_PF_ACTIVE_CYCLES_PC (0x10100)
+#define OTX2_CPT_PF_EXE_CTL (0x4000000)
+#define OTX2_CPT_PF_EXE_STATUS (0x4000008)
+#define OTX2_CPT_PF_EXE_CLK (0x4000010)
+#define OTX2_CPT_PF_EXE_DBG_CTL (0x4000018)
+#define OTX2_CPT_PF_EXE_DBG_DATA (0x4000020)
+#define OTX2_CPT_PF_EXE_BIST_STATUS (0x4000028)
+#define OTX2_CPT_PF_EXE_REQ_TIMER (0x4000030)
+#define OTX2_CPT_PF_EXE_MEM_CTL (0x4000038)
+#define OTX2_CPT_PF_EXE_PERF_CTL (0x4001000)
+#define OTX2_CPT_PF_EXE_DBG_CNTX(b) (0x4001100 | (b) << 3)
+#define OTX2_CPT_PF_EXE_PERF_EVENT_CNT (0x4001180)
+#define OTX2_CPT_PF_EXE_EPCI_INBX_CNT(b) (0x4001200 | (b) << 3)
+#define OTX2_CPT_PF_EXE_EPCI_OUTBX_CNT(b) (0x4001240 | (b) << 3)
+#define OTX2_CPT_PF_ENGX_UCODE_BASE(b) (0x4002000 | (b) << 3)
+#define OTX2_CPT_PF_QX_CTL(b) (0x8000000 | (b) << 20)
+#define OTX2_CPT_PF_QX_GMCTL(b) (0x8000020 | (b) << 20)
+#define OTX2_CPT_PF_QX_CTL2(b) (0x8000100 | (b) << 20)
+#define OTX2_CPT_PF_VFX_MBOXX(b, c) (0x8001000 | (b) << 20 | \
+ (c) << 8)
+
+/* OcteonTX2 CPT LF registers */
+#define OTX2_CPT_LF_CTL (0x10)
+#define OTX2_CPT_LF_DONE_WAIT (0x30)
+#define OTX2_CPT_LF_INPROG (0x40)
+#define OTX2_CPT_LF_DONE (0x50)
+#define OTX2_CPT_LF_DONE_ACK (0x60)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1S (0x90)
+#define OTX2_CPT_LF_DONE_INT_ENA_W1C (0xa0)
+#define OTX2_CPT_LF_MISC_INT (0xb0)
+#define OTX2_CPT_LF_MISC_INT_W1S (0xc0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1S (0xd0)
+#define OTX2_CPT_LF_MISC_INT_ENA_W1C (0xe0)
+#define OTX2_CPT_LF_Q_BASE (0xf0)
+#define OTX2_CPT_LF_Q_SIZE (0x100)
+#define OTX2_CPT_LF_Q_INST_PTR (0x110)
+#define OTX2_CPT_LF_Q_GRP_PTR (0x120)
+#define OTX2_CPT_LF_NQX(a) (0x400 | (a) << 3)
+#define OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT 20
+/* LMT LF registers */
+#define OTX2_CPT_LMT_LFBASE BIT_ULL(OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT)
+#define OTX2_CPT_LMT_LF_LMTLINEX(a) (OTX2_CPT_LMT_LFBASE | 0x000 | \
+ (a) << 12)
+/* RVU VF registers */
+#define OTX2_RVU_VF_INT (0x20)
+#define OTX2_RVU_VF_INT_W1S (0x28)
+#define OTX2_RVU_VF_INT_ENA_W1S (0x30)
+#define OTX2_RVU_VF_INT_ENA_W1C (0x38)
+
+/*
+ * Enumeration otx2_cpt_ucode_error_code_e
+ *
+ * Enumerates ucode errors
+ */
+enum otx2_cpt_ucode_comp_code_e {
+ OTX2_CPT_UCC_SUCCESS = 0x00,
+ OTX2_CPT_UCC_INVALID_OPCODE = 0x01,
+
+ /* Scatter gather */
+ OTX2_CPT_UCC_SG_WRITE_LENGTH = 0x02,
+ OTX2_CPT_UCC_SG_LIST = 0x03,
+ OTX2_CPT_UCC_SG_NOT_SUPPORTED = 0x04,
+
+};
+
+/*
+ * Enumeration otx2_cpt_comp_e
+ *
+ * OcteonTX2 CPT Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum otx2_cpt_comp_e {
+ OTX2_CPT_COMP_E_NOTDONE = 0x00,
+ OTX2_CPT_COMP_E_GOOD = 0x01,
+ OTX2_CPT_COMP_E_FAULT = 0x02,
+ OTX2_CPT_COMP_E_HWERR = 0x04,
+ OTX2_CPT_COMP_E_INSTERR = 0x05,
+ OTX2_CPT_COMP_E_LAST_ENTRY = 0x06
+};
+
+/*
+ * Enumeration otx2_cpt_vf_int_vec_e
+ *
+ * OcteonTX2 CPT VF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_vf_int_vec_e {
+ OTX2_CPT_VF_INT_VEC_E_MBOX = 0x00
+};
+
+/*
+ * Enumeration otx2_cpt_lf_int_vec_e
+ *
+ * OcteonTX2 CPT LF MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+enum otx2_cpt_lf_int_vec_e {
+ OTX2_CPT_LF_INT_VEC_E_MISC = 0x00,
+ OTX2_CPT_LF_INT_VEC_E_DONE = 0x01
+};
+
+/*
+ * Structure otx2_cpt_inst_s
+ *
+ * CPT Instruction Structure
+ * This structure specifies the instruction layout. Instructions are
+ * stored in memory as little-endian unless CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_inst_s_s
+ * Word 0
+ * doneint:1 Done interrupt.
+ * 0 = No interrupts related to this instruction.
+ * 1 = When the instruction completes, CPT()_VQ()_DONE[DONE] will be
+ * incremented,and based on the rules described there an interrupt may
+ * occur.
+ * Word 1
+ * res_addr [127: 64] Result IOVA.
+ * If nonzero, specifies where to write CPT_RES_S.
+ * If zero, no result structure will be written.
+ * Address must be 16-byte aligned.
+ * Bits <63:49> are ignored by hardware; software should use a
+ * sign-extended bit <48> for forward compatibility.
+ * Word 2
+ * grp:10 [171:162] If [WQ_PTR] is nonzero, the SSO guest-group to use when
+ * CPT submits work to SSO.
+ * For the SSO to not discard the add-work request, FPA_PF_MAP() must map
+ * [GRP] and CPT()_PF_Q()_GMCTL[GMID] as valid.
+ * tt:2 [161:160] If [WQ_PTR] is nonzero, the SSO tag type to use when CPT
+ * submits work to SSO
+ * tag:32 [159:128] If [WQ_PTR] is nonzero, the SSO tag to use when CPT
+ * submits work to SSO.
+ * Word 3
+ * wq_ptr [255:192] If [WQ_PTR] is nonzero, it is a pointer to a
+ * work-queue entry that CPT submits work to SSO after all context,
+ * output data, and result write operations are visible to other
+ * CNXXXX units and the cores. Bits <2:0> must be zero.
+ * Bits <63:49> are ignored by hardware; software should
+ * use a sign-extended bit <48> for forward compatibility.
+ * Internal:
+ * Bits <63:49>, <2:0> are ignored by hardware, treated as always 0x0.
+ * Word 4
+ * ei0; [319:256] Engine instruction word 0. Passed to the AE/SE.
+ * Word 5
+ * ei1; [383:320] Engine instruction word 1. Passed to the AE/SE.
+ * Word 6
+ * ei2; [447:384] Engine instruction word 2. Passed to the AE/SE.
+ * Word 7
+ * ei3; [511:448] Engine instruction word 3. Passed to the AE/SE.
+ *
+ */
+union otx2_cpt_inst_s {
+ u64 u[8];
+
+ struct {
+ /* Word 0 */
+ u64 nixtxl:3;
+ u64 doneint:1;
+ u64 nixtx_addr:60;
+ /* Word 1 */
+ u64 res_addr;
+ /* Word 2 */
+ u64 tag:32;
+ u64 tt:2;
+ u64 grp:10;
+ u64 reserved_172_175:4;
+ u64 rvu_pf_func:16;
+ /* Word 3 */
+ u64 qord:1;
+ u64 reserved_194_193:2;
+ u64 wq_ptr:61;
+ /* Word 4 */
+ u64 ei0;
+ /* Word 5 */
+ u64 ei1;
+ /* Word 6 */
+ u64 ei2;
+ /* Word 7 */
+ u64 ei3;
+ } s;
+};
+
+/*
+ * Structure otx2_cpt_res_s
+ *
+ * CPT Result Structure
+ * The CPT coprocessor writes the result structure after it completes a
+ * CPT_INST_S instruction. The result structure is exactly 16 bytes, and
+ * each instruction completion produces exactly one result structure.
+ *
+ * This structure is stored in memory as little-endian unless
+ * CPT()_PF_Q()_CTL[INST_BE] is set.
+ * cpt_res_s_s
+ * Word 0
+ * doneint:1 [16:16] Done interrupt. This bit is copied from the
+ * corresponding instruction's CPT_INST_S[DONEINT].
+ * compcode:8 [7:0] Indicates completion/error status of the CPT coprocessor
+ * for the associated instruction, as enumerated by CPT_COMP_E.
+ * Core software may write the memory location containing [COMPCODE] to
+ * 0x0 before ringing the doorbell, and then poll for completion by
+ * checking for a nonzero value.
+ * Once the core observes a nonzero [COMPCODE] value in this case,the CPT
+ * coprocessor will have also completed L2/DRAM write operations.
+ * Word 1
+ * reserved
+ *
+ */
+union otx2_cpt_res_s {
+ u64 u[2];
+
+ struct {
+ u64 compcode:8;
+ u64 uc_compcode:8;
+ u64 doneint:1;
+ u64 reserved_17_63:47;
+ u64 reserved_64_127;
+ } s;
+};
+
+/*
+ * Register (RVU_PF_BAR0) cpt#_af_constants1
+ *
+ * CPT AF Constants Register
+ * This register contains implementation-related parameters of CPT.
+ */
+union otx2_cptx_af_constants1 {
+ u64 u;
+ struct otx2_cptx_af_constants1_s {
+ u64 se:16;
+ u64 ie:16;
+ u64 ae:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int
+ *
+ * This register contains the per-queue miscellaneous interrupts.
+ *
+ */
+union otx2_cptx_lf_misc_int {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_misc_int_ena_w1s
+ *
+ * This register sets interrupt enable bits.
+ *
+ */
+union otx2_cptx_lf_misc_int_ena_w1s {
+ u64 u;
+ struct otx2_cptx_lf_misc_int_ena_w1s_s {
+ u64 reserved_0:1;
+ u64 nqerr:1;
+ u64 irde:1;
+ u64 nwrp:1;
+ u64 reserved_4:1;
+ u64 hwerr:1;
+ u64 fault:1;
+ u64 reserved_7_63:57;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_ctl
+ *
+ * This register configures the queue.
+ *
+ * When the queue is not execution-quiescent (see CPT_LF_INPROG[EENA,INFLIGHT]),
+ * software must only write this register with [ENA]=0.
+ */
+union otx2_cptx_lf_ctl {
+ u64 u;
+ struct otx2_cptx_lf_ctl_s {
+ u64 ena:1;
+ u64 fc_ena:1;
+ u64 fc_up_crossing:1;
+ u64 reserved_3:1;
+ u64 fc_hyst_bits:4;
+ u64 reserved_8_63:56;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done_wait
+ *
+ * This register specifies the per-queue interrupt coalescing settings.
+ */
+union otx2_cptx_lf_done_wait {
+ u64 u;
+ struct otx2_cptx_lf_done_wait_s {
+ u64 num_wait:20;
+ u64 reserved_20_31:12;
+ u64 time_wait:16;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_done
+ *
+ * This register contains the per-queue instruction done count.
+ */
+union otx2_cptx_lf_done {
+ u64 u;
+ struct otx2_cptx_lf_done_s {
+ u64 done:20;
+ u64 reserved_20_63:44;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_inprog
+ *
+ * This register contains the per-queue in-flight instruction count.
+ *
+ */
+union otx2_cptx_lf_inprog {
+ u64 u;
+ struct otx2_cptx_lf_inprog_s {
+ u64 inflight:9;
+ u64 reserved_9_15:7;
+ u64 eena:1;
+ u64 grp_drp:1;
+ u64 reserved_18_30:13;
+ u64 grb_partial:1;
+ u64 grb_cnt:8;
+ u64 gwb_cnt:8;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_base
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_BASE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_base {
+ u64 u;
+ struct otx2_cptx_lf_q_base_s {
+ u64 fault:1;
+ u64 reserved_1_6:6;
+ u64 addr:46;
+ u64 reserved_53_63:11;
+ } s;
+};
+
+/*
+ * RVU_PFVF_BAR2 - cpt_lf_q_size
+ *
+ * CPT initializes these CSR fields to these values on any CPT_LF_Q_SIZE write:
+ * _ CPT_LF_Q_INST_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_INST_PTR[NQ_PTR]=2.
+ * _ CPT_LF_Q_INST_PTR[DQ_PTR]=2.
+ * _ CPT_LF_Q_GRP_PTR[XQ_XOR]=0.
+ * _ CPT_LF_Q_GRP_PTR[NQ_PTR]=1.
+ * _ CPT_LF_Q_GRP_PTR[DQ_PTR]=1.
+ */
+union otx2_cptx_lf_q_size {
+ u64 u;
+ struct otx2_cptx_lf_q_size_s {
+ u64 size_div40:15;
+ u64 reserved_15_63:49;
+ } s;
+};
+
+/*
+ * RVU_PF_BAR0 - cpt_af_lf_ctl
+ *
+ * This register configures queues. This register should be written only
+ * when the queue is execution-quiescent (see CPT_LF_INPROG[INFLIGHT]).
+ */
+union otx2_cptx_af_lf_ctrl {
+ u64 u;
+ struct otx2_cptx_af_lf_ctrl_s {
+ u64 pri:1;
+ u64 reserved_1_8:8;
+ u64 pf_func_inst:1;
+ u64 cont_err:1;
+ u64 reserved_11_15:5;
+ u64 nixtx_en:1;
+ u64 reserved_17_47:31;
+ u64 grp:8;
+ u64 reserved_56_63:8;
+ } s;
+};
+
+#endif /* __OTX2_CPT_HW_TYPES_H */
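The CPT_RES_S description above implies a simple completion convention: software primes COMPCODE with the "not done" value, submits a CPT_INST_S whose RES_ADDR points at the result structure, and polls until hardware writes a non-zero completion code. A hedged sketch of that convention (illustrative only; the retry bound and delay are arbitrary assumptions, and the real driver uses interrupts or its own timeout handling):

static int example_poll_result(union otx2_cpt_res_s *result)
{
	int retries = 10000;	/* arbitrary bound for this sketch */

	/* Before submission: result->s.compcode = OTX2_CPT_COMP_E_NOTDONE; */
	while ((READ_ONCE(result->u[0]) & 0xffULL) == OTX2_CPT_COMP_E_NOTDONE) {
		if (!retries--)
			return -ETIMEDOUT;
		udelay(10);
	}

	if (result->s.compcode != OTX2_CPT_COMP_E_GOOD ||
	    result->s.uc_compcode != OTX2_CPT_UCC_SUCCESS)
		return -EIO;

	return 0;
}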
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
new file mode 100644
index 000000000000..51cb6404ded7
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+
+int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ int ret;
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (ret == -EIO) {
+ dev_err(&pdev->dev, "RVU MBOX timeout.\n");
+ return ret;
+ } else if (ret) {
+ dev_err(&pdev->dev, "RVU MBOX error: %d.\n", ret);
+ return -EFAULT;
+ }
+ return ret;
+}
+
+int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ struct mbox_msghdr *req;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct ready_msg_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_READY;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
+{
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 0;
+ reg_msg->reg_offset = reg;
+ reg_msg->ret_val = val;
+
+ return 0;
+}
+
+int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val)
+{
+ struct cpt_rd_wr_reg_msg *reg_msg;
+
+ reg_msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
+ sizeof(*reg_msg));
+ if (reg_msg == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ reg_msg->hdr.pcifunc = 0;
+
+ reg_msg->is_write = 1;
+ reg_msg->reg_offset = reg;
+ reg_msg->val = val;
+
+ return 0;
+}
+
+int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val)
+{
+ int ret;
+
+ ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 val)
+{
+ int ret;
+
+ ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
+ if (ret)
+ return ret;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_attach *req;
+ int ret;
+
+ req = (struct rsrc_attach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_ATTACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ req->cptlfs = lfs->lfs_num;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (!lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct rsrc_detach *req;
+ int ret;
+
+ req = (struct rsrc_detach *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (req == NULL) {
+ dev_err(&lfs->pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_DETACH_RESOURCES;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ ret = otx2_cpt_send_mbox_msg(mbox, lfs->pdev);
+ if (ret)
+ return ret;
+
+ if (lfs->are_lfs_attached)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct mbox_msghdr *req;
+ int ret, i;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msix_offset_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->id = MBOX_MSG_MSIX_OFFSET;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = 0;
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (lfs->lf[i].msix_offset == MSIX_VECTOR_INVALID) {
+ dev_err(&pdev->dev,
+ "Invalid msix offset %d for LF %d\n",
+ lfs->lf[i].msix_offset, i);
+ return -EINVAL;
+ }
+ }
+ return ret;
+}
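A hypothetical caller of the helpers above (not part of the commit): either use the one-shot otx2_cpt_read_af_reg()/otx2_cpt_write_af_reg() wrappers, or queue several AF register requests and flush them with a single mailbox exchange via otx2_cpt_send_af_reg_requests().

static int example_af_reg_usage(struct otx2_cptlfs_info *lfs)
{
	u64 ctl;
	int ret;

	/* One-shot: queues the request and performs the mailbox round trip. */
	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(0), &ctl);
	if (ret)
		return ret;

	/* Batched: queue one or more requests, then send them together. */
	ret = otx2_cpt_add_write_af_reg(lfs->mbox, lfs->pdev,
					CPT_AF_LFX_CTL(0), ctl);
	if (ret)
		return ret;

	return otx2_cpt_send_af_reg_requests(lfs->mbox, lfs->pdev);
}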
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
new file mode 100644
index 000000000000..dbb1ee746f4c
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_REQMGR_H
+#define __OTX2_CPT_REQMGR_H
+
+#include "otx2_cpt_common.h"
+
+/* Completion code size and initial value */
+#define OTX2_CPT_COMPLETION_CODE_SIZE 8
+#define OTX2_CPT_COMPLETION_CODE_INIT OTX2_CPT_COMP_E_NOTDONE
+/*
+ * Maximum total number of SG buffers is 100, we divide it equally
+ * between input and output
+ */
+#define OTX2_CPT_MAX_SG_IN_CNT 50
+#define OTX2_CPT_MAX_SG_OUT_CNT 50
+
+/* DMA mode direct or SG */
+#define OTX2_CPT_DMA_MODE_DIRECT 0
+#define OTX2_CPT_DMA_MODE_SG 1
+
+/* Context source CPTR or DPTR */
+#define OTX2_CPT_FROM_CPTR 0
+#define OTX2_CPT_FROM_DPTR 1
+
+#define OTX2_CPT_MAX_REQ_SIZE 65535
+
+union otx2_cpt_opcode {
+ u16 flags;
+ struct {
+ u8 major;
+ u8 minor;
+ } s;
+};
+
+struct otx2_cptvf_request {
+ u32 param1;
+ u32 param2;
+ u16 dlen;
+ union otx2_cpt_opcode opcode;
+};
+
+/*
+ * CPT_INST_S software command definitions
+ * Words EI (0-3)
+ */
+union otx2_cpt_iq_cmd_word0 {
+ u64 u;
+ struct {
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
+ } s;
+};
+
+union otx2_cpt_iq_cmd_word3 {
+ u64 u;
+ struct {
+ u64 cptr:61;
+ u64 grp:3;
+ } s;
+};
+
+struct otx2_cpt_iq_command {
+ union otx2_cpt_iq_cmd_word0 cmd;
+ u64 dptr;
+ u64 rptr;
+ union otx2_cpt_iq_cmd_word3 cptr;
+};
+
+struct otx2_cpt_pending_entry {
+ void *completion_addr; /* Completion address */
+ void *info;
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ u8 resume_sender; /* Notify sender to resume sending requests */
+ u8 busy; /* Entry status (free/busy) */
+};
+
+struct otx2_cpt_pending_queue {
+ struct otx2_cpt_pending_entry *head; /* Head of the queue */
+ u32 front; /* Process work from here */
+ u32 rear; /* Append new work here */
+ u32 pending_count; /* Pending requests count */
+ u32 qlen; /* Queue length */
+ spinlock_t lock; /* Queue lock */
+};
+
+struct otx2_cpt_buf_ptr {
+ u8 *vptr;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+union otx2_cpt_ctrl_info {
+ u32 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved_6_31:26;
+ u32 grp:3; /* Group bits */
+ u32 dma_mode:2; /* DMA mode */
+ u32 se_req:1; /* To SE core */
+#else
+ u32 se_req:1; /* To SE core */
+ u32 dma_mode:2; /* DMA mode */
+ u32 grp:3; /* Group bits */
+ u32 reserved_6_31:26;
+#endif
+ } s;
+};
+
+struct otx2_cpt_req_info {
+ /* Kernel async request callback */
+ void (*callback)(int status, void *arg1, void *arg2);
+ struct crypto_async_request *areq; /* Async request callback arg */
+ struct otx2_cptvf_request req;/* Request information (core specific) */
+ union otx2_cpt_ctrl_info ctrl;/* User control information */
+ struct otx2_cpt_buf_ptr in[OTX2_CPT_MAX_SG_IN_CNT];
+ struct otx2_cpt_buf_ptr out[OTX2_CPT_MAX_SG_OUT_CNT];
+ u8 *iv_out; /* IV to send back */
+ u16 rlen; /* Output length */
+ u8 in_cnt; /* Number of input buffers */
+ u8 out_cnt; /* Number of output buffers */
+ u8 req_type; /* Type of request */
+ u8 is_enc; /* Is a request an encryption request */
+ u8 is_trunc_hmac;/* Is truncated hmac used */
+};
+
+struct otx2_cpt_inst_info {
+ struct otx2_cpt_pending_entry *pentry;
+ struct otx2_cpt_req_info *req;
+ struct pci_dev *pdev;
+ void *completion_addr;
+ u8 *out_buffer;
+ u8 *in_buffer;
+ dma_addr_t dptr_baddr;
+ dma_addr_t rptr_baddr;
+ dma_addr_t comp_baddr;
+ unsigned long time_in;
+ u32 dlen;
+ u32 dma_len;
+ u8 extra_time;
+};
+
+struct otx2_cpt_sglist_component {
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
+};
+
+static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
+ struct otx2_cpt_inst_info *info)
+{
+ struct otx2_cpt_req_info *req;
+ int i;
+
+ if (info->dptr_baddr)
+ dma_unmap_single(&pdev->dev, info->dptr_baddr,
+ info->dma_len, DMA_BIDIRECTIONAL);
+
+ if (info->req) {
+ req = info->req;
+ for (i = 0; i < req->out_cnt; i++) {
+ if (req->out[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->out[i].dma_addr,
+ req->out[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ for (i = 0; i < req->in_cnt; i++) {
+ if (req->in[i].dma_addr)
+ dma_unmap_single(&pdev->dev,
+ req->in[i].dma_addr,
+ req->in[i].size,
+ DMA_BIDIRECTIONAL);
+ }
+ }
+ kfree(info);
+}
+
+struct otx2_cptlf_wqe;
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num);
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+
+#endif /* __OTX2_CPT_REQMGR_H */
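A hypothetical submission sketch for the structures declared above (the real users are the skcipher/aead implementations in otx2_cptvf_algs.c, outside this range; the opcode value is a placeholder, since opcodes are defined by the loaded microcode, and the caller is assumed to provide storage for the request info rather than placing it on the stack):

static int example_submit(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
			  u8 *in, u16 in_len, u8 *out, u16 out_len,
			  void (*done)(int status, void *arg1, void *arg2),
			  struct crypto_async_request *areq, int cpu_num)
{
	memset(req, 0, sizeof(*req));

	req->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
	req->ctrl.s.se_req = 1;		/* route to a symmetric (SE) engine */
	req->req.opcode.s.major = 0x01;	/* placeholder opcode */
	req->req.dlen = in_len;

	req->in_cnt = 1;
	req->in[0].vptr = in;
	req->in[0].size = in_len;

	req->out_cnt = 1;
	req->out[0].vptr = out;
	req->out[0].size = out_len;
	req->rlen = out_len;

	req->callback = done;
	req->areq = areq;

	return otx2_cpt_do_request(pdev, req, cpu_num);
}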
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
new file mode 100644
index 000000000000..823a4571fd67
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptlf.h"
+#include "rvu_reg.h"
+
+#define CPT_TIMER_HOLD 0x03F
+#define CPT_COUNT_HOLD 32
+
+static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
+ int time_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.time_wait = time_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT);
+ done_wait.s.num_wait = num_wait;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+}
+
+static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
+ int time_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
+}
+
+static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
+}
+
+static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.pri = pri ? 1 : 0;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u);
+ return ret;
+}
+
+static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
+ int eng_grps_mask)
+{
+ struct otx2_cptlfs_info *lfs = lf->lfs;
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ int ret;
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.grp = eng_grps_mask;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u);
+ return ret;
+}
+
+static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
+ int eng_grp_mask, int pri)
+{
+ int slot, ret = 0;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ ret = cptlf_set_pri(&lfs->lf[slot], pri);
+ if (ret)
+ return ret;
+
+ ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+
+ /* Set instruction queues base addresses */
+ otx2_cptlf_set_iqueues_base_addr(lfs);
+
+ /* Set instruction queues sizes */
+ otx2_cptlf_set_iqueues_size(lfs);
+
+ /* Set done interrupts time wait */
+ cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);
+
+ /* Set done interrupts num wait */
+ cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);
+
+ /* Enable instruction queues */
+ otx2_cptlf_enable_iqueues(lfs);
+}
+
+static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ /* Disable instruction queues */
+ otx2_cptlf_disable_iqueues(lfs);
+}
+
+static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
+{
+ union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
+ u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
+ OTX2_CPT_LF_MISC_INT_ENA_W1C;
+ int slot;
+
+ irq_misc.s.fault = 0x1;
+ irq_misc.s.hwerr = 0x1;
+ irq_misc.s.irde = 0x1;
+ irq_misc.s.nqerr = 0x1;
+ irq_misc.s.nwrp = 0x1;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
+ irq_misc.u);
+}
+
+static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ /* Enable done interrupts */
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
+ /* Enable Misc interrupts */
+ cptlf_set_misc_intrs(lfs, true);
+}
+
+static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
+ cptlf_set_misc_intrs(lfs, false);
+}
+
+static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_done irq_cnt;
+
+ irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE);
+ return irq_cnt.s.done;
+}
+
+static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
+{
+ union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
+ struct otx2_cptlf_info *lf = arg;
+ struct device *dev;
+
+ dev = &lf->lfs->pdev->dev;
+ irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_MISC_INT);
+ irq_misc_ack.u = 0x0;
+
+ if (irq_misc.s.fault) {
+ dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.fault = 0x1;
+
+ } else if (irq_misc.s.hwerr) {
+ dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.",
+ lf->slot);
+ irq_misc_ack.s.hwerr = 0x1;
+
+ } else if (irq_misc.s.nwrp) {
+ dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
+ lf->slot);
+ irq_misc_ack.s.nwrp = 0x1;
+
+ } else if (irq_misc.s.irde) {
+ dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
+ irq_misc_ack.s.irde = 0x1;
+
+ } else if (irq_misc.s.nqerr) {
+ dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
+ irq_misc_ack.s.nqerr = 0x1;
+
+ } else {
+ dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Acknowledge interrupts */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
+{
+ union otx2_cptx_lf_done_wait done_wait;
+ struct otx2_cptlf_info *lf = arg;
+ int irq_cnt;
+
+ /* Read the number of completed requests */
+ irq_cnt = cptlf_read_done_cnt(lf);
+ if (irq_cnt) {
+ done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf->slot, OTX2_CPT_LF_DONE_WAIT);
+ /* Acknowledge the number of completed requests */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_ACK, irq_cnt);
+
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_DONE_WAIT, done_wait.u);
+ if (unlikely(!lf->wqe)) {
+ dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
+ lf->slot);
+ return IRQ_NONE;
+ }
+
+ /* Schedule processing of completed requests */
+ tasklet_hi_schedule(&lf->wqe->work);
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int i, offs, vector;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ if (!lfs->lf[i].is_irq_reg[offs])
+ continue;
+
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[offs] = false;
+ }
+ }
+ cptlf_disable_intrs(lfs);
+}
+
+static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
+ int lf_num, int irq_offset,
+ irq_handler_t handler)
+{
+ int ret, vector;
+
+ vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
+ irq_offset);
+ ret = request_irq(vector, handler, 0,
+ lfs->lf[lf_num].irq_name[irq_offset],
+ &lfs->lf[lf_num]);
+ if (ret)
+ return ret;
+
+ lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
+
+ return ret;
+}
+
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int irq_offs, ret, i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
+ ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
+ cptlf_misc_intr_handler);
+ if (ret)
+ goto free_irq;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
+ i);
+ ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
+ cptlf_done_intr_handler);
+ if (ret)
+ goto free_irq;
+ }
+ cptlf_enable_intrs(lfs);
+ return 0;
+
+free_irq:
+ otx2_cptlf_unregister_interrupts(lfs);
+ return ret;
+}
+
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ int slot, offs;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
+ irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lfs->lf[slot].msix_offset +
+ offs), NULL);
+ free_cpumask_var(lfs->lf[slot].affinity_mask);
+ }
+}
+
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_info *lf = lfs->lf;
+ int slot, offs, ret;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
+ dev_err(&lfs->pdev->dev,
+ "cpumask allocation failed for LF %d", slot);
+ ret = -ENOMEM;
+ goto free_affinity_mask;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(slot,
+ dev_to_node(&lfs->pdev->dev)),
+ lf[slot].affinity_mask);
+
+ for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
+ ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
+ lf[slot].msix_offset + offs),
+ lf[slot].affinity_mask);
+ if (ret)
+ goto free_affinity_mask;
+ }
+ }
+ return 0;
+
+free_affinity_mask:
+ otx2_cptlf_free_irqs_affinity(lfs);
+ return ret;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ int lfs_num)
+{
+ int slot, ret;
+
+ if (!lfs->pdev || !lfs->reg_base)
+ return -EINVAL;
+
+ lfs->lfs_num = lfs_num;
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lfs->lf[slot].lfs = lfs;
+ lfs->lf[slot].slot = slot;
+ lfs->lf[slot].lmtline = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
+ OTX2_CPT_LMT_LF_LMTLINEX(0));
+ lfs->lf[slot].ioreg = lfs->reg_base +
+ OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_NQX(0));
+ }
+ /* Send request to attach LFs */
+ ret = otx2_cpt_attach_rscrs_msg(lfs);
+ if (ret)
+ goto clear_lfs_num;
+
+ ret = otx2_cpt_alloc_instruction_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating instruction queues failed\n");
+ goto detach_rsrcs;
+ }
+ cptlf_hw_init(lfs);
+ /*
+ * Allow each LF to execute requests destined to any of 8 engine
+ * groups and set queue priority of each LF to high
+ */
+ ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
+ if (ret)
+ goto free_iq;
+
+ return 0;
+
+free_iq:
+ otx2_cpt_free_instruction_queues(lfs);
+ cptlf_hw_cleanup(lfs);
+detach_rsrcs:
+ otx2_cpt_detach_rsrcs_msg(lfs);
+clear_lfs_num:
+ lfs->lfs_num = 0;
+ return ret;
+}
+
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ lfs->lfs_num = 0;
+ /* Cleanup LFs hardware side */
+ cptlf_hw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
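A hedged bring-up sketch for the helpers in this file (the real call sites are the PF/VF probe paths in otx2_cptpf_main.c and otx2_cptvf_main.c, outside this hunk; the error unwinding order is an assumption):

static int example_cptlf_bringup(struct otx2_cptlfs_info *lfs, int lfs_num)
{
	int ret;

	/* Attach LFs, allocate instruction queues and program the hardware. */
	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, lfs_num);
	if (ret)
		return ret;

	ret = otx2_cptlf_register_interrupts(lfs);
	if (ret)
		goto shutdown;

	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_irqs;

	return 0;

unregister_irqs:
	otx2_cptlf_unregister_interrupts(lfs);
shutdown:
	otx2_cptlf_shutdown(lfs);
	return ret;
}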
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
new file mode 100644
index 000000000000..314e97354100
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+#ifndef __OTX2_CPTLF_H
+#define __OTX2_CPTLF_H
+
+#include <linux/soc/marvell/octeontx2/asm.h>
+#include <mbox.h>
+#include <rvu.h>
+#include "otx2_cpt_common.h"
+#include "otx2_cpt_reqmgr.h"
+
+/*
+ * CPT instruction and pending queues user requested length in CPT_INST_S msgs
+ */
+#define OTX2_CPT_USER_REQUESTED_QLEN_MSGS 8200
+
+/*
+ * CPT instruction queue size passed to HW is in units of 40*CPT_INST_S
+ * messages.
+ */
+#define OTX2_CPT_SIZE_DIV40 (OTX2_CPT_USER_REQUESTED_QLEN_MSGS/40)
+
+/*
+ * CPT instruction and pending queues length in CPT_INST_S messages
+ */
+#define OTX2_CPT_INST_QLEN_MSGS ((OTX2_CPT_SIZE_DIV40 - 1) * 40)
+
+/* CPT instruction queue length in bytes */
+#define OTX2_CPT_INST_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 40 * \
+ OTX2_CPT_INST_SIZE)
+
+/* CPT instruction group queue length in bytes */
+#define OTX2_CPT_INST_GRP_QLEN_BYTES (OTX2_CPT_SIZE_DIV40 * 16)
+
+/* CPT FC length in bytes */
+#define OTX2_CPT_Q_FC_LEN 128
+
+/* CPT instruction queue alignment */
+#define OTX2_CPT_INST_Q_ALIGNMENT 128
+
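Plugging the constants above into the sizing macros gives the following worked numbers (an illustration, not additional code in the commit; OTX2_CPT_INST_SIZE is 64 bytes per otx2_cpt_hw_types.h):

/*
 *   OTX2_CPT_SIZE_DIV40          = 8200 / 40       = 205
 *   OTX2_CPT_INST_QLEN_MSGS      = (205 - 1) * 40  = 8160 instructions
 *   OTX2_CPT_INST_QLEN_BYTES     = 205 * 40 * 64   = 524800 bytes
 *   OTX2_CPT_INST_GRP_QLEN_BYTES = 205 * 16        = 3280 bytes
 */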
+/* Mask which selects all engine groups */
+#define OTX2_CPT_ALL_ENG_GRPS_MASK 0xFF
+
+/* Maximum LFs supported in OcteonTX2 for CPT */
+#define OTX2_CPT_MAX_LFS_NUM 64
+
+/* Queue priority */
+#define OTX2_CPT_QUEUE_HI_PRIO 0x1
+#define OTX2_CPT_QUEUE_LOW_PRIO 0x0
+
+enum otx2_cptlf_state {
+ OTX2_CPTLF_IN_RESET,
+ OTX2_CPTLF_STARTED,
+};
+
+struct otx2_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+struct otx2_cptlfs_info;
+struct otx2_cptlf_wqe {
+ struct tasklet_struct work;
+ struct otx2_cptlfs_info *lfs;
+ u8 lf_num;
+};
+
+struct otx2_cptlf_info {
+ struct otx2_cptlfs_info *lfs; /* Ptr to cptlfs_info struct */
+ void __iomem *lmtline; /* Address of LMTLINE */
+ void __iomem *ioreg; /* LMTLINE send register */
+ int msix_offset; /* MSI-X interrupts offset */
+ cpumask_var_t affinity_mask; /* IRQs affinity mask */
+ u8 irq_name[OTX2_CPT_LF_MSIX_VECTORS][32];/* Interrupts name */
+ u8 is_irq_reg[OTX2_CPT_LF_MSIX_VECTORS]; /* Is interrupt registered */
+ u8 slot; /* Slot number of this LF */
+
+ struct otx2_cpt_inst_queue iqueue;/* Instruction queue */
+ struct otx2_cpt_pending_queue pqueue; /* Pending queue */
+ struct otx2_cptlf_wqe *wqe; /* Tasklet work info */
+};
+
+struct otx2_cptlfs_info {
+ /* Registers start address of VF/PF LFs are attached to */
+ void __iomem *reg_base;
+ struct pci_dev *pdev; /* Device LFs are attached to */
+ struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
+ struct otx2_mbox *mbox;
+ u8 are_lfs_attached; /* Whether CPT LFs are attached */
+ u8 lfs_num; /* Number of CPT LFs */
+ u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kvf_limits; /* Kernel crypto limits */
+ atomic_t state; /* LF's state. started/reset */
+};
+
+static inline void otx2_cpt_free_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ if (iq->real_vaddr)
+ dma_free_coherent(&lfs->pdev->dev,
+ iq->size,
+ iq->real_vaddr,
+ iq->real_dma_addr);
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+ }
+}
+
+static inline int otx2_cpt_alloc_instruction_queues(
+ struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cpt_inst_queue *iq;
+ int ret = 0, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ iq = &lfs->lf[i].iqueue;
+ iq->size = OTX2_CPT_INST_QLEN_BYTES +
+ OTX2_CPT_Q_FC_LEN +
+ OTX2_CPT_INST_GRP_QLEN_BYTES +
+ OTX2_CPT_INST_Q_ALIGNMENT;
+ iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr,
+ OTX2_CPT_INST_Q_ALIGNMENT);
+ }
+ return 0;
+
+error:
+ otx2_cpt_free_instruction_queues(lfs);
+ return ret;
+}
+
+static inline void otx2_cptlf_set_iqueues_base_addr(
+ struct otx2_cptlfs_info *lfs)
+{
+ union otx2_cptx_lf_q_base lf_q_base;
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
+ otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
+ OTX2_CPT_LF_Q_BASE, lf_q_base.u);
+ }
+}
+
+static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };
+
+ lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
+}
+
+static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
+{
+ union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
+ union otx2_cptx_lf_inprog lf_inprog;
+ int timeout = 20;
+
+	/* Disable instruction enqueuing */
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+
+ /* Wait for instruction queue to become empty */
+ do {
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
+ lf->slot, OTX2_CPT_LF_INPROG);
+ if (!lf_inprog.s.inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ dev_err(&lf->lfs->pdev->dev,
+ "Error LF %d is still busy.\n", lf->slot);
+ break;
+ }
+
+ } while (1);
+
+ /*
+	 * Disable execution in the LF's queue;
+	 * the queue should be empty at this point
+ */
+ lf_inprog.s.eena = 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++)
+ otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
+}
+
+static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ union otx2_cptx_lf_ctl lf_ctl;
+
+ lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL);
+
+ /* Set iqueue's enqueuing */
+ lf_ctl.s.ena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_CTL, lf_ctl.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_enq(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_enq(lf, true);
+}
+
+static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
+ bool enable)
+{
+ union otx2_cptx_lf_inprog lf_inprog;
+
+ lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG);
+
+ /* Set iqueue's execution */
+ lf_inprog.s.eena = enable ? 0x1 : 0x0;
+ otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
+ OTX2_CPT_LF_INPROG, lf_inprog.u);
+}
+
+static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, true);
+}
+
+static inline void otx2_cptlf_disable_iqueue_exec(struct otx2_cptlf_info *lf)
+{
+ otx2_cptlf_set_iqueue_exec(lf, false);
+}
+
+static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
+{
+ int slot;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
+ otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
+ }
+}
+
+static inline void otx2_cpt_fill_inst(union otx2_cpt_inst_s *cptinst,
+ struct otx2_cpt_iq_command *iq_cmd,
+ u64 comp_baddr)
+{
+ cptinst->u[0] = 0x0;
+ cptinst->s.doneint = true;
+ cptinst->s.res_addr = comp_baddr;
+ cptinst->u[2] = 0x0;
+ cptinst->u[3] = 0x0;
+ cptinst->s.ei0 = iq_cmd->cmd.u;
+ cptinst->s.ei1 = iq_cmd->dptr;
+ cptinst->s.ei2 = iq_cmd->rptr;
+ cptinst->s.ei3 = iq_cmd->cptr.u;
+}
+
+/*
+ * On the OcteonTX2 platform the insts_num parameter is the number of
+ * instructions to be enqueued. The valid values for insts_num are:
+ * 1 - 1 CPT instruction will be enqueued during LMTST operation
+ * 2 - 2 CPT instructions will be enqueued during LMTST operation
+ */
+static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
+ u32 insts_num, struct otx2_cptlf_info *lf)
+{
+ void __iomem *lmtline = lf->lmtline;
+ long ret;
+
+ /*
+	 * Make sure the memory areas pointed to by CPT_INST_S
+	 * are flushed before the instruction is sent to CPT
+ */
+ dma_wmb();
+
+ do {
+ /* Copy CPT command to LMTLINE */
+ memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+
+ /*
+ * LDEOR initiates atomic transfer to I/O device
+ * The following will cause the LMTST to fail (the LDEOR
+ * returns zero):
+ * - No stores have been performed to the LMTLINE since it was
+ * last invalidated.
+ * - The bytes which have been stored to LMTLINE since it was
+ * last invalidated form a pattern that is non-contiguous, does
+	 *   not start at byte 0, or does not end on an 8-byte boundary
+	 *   (i.e. comprises a formation of other than 1-16 8-byte words).
+ *
+ * These rules are designed such that an operating system
+ * context switch or hypervisor guest switch need have no
+ * knowledge of the LMTST operations; the switch code does not
+ * need to store to LMTCANCEL. Also note as LMTLINE data cannot
+ * be read, there is no information leakage between processes.
+ */
+ ret = otx2_lmt_flush(lf->ioreg);
+
+ } while (!ret);
+}
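+
+/*
+ * Illustrative submission sketch (not lifted from a real caller; iq_cmd,
+ * comp_dma and lf stand for state the caller already owns):
+ *
+ *	union otx2_cpt_inst_s inst;
+ *
+ *	otx2_cpt_fill_inst(&inst, &iq_cmd, comp_dma);
+ *	otx2_cpt_send_cmd(&inst, 1, lf);
+ *
+ * i.e. fill one CPT_INST_S and push it through the LF's LMTLINE as a
+ * single-instruction LMTST.
+ */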
+
+static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
+{
+ return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
+}
+
+int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
+ int lfs_num);
+void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);
+
+#endif /* __OTX2_CPTLF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
new file mode 100644
index 000000000000..8c899ad531a5
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_H
+#define __OTX2_CPTPF_H
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptpf_dev;
+struct otx2_cptvf_info {
+ struct otx2_cptpf_dev *cptpf; /* PF pointer this VF belongs to */
+ struct work_struct vfpf_mbox_work;
+ struct pci_dev *vf_dev;
+ int vf_id;
+ int intr_idx;
+};
+
+struct cptpf_flr_work {
+ struct work_struct work;
+ struct otx2_cptpf_dev *pf;
+};
+
+struct otx2_cptpf_dev {
+ void __iomem *reg_base; /* CPT PF registers start address */
+ void __iomem *afpf_mbox_base; /* PF-AF mbox start address */
+ void __iomem *vfpf_mbox_base; /* VF-PF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
+ struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this PF */
+ /* HW capabilities for each engine type */
+ union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
+ bool is_eng_caps_discovered;
+
+ /* AF <=> PF mbox */
+ struct otx2_mbox afpf_mbox;
+ struct work_struct afpf_mbox_work;
+ struct workqueue_struct *afpf_mbox_wq;
+
+ /* VF <=> PF mbox */
+ struct otx2_mbox vfpf_mbox;
+ struct workqueue_struct *vfpf_mbox_wq;
+
+ struct workqueue_struct *flr_wq;
+ struct cptpf_flr_work *flr_work;
+
+ u8 pf_id; /* RVU PF number */
+ u8 max_vfs; /* Maximum number of VFs supported by CPT */
+ u8 enabled_vfs; /* Number of enabled VFs */
+ u8 kvf_limits; /* Kernel crypto limits */
+};
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work);
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
+
+#endif /* __OTX2_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
new file mode 100644
index 000000000000..5277e04badd9
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <linux/firmware.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cptpf.h"
+#include "rvu_reg.h"
+
+#define OTX2_CPT_DRV_NAME "octeontx2-cpt"
+#define OTX2_CPT_DRV_STRING "Marvell OcteonTX2 CPT Physical Function Driver"
+
+static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int ena_bits;
+
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);
+
+ /* Enable VF interrupts for VFs from 0 to 63 */
+ ena_bits = ((num_vfs - 1) % 64);
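+	/*
+	 * Example: with num_vfs = 4 this gives ena_bits = 3, so the
+	 * GENMASK_ULL(3, 0) write below enables interrupts for VF0-VF3.
+	 */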
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
+ GENMASK_ULL(ena_bits, 0));
+
+ if (num_vfs > 64) {
+ /* Enable VF interrupts for VFs from 64 to 127 */
+ ena_bits = num_vfs - 64 - 1;
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ GENMASK_ULL(ena_bits, 0));
+ }
+}
+
+static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int vector;
+
+ /* Disable VF-PF interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
+ /* Clear any pending interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);
+
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+
+ if (num_vfs > 64) {
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+ }
+}
+
+static void cptpf_enable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf)
+{
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
+ ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ ~0x0ULL);
+
+ /* Enable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(1), ~0x0ULL);
+}
+
+static void cptpf_disable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ int vector;
+
+ /* Disable VF FLR interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(0), ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(1), ~0x0ULL);
+
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
+ ~0x0ULL);
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
+ ~0x0ULL);
+
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+
+ if (num_vfs > 64) {
+ vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(vector, cptpf);
+ }
+}
+
+static void cptpf_flr_wq_handler(struct work_struct *work)
+{
+ struct cptpf_flr_work *flr_work;
+ struct otx2_cptpf_dev *pf;
+ struct mbox_msghdr *req;
+ struct otx2_mbox *mbox;
+ int vf, reg = 0;
+
+ flr_work = container_of(work, struct cptpf_flr_work, work);
+ pf = flr_work->pf;
+ mbox = &pf->afpf_mbox;
+
+ vf = flr_work - pf->flr_work;
+
+ req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req)
+ return;
+
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->id = MBOX_MSG_VF_FLR;
+ req->pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ otx2_cpt_send_mbox_msg(mbox, pf->pdev);
+
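+	/*
+	 * Example: VF 70 falls in the second 64-VF register set, so reg
+	 * becomes 1 and BIT_ULL(6) is used in the writes below.
+	 */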
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* Clear transaction pending register */
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
+{
+ int reg, dev, vf, start_vf, num_reg = 1;
+ struct otx2_cptpf_dev *cptpf = arg;
+ u64 intr;
+
+ if (cptpf->max_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
+ /* Clear interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
+ int num_vfs)
+{
+ cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_disable_vf_flr_intrs(cptpf, num_vfs);
+}
+
+static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, vector;
+
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ /* Register VF-PF mailbox interrupt handler */
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox0 irq\n");
+ return ret;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR0 irq\n");
+ goto free_mbox0_irq;
+ }
+ if (num_vfs > 64) {
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
+ "CPTVFPF Mbox1", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFVF mbox1 irq\n");
+ goto free_flr0_irq;
+ }
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
+ /* Register VF FLR interrupt handler */
+ ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
+ cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for VFFLR1 irq\n");
+ goto free_mbox1_irq;
+ }
+ }
+ cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
+ cptpf_enable_vf_flr_intrs(cptpf);
+
+ return 0;
+
+free_mbox1_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, cptpf);
+free_flr0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(vector, cptpf);
+free_mbox0_irq:
+ vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, cptpf);
+ return ret;
+}
+
+static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ kfree(pf->flr_work);
+}
+
+static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ int vf;
+
+ cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
+ if (!cptpf->flr_wq)
+ return -ENOMEM;
+
+ cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
+ GFP_KERNEL);
+ if (!cptpf->flr_work)
+ goto destroy_wq;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ cptpf->flr_work[vf].pf = cptpf;
+ INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
+ }
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(cptpf->flr_wq);
+ return -ENOMEM;
+}
+
+static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ u64 vfpf_mbox_base;
+ int err, i;
+
+ cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->vfpf_mbox_wq)
+ return -ENOMEM;
+
+ /* Map VF-PF mailbox memory */
+ vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
+ if (!vfpf_mbox_base) {
+ dev_err(dev, "VF-PF mailbox address not configured\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
+ MBOX_SIZE * cptpf->max_vfs);
+ if (!cptpf->vfpf_mbox_base) {
+ dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
+ err = -ENOMEM;
+ goto free_wqe;
+ }
+ err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
+ cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
+ num_vfs);
+ if (err)
+ goto free_wqe;
+
+ for (i = 0; i < num_vfs; i++) {
+ cptpf->vf[i].vf_id = i;
+ cptpf->vf[i].cptpf = cptpf;
+ cptpf->vf[i].intr_idx = i % 64;
+ INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
+ otx2_cptpf_vfpf_mbox_handler);
+ }
+ return 0;
+
+free_wqe:
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->vfpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->vfpf_mbox);
+}
+
+static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ /* Disable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
+ 0x1ULL);
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+}
+
+static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
+{
+ struct pci_dev *pdev = cptpf->pdev;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ /* Register AF-PF mailbox interrupt handler */
+ ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
+ "CPTAFPF Mbox", cptpf);
+ if (ret) {
+ dev_err(dev,
+ "IRQ registration failed for PFAF mbox irq\n");
+ return ret;
+ }
+ /* Clear interrupt if any, to avoid spurious interrupts */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
+ /* Enable AF-PF interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
+ 0x1ULL);
+
+ ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret) {
+ dev_warn(dev,
+ "AF not responding to mailbox, deferring probe\n");
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
+{
+ int err;
+
+ cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptpf->afpf_mbox_wq)
+ return -ENOMEM;
+
+ err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
+ cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
+ if (err)
+ goto error;
+
+ INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
+ return 0;
+
+error:
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ return err;
+}
+
+static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
+{
+ destroy_workqueue(cptpf->afpf_mbox_wq);
+ otx2_mbox_destroy(&cptpf->afpf_mbox);
+}
+
+static ssize_t kvf_limits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", cptpf->kvf_limits);
+}
+
+static ssize_t kvf_limits_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ int lfs_num;
+
+ if (kstrtoint(buf, 0, &lfs_num)) {
+		dev_err(dev, "lfs count must be an integer in range [1 - %d]\n",
+			num_online_cpus());
+ return -EINVAL;
+ }
+ if (lfs_num < 1 || lfs_num > num_online_cpus()) {
+ dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
+ lfs_num, num_online_cpus());
+ return -EINVAL;
+ }
+ cptpf->kvf_limits = lfs_num;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(kvf_limits);
+static struct attribute *cptpf_attrs[] = {
+ &dev_attr_kvf_limits.attr,
+ NULL
+};
+
+static const struct attribute_group cptpf_sysfs_group = {
+ .attrs = cptpf_attrs,
+};
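+
+/*
+ * The kvf_limits attribute is created on the PF's PCI device kobject (see
+ * sysfs_create_group() in the probe routine), so it can be tuned from user
+ * space, e.g. (the device address below is only an illustration):
+ *
+ *	echo 2 > /sys/bus/pci/devices/0000:02:00.0/kvf_limits
+ */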
+
+static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
+{
+ u64 rev;
+
+ rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
+ rev = (rev >> 12) & 0xFF;
+ /*
+	 * Check if the AF has set up the revision for the RVUM block,
+	 * otherwise driver probe should be deferred until the AF driver
+	 * comes up
+ */
+ if (!rev) {
+ dev_warn(&cptpf->pdev->dev,
+ "AF is not initialized, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+ int timeout = 10, ret;
+ u64 reg = 0;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, 0x1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_BLK_RST, &reg);
+ if (ret)
+ return ret;
+
+ if (!((reg >> 63) & 0x1))
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+ } while (1);
+
+ return ret;
+}
+
+static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
+{
+ union otx2_cptx_af_constants1 af_cnsts1 = {0};
+ int ret = 0;
+
+ /* Reset the CPT PF device */
+ ret = cptpf_device_reset(cptpf);
+ if (ret)
+ return ret;
+
+ /* Get number of SE, IE and AE engines */
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_CONSTANTS1, &af_cnsts1.u);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
+ cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
+ cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
+
+ /* Disable all cores */
+ ret = otx2_cpt_disable_all_cores(cptpf);
+
+ return ret;
+}
+
+static int cptpf_sriov_disable(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int num_vfs = pci_num_vf(pdev);
+
+ if (!num_vfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf_flr_wq_destroy(cptpf);
+ cptpf_vfpf_mbox_destroy(cptpf);
+ module_put(THIS_MODULE);
+ cptpf->enabled_vfs = 0;
+
+ return 0;
+}
+
+static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+ int ret;
+
+ /* Initialize VF<=>PF mailbox */
+ ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
+ if (ret)
+ return ret;
+
+ ret = cptpf_flr_wq_init(cptpf, num_vfs);
+ if (ret)
+ goto destroy_mbox;
+ /* Register VF<=>PF mailbox interrupt */
+ ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
+ if (ret)
+ goto destroy_flr;
+
+ /* Get CPT HW capabilities using LOAD_FVC operation. */
+ ret = otx2_cpt_discover_eng_capabilities(cptpf);
+ if (ret)
+ goto disable_intr;
+
+ ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
+ if (ret)
+ goto disable_intr;
+
+ cptpf->enabled_vfs = num_vfs;
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ goto disable_intr;
+
+ dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);
+
+ try_module_get(THIS_MODULE);
+ return num_vfs;
+
+disable_intr:
+ cptpf_unregister_vfpf_intr(cptpf, num_vfs);
+ cptpf->enabled_vfs = 0;
+destroy_flr:
+ cptpf_flr_wq_destroy(cptpf);
+destroy_mbox:
+ cptpf_vfpf_mbox_destroy(cptpf);
+ return ret;
+}
+
+static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	if (num_vfs > 0)
+		return cptpf_sriov_enable(pdev, num_vfs);
+	else
+		return cptpf_sriov_disable(pdev);
+}
+
+static int otx2_cptpf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t offset, size;
+ struct otx2_cptpf_dev *cptpf;
+ int err;
+
+ cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
+ if (!cptpf)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map PF's configuration registers */
+ err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPT_DRV_NAME);
+ if (err) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptpf);
+ cptpf->pdev = pdev;
+
+ cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+
+ /* Check if AF driver is up, otherwise defer probe */
+ err = cpt_is_pf_usable(cptpf);
+ if (err)
+ goto clear_drvdata;
+
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map AF-PF mailbox memory */
+ cptpf->afpf_mbox_base = devm_ioremap_wc(dev, offset, size);
+ if (!cptpf->afpf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ err = -ENODEV;
+ goto clear_drvdata;
+ }
+ err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "Request for %d msix vectors failed\n",
+ RVU_PF_INT_VEC_CNT);
+ goto clear_drvdata;
+ }
+ /* Initialize AF-PF mailbox */
+ err = cptpf_afpf_mbox_init(cptpf);
+ if (err)
+ goto clear_drvdata;
+ /* Register mailbox interrupt */
+ err = cptpf_register_afpf_mbox_intr(cptpf);
+ if (err)
+ goto destroy_afpf_mbox;
+
+ cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
+ if (err)
+ goto unregister_intr;
+
+ /* Initialize engine groups */
+ err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
+ if (err)
+ goto unregister_intr;
+
+ err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
+ if (err)
+ goto cleanup_eng_grps;
+ return 0;
+
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+unregister_intr:
+ cptpf_disable_afpf_mbox_intr(cptpf);
+destroy_afpf_mbox:
+ cptpf_afpf_mbox_destroy(cptpf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void otx2_cptpf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
+
+ if (!cptpf)
+ return;
+
+ cptpf_sriov_disable(pdev);
+ /* Delete sysfs entry created for kernel VF limits */
+ sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
+ /* Cleanup engine groups */
+ otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+ /* Disable AF-PF mailbox interrupt */
+ cptpf_disable_afpf_mbox_intr(cptpf);
+ /* Destroy AF-PF mbox */
+ cptpf_afpf_mbox_destroy(cptpf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cpt_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cpt_pci_driver = {
+ .name = OTX2_CPT_DRV_NAME,
+ .id_table = otx2_cpt_id_table,
+ .probe = otx2_cptpf_probe,
+ .remove = otx2_cptpf_remove,
+ .sriov_configure = otx2_cptpf_sriov_configure
+};
+
+module_pci_driver(otx2_cpt_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
new file mode 100644
index 000000000000..186f1c1190c1
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "rvu_reg.h"
+
+/*
+ * CPT PF driver version; it will be incremented by 1 for every feature
+ * addition to the CPT mailbox messages.
+ */
+#define OTX2_CPT_PF_DRV_VERSION 0x1
+
+static int forward_to_af(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ struct mbox_msghdr *msg;
+ int ret;
+
+ msg = otx2_mbox_alloc_msg(&cptpf->afpf_mbox, 0, size);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ memcpy((uint8_t *)msg + sizeof(struct mbox_msghdr),
+ (uint8_t *)req + sizeof(struct mbox_msghdr), size);
+ msg->id = req->id;
+ msg->pcifunc = req->pcifunc;
+ msg->sig = req->sig;
+ msg->ver = req->ver;
+
+ otx2_mbox_msg_send(&cptpf->afpf_mbox, 0);
+ ret = otx2_mbox_wait_for_rsp(&cptpf->afpf_mbox, 0);
+ if (ret == -EIO) {
+ dev_err(&cptpf->pdev->dev, "RVU MBOX timeout.\n");
+ return ret;
+ } else if (ret) {
+ dev_err(&cptpf->pdev->dev, "RVU MBOX error: %d.\n", ret);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_caps_rsp *rsp;
+
+ rsp = (struct otx2_cpt_caps_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,
+ sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_CAPS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
+ rsp->cpt_revision = cptpf->pdev->revision;
+ memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
+
+ return 0;
+}
+
+static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_egrp_num_msg *grp_req;
+ struct otx2_cpt_egrp_num_rsp *rsp;
+
+ grp_req = (struct otx2_cpt_egrp_num_msg *)req;
+ rsp = (struct otx2_cpt_egrp_num_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->eng_type = grp_req->eng_type;
+ rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ grp_req->eng_type);
+
+ return 0;
+}
+
+static int handle_msg_kvf_limits(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req)
+{
+ struct otx2_cpt_kvf_limits_rsp *rsp;
+
+ rsp = (struct otx2_cpt_kvf_limits_rsp *)
+ otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_GET_KVF_LIMITS;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = req->pcifunc;
+ rsp->kvf_limits = cptpf->kvf_limits;
+
+ return 0;
+}
+
+static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptvf_info *vf,
+ struct mbox_msghdr *req, int size)
+{
+ int err = 0;
+
+	/* Check if the msg is valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ switch (req->id) {
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ err = handle_msg_get_eng_grp_num(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_CAPS:
+ err = handle_msg_get_caps(cptpf, vf, req);
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ err = handle_msg_kvf_limits(cptpf, vf, req);
+ break;
+ default:
+ err = forward_to_af(cptpf, vf, req, size);
+ break;
+ }
+ return err;
+
+inval_msg:
+ otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
+ otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);
+ return err;
+}
+
+irqreturn_t otx2_cptpf_vfpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ struct otx2_cptvf_info *vf;
+ int i, vf_idx;
+ u64 intr;
+
+ /*
+	 * Check which VF has raised an interrupt and schedule the
+	 * corresponding work queue to process the messages
+ */
+ for (i = 0; i < 2; i++) {
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_VFPF_MBOX_INTX(i));
+
+ for (vf_idx = i * 64; vf_idx < cptpf->enabled_vfs; vf_idx++) {
+ vf = &cptpf->vf[vf_idx];
+ if (intr & (1ULL << vf->intr_idx)) {
+ queue_work(cptpf->vfpf_mbox_wq,
+ &vf->vfpf_mbox_work);
+ /* Clear the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM,
+ 0, RVU_PF_VFPF_MBOX_INTX(i),
+ BIT_ULL(vf->intr_idx));
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_cptvf_info *vf;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, i, err;
+
+ vf = container_of(work, struct otx2_cptvf_info, vfpf_mbox_work);
+ cptpf = vf->cptpf;
+ mbox = &cptpf->vfpf_mbox;
+ /* sync with mbox memory region */
+ smp_rmb();
+ mdev = &mbox->dev[vf->vf_id];
+ /* Process received mbox messages */
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < req_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+
+ /* Set which VF sent this message based on mbox IRQ */
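+			/*
+			 * Assuming the usual RVU pcifunc layout (PF number in
+			 * the upper bits, function index in the low bits),
+			 * VF0 of this PF gets func = 1, VF1 gets func = 2 and
+			 * so on; func = 0 denotes the PF itself.
+			 */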
+ msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
+ ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
+
+ err = cptpf_handle_vf_req(cptpf, vf, msg,
+ msg->next_msgoff - offset);
+ /*
+		 * Behave like the AF and drop the msg if there is
+		 * no memory; timeout handling also goes here
+ */
+ if (err == -ENOMEM || err == -EIO)
+ break;
+ offset = msg->next_msgoff;
+ }
+ /* Send mbox responses to VF */
+ if (mdev->num_msgs)
+ otx2_mbox_msg_send(mbox, vf->vf_id);
+}
+
+irqreturn_t otx2_cptpf_afpf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptpf_dev *cptpf = arg;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT);
+
+ if (intr & 0x1ULL) {
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptpf->afpf_mbox_wq, &cptpf->afpf_mbox_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT,
+ 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
+static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
+ struct mbox_msghdr *msg)
+{
+ struct device *dev = &cptpf->pdev->dev;
+ struct cpt_rd_wr_reg_msg *rsp_rd_wr;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(dev, "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
+ RVU_PFVF_PF_MASK;
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
+ if (msg->rc) {
+ dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_rd_wr->is_write)
+ *rsp_rd_wr->ret_val = rsp_rd_wr->val;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ if (!msg->rc)
+ cptpf->lfs.are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ if (!msg->rc)
+ cptpf->lfs.are_lfs_attached = 0;
+ break;
+
+ default:
+ dev_err(dev,
+ "Unsupported msg %d received.\n", msg->id);
+ break;
+ }
+}
+
+static void forward_to_vf(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *msg,
+ int vf_id, int size)
+{
+ struct otx2_mbox *vfpf_mbox;
+ struct mbox_msghdr *fwd;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ vfpf_mbox = &cptpf->vfpf_mbox;
+ vf_id--;
+ if (vf_id >= cptpf->enabled_vfs) {
+ dev_err(&cptpf->pdev->dev,
+ "MBOX msg to unknown VF: %d >= %d\n",
+ vf_id, cptpf->enabled_vfs);
+ return;
+ }
+ if (msg->id == MBOX_MSG_VF_FLR)
+ return;
+
+ fwd = otx2_mbox_alloc_msg(vfpf_mbox, vf_id, size);
+ if (!fwd) {
+ dev_err(&cptpf->pdev->dev,
+ "Forwarding to VF%d failed.\n", vf_id);
+ return;
+ }
+ memcpy((uint8_t *)fwd + sizeof(struct mbox_msghdr),
+ (uint8_t *)msg + sizeof(struct mbox_msghdr), size);
+ fwd->id = msg->id;
+ fwd->pcifunc = msg->pcifunc;
+ fwd->sig = msg->sig;
+ fwd->ver = msg->ver;
+ fwd->rc = msg->rc;
+}
+
+/* Handle mailbox messages received from AF */
+void otx2_cptpf_afpf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptpf_dev *cptpf;
+ struct otx2_mbox *afpf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, vf_id, i;
+
+ cptpf = container_of(work, struct otx2_cptpf_dev, afpf_mbox_work);
+ afpf_mbox = &cptpf->afpf_mbox;
+ mdev = &afpf_mbox->dev[0];
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + afpf_mbox->rx_start);
+ offset = ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + afpf_mbox->rx_start +
+ offset);
+ vf_id = (msg->pcifunc >> RVU_PFVF_FUNC_SHIFT) &
+ RVU_PFVF_FUNC_MASK;
+ if (vf_id > 0)
+ forward_to_vf(cptpf, msg, vf_id,
+ msg->next_msgoff - offset);
+ else
+ process_afpf_mbox_msg(cptpf, msg);
+
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(afpf_mbox, 0);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
new file mode 100644
index 000000000000..1dc3ba298139
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <linux/ctype.h>
+#include <linux/firmware.h>
+#include "otx2_cptpf_ucode.h"
+#include "otx2_cpt_common.h"
+#include "otx2_cptpf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cpt_reqmgr.h"
+#include "rvu_reg.h"
+
+#define CSR_DELAY 30
+
+#define LOADFVC_RLEN 8
+#define LOADFVC_MAJOR_OP 0x01
+#define LOADFVC_MINOR_OP 0x08
+
+struct fw_info_t {
+ struct list_head ucodes;
+};
+
+static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_bitmap bmap = { {0} };
+ bool found = false;
+ int i;
+
+ if (eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(dev, "unsupported number of engines %d on octeontx2\n",
+ eng_grp->g->engs_num);
+ return bmap;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (eng_grp->engs[i].type) {
+ bitmap_or(bmap.bits, bmap.bits,
+ eng_grp->engs[i].bmap,
+ eng_grp->g->engs_num);
+ bmap.size = eng_grp->g->engs_num;
+ found = true;
+ }
+ }
+
+ if (!found)
+ dev_err(dev, "No engines reserved for engine group %d\n",
+ eng_grp->idx);
+ return bmap;
+}
+
+static int is_eng_type(int val, int eng_type)
+{
+ return val & (1 << eng_type);
+}
+
+static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+	return eng_grp->ucode[1].type != 0;
+}
+
+static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
+ const char *filename)
+{
+ strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+}
+
+static char *get_eng_type_str(int eng_type)
+{
+ char *str = "unknown";
+
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ str = "SE";
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ str = "IE";
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ str = "AE";
+ break;
+ }
+ return str;
+}
+
+static char *get_ucode_type_str(int ucode_type)
+{
+ char *str = "unknown";
+
+ switch (ucode_type) {
+ case (1 << OTX2_CPT_SE_TYPES):
+ str = "SE";
+ break;
+
+ case (1 << OTX2_CPT_IE_TYPES):
+ str = "IE";
+ break;
+
+ case (1 << OTX2_CPT_AE_TYPES):
+ str = "AE";
+ break;
+
+ case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
+ str = "SE+IPSEC";
+ break;
+ }
+ return str;
+}
+
+static int get_ucode_type(struct device *dev,
+ struct otx2_cpt_ucode_hdr *ucode_hdr,
+ int *ucode_type)
+{
+ struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
+ char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
+ char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ struct pci_dev *pdev = cptpf->pdev;
+ int i, val = 0;
+ u8 nn;
+
+ strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ for (i = 0; i < strlen(tmp_ver_str); i++)
+ tmp_ver_str[i] = tolower(tmp_ver_str[i]);
+
+ sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
+ if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
+ return -EINVAL;
+
+ nn = ucode_hdr->ver_num.nn;
+ if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
+ nn == OTX2_CPT_SE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_SE_TYPES;
+ if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
+ nn == OTX2_CPT_IE_UC_TYPE3))
+ val |= 1 << OTX2_CPT_IE_TYPES;
+ if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
+ nn == OTX2_CPT_AE_UC_TYPE)
+ val |= 1 << OTX2_CPT_AE_TYPES;
+
+ *ucode_type = val;
+
+ if (!val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
+ dma_addr_t dma_addr)
+{
+ return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_UCODE_BASE(eng),
+ (u64)dma_addr);
+}
+
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_engs_rsvd *engs;
+ dma_addr_t dma_addr;
+ int i, bit, ret;
+
+ /* Set PF number for microcode fetches */
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_PF_FUNC,
+ cptpf->pf_id << RVU_PFVF_PF_SHIFT);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ dma_addr = engs->ucode->dma;
+
+ /*
+		 * Set UCODE_BASE only for the cores which are not used;
+		 * other cores should already have a valid UCODE_BASE set
+ */
+ for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
+ if (!eng_grp->g->eng_ref_cnt[bit]) {
+ ret = __write_ucode_base(cptpf, bit, dma_addr);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int i, timeout = 10;
+ int busy, ret;
+ u64 reg = 0;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Detach the cores from group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & (1ull << eng_grp->idx)) {
+ eng_grp->g->eng_ref_cnt[i]--;
+ reg &= ~(1ull << eng_grp->idx);
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores only if they are not used anymore */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ if (!eng_grp->g->eng_ref_cnt[i]) {
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ u64 reg = 0;
+ int i, ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ /* Attach the cores to the group */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), &reg);
+ if (ret)
+ return ret;
+
+ if (!(reg & (1ull << eng_grp->idx))) {
+ eng_grp->g->eng_ref_cnt[i]++;
+ reg |= 1ull << eng_grp->idx;
+
+ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), reg);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Enable the cores */
+ for_each_set_bit(i, bmap.bits, bmap.size) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x1);
+ if (ret)
+ return ret;
+ }
+ ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+
+ return ret;
+}
+
+static int load_fw(struct device *dev, struct fw_info_t *fw_info,
+ char *filename)
+{
+ struct otx2_cpt_ucode_hdr *ucode_hdr;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int ucode_type, ucode_size;
+ int ret;
+
+ uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
+ if (!uc_info)
+ return -ENOMEM;
+
+ ret = request_firmware(&uc_info->fw, filename, dev);
+ if (ret)
+ goto free_uc_info;
+
+ ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
+ ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
+ if (ret)
+ goto release_fw;
+
+ ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ if (!ucode_size) {
+ dev_err(dev, "Ucode %s invalid size\n", filename);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ set_ucode_filename(&uc_info->ucode, filename);
+ memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ);
+ uc_info->ucode.ver_num = ucode_hdr->ver_num;
+ uc_info->ucode.type = ucode_type;
+ uc_info->ucode.size = ucode_size;
+ list_add_tail(&uc_info->list, &fw_info->ucodes);
+
+ return 0;
+
+release_fw:
+ release_firmware(uc_info->fw);
+free_uc_info:
+ kfree(uc_info);
+ return ret;
+}
+
+static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr, *temp;
+
+ if (!fw_info)
+ return;
+
+ list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
+ list_del(&curr->list);
+ release_firmware(curr->fw);
+ kfree(curr);
+ }
+}
+
+static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
+ int ucode_type)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ if (!is_eng_type(curr->ucode.type, ucode_type))
+ continue;
+
+ return curr;
+ }
+ return NULL;
+}
+
+static void print_uc_info(struct fw_info_t *fw_info)
+{
+ struct otx2_cpt_uc_info_t *curr;
+
+ list_for_each_entry(curr, &fw_info->ucodes, list) {
+ pr_debug("Ucode filename %s\n", curr->ucode.filename);
+ pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
+ pr_debug("Ucode version %d.%d.%d.%d\n",
+ curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
+ curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
+ pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
+ get_ucode_type_str(curr->ucode.type));
+ pr_debug("Ucode size %d\n", curr->ucode.size);
+ pr_debug("Ucode ptr %p\n", curr->fw->data);
+ }
+}
+
+static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
+{
+ char filename[OTX2_CPT_NAME_LENGTH];
+ char eng_type[8] = {0};
+ int ret, e, i;
+
+ INIT_LIST_HEAD(&fw_info->ucodes);
+
+ for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
+ strcpy(eng_type, get_eng_type_str(e));
+ for (i = 0; i < strlen(eng_type); i++)
+ eng_type[i] = tolower(eng_type[i]);
+
+ snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
+ pdev->revision, eng_type);
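+		/*
+		 * For example, on revision-1 silicon the "se" engine type
+		 * resolves to "mrvl/cpt01/se.out"; one firmware image is
+		 * requested per engine type.
+		 */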
+ /* Request firmware for each engine type */
+ ret = load_fw(&pdev->dev, fw_info, filename);
+ if (ret)
+ goto release_fw;
+ }
+ print_uc_info(fw_info);
+ return 0;
+
+release_fw:
+ cpt_ucode_release_fw(fw_info);
+ return ret;
+}
+
+static struct otx2_cpt_engs_rsvd *find_engines_by_type(
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ if (eng_grp->engs[i].type == eng_type)
+ return &eng_grp->engs[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
+ int eng_type)
+{
+ struct otx2_cpt_engs_rsvd *engs;
+
+ engs = find_engines_by_type(eng_grp, eng_type);
+
+ return (engs != NULL ? 1 : 0);
+}
+
+static int update_engines_avail_count(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs, int val)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail->se_cnt += val;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail->ie_cnt += val;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail->ae_cnt += val;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int update_engines_offset(struct device *dev,
+ struct otx2_cpt_engs_available *avail,
+ struct otx2_cpt_engs_rsvd *engs)
+{
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ engs->offset = 0;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ engs->offset = avail->max_se_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int release_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type)
+ continue;
+
+ if (grp->engs[i].count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail,
+ &grp->engs[i],
+ grp->engs[i].count);
+ if (ret)
+ return ret;
+ }
+
+ grp->engs[i].type = 0;
+ grp->engs[i].count = 0;
+ grp->engs[i].offset = 0;
+ grp->engs[i].ucode = NULL;
+ bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
+ }
+ return 0;
+}
+
+static int do_reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs)
+{
+ struct otx2_cpt_engs_rsvd *engs = NULL;
+ int i, ret;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!grp->engs[i].type) {
+ engs = &grp->engs[i];
+ break;
+ }
+ }
+
+ if (!engs)
+ return -ENOMEM;
+
+ engs->type = req_engs->type;
+ engs->count = req_engs->count;
+
+ ret = update_engines_offset(dev, &grp->g->avail, engs);
+ if (ret)
+ return ret;
+
+ if (engs->count > 0) {
+ ret = update_engines_avail_count(dev, &grp->g->avail, engs,
+ -engs->count);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int check_engines_availability(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_eng)
+{
+ int avail_cnt = 0;
+
+ switch (req_eng->type) {
+ case OTX2_CPT_SE_TYPES:
+ avail_cnt = grp->g->avail.se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ avail_cnt = grp->g->avail.ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ avail_cnt = grp->g->avail.ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", req_eng->type);
+ return -EINVAL;
+ }
+
+ if (avail_cnt < req_eng->count) {
+ dev_err(dev,
+			"Error: available %s engines %d < requested %d\n",
+ get_eng_type_str(req_eng->type),
+ avail_cnt, req_eng->count);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int reserve_engines(struct device *dev,
+ struct otx2_cpt_eng_grp_info *grp,
+ struct otx2_cpt_engines *req_engs, int ucodes_cnt)
+{
+ int i, ret = 0;
+
+	/* Validate that the requested number of engines is available */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = check_engines_availability(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Reserve requested engines for this engine group */
+ for (i = 0; i < ucodes_cnt; i++) {
+ ret = do_reserve_engines(dev, grp, &req_engs[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
+{
+ if (ucode->va) {
+ dma_free_coherent(dev, ucode->size, ucode->va, ucode->dma);
+ ucode->va = NULL;
+ ucode->dma = 0;
+ ucode->size = 0;
+ }
+
+ memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
+ memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
+ set_ucode_filename(ucode, "");
+ ucode->type = 0;
+}
+
+static int copy_ucode_to_dma_mem(struct device *dev,
+ struct otx2_cpt_ucode *ucode,
+ const u8 *ucode_data)
+{
+ u32 i;
+
+ /* Allocate DMAable space */
+ ucode->va = dma_alloc_coherent(dev, ucode->size, &ucode->dma,
+ GFP_KERNEL);
+ if (!ucode->va)
+ return -ENOMEM;
+
+ memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
+ ucode->size);
+
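+	/*
+	 * On a little-endian host the two swap passes below end up reversing
+	 * the order of the four 16-bit words within each 64-bit chunk while
+	 * preserving the byte order inside each 16-bit word.
+	 */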
+ /* Byte swap 64-bit */
+ for (i = 0; i < (ucode->size / 8); i++)
+ cpu_to_be64s(&((u64 *)ucode->va)[i]);
+ /* Ucode needs 16-bit swap */
+ for (i = 0; i < (ucode->size / 2); i++)
+ cpu_to_be16s(&((u16 *)ucode->va)[i]);
+ return 0;
+}
+
+static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int ret;
+
+ /* Point microcode to each core of the group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Attach the cores to the group and enable them */
+ ret = cpt_attach_and_enable_cores(eng_grp, obj);
+
+ return ret;
+}
+
+static int disable_eng_grp(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ int i, ret;
+
+ /* Disable all engines used by this group */
+ ret = cpt_detach_and_disable_cores(eng_grp, obj);
+ if (ret)
+ return ret;
+
+ /* Unload ucode used by this engine group */
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ if (!eng_grp->engs[i].type)
+ continue;
+
+ eng_grp->engs[i].ucode = &eng_grp->ucode[0];
+ }
+
+ /* Clear UCODE_BASE register for each engine used by this group */
+ ret = cpt_set_ucode_base(eng_grp, obj);
+
+ return ret;
+}
+
+static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
+ struct otx2_cpt_eng_grp_info *src_grp)
+{
+ /* Setup fields for engine group which is mirrored */
+ src_grp->mirror.is_ena = false;
+ src_grp->mirror.idx = 0;
+ src_grp->mirror.ref_count++;
+
+ /* Setup fields for mirroring engine group */
+ dst_grp->mirror.is_ena = true;
+ dst_grp->mirror.idx = src_grp->idx;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
+{
+ struct otx2_cpt_eng_grp_info *src_grp;
+
+ if (!dst_grp->mirror.is_ena)
+ return;
+
+ src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];
+
+ src_grp->mirror.ref_count--;
+ dst_grp->mirror.is_ena = false;
+ dst_grp->mirror.idx = 0;
+ dst_grp->mirror.ref_count = 0;
+}
+
+static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
+ struct otx2_cpt_engines *engs, int engs_cnt)
+{
+ struct otx2_cpt_engs_rsvd *mirrored_engs;
+ int i;
+
+ for (i = 0; i < engs_cnt; i++) {
+ mirrored_engs = find_engines_by_type(mirror_eng_grp,
+ engs[i].type);
+ if (!mirrored_engs)
+ continue;
+
+ /*
+		 * If the mirrored group has this type of engines attached then
+		 * there are 3 possible scenarios:
+		 * 1) mirrored_engs.count == engs[i].count: all engines from the
+		 * mirrored engine group will be shared with this engine group
+		 * 2) mirrored_engs.count > engs[i].count: only a subset of the
+		 * engines from the mirrored engine group will be shared with
+		 * this engine group
+		 * 3) mirrored_engs.count < engs[i].count: all engines from the
+		 * mirrored engine group will be shared with this group and
+		 * additional engines will be reserved for exclusive use by
+		 * this engine group
+ */
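+		/*
+		 * Worked example: if the mirrored group already holds 10 SE
+		 * engines and this group requested 12, count drops to 2 and
+		 * only 2 extra SE engines are reserved exclusively for this
+		 * group (scenario 3); a request of 8 leaves a negative count
+		 * and nothing extra is reserved (scenario 2).
+		 */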
+ engs[i].count -= mirrored_engs->count;
+ }
+}
+
+static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
+ struct otx2_cpt_eng_grp_info *grp)
+{
+ struct otx2_cpt_eng_grps *eng_grps = grp->g;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ continue;
+ if (eng_grps->grp[i].ucode[0].type &&
+ eng_grps->grp[i].ucode[1].type)
+ continue;
+ if (grp->idx == i)
+ continue;
+ if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
+ grp->ucode[0].ver_str,
+ OTX2_CPT_UCODE_VER_STR_SZ))
+ return &eng_grps->grp[i];
+ }
+
+ return NULL;
+}
+
+static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ if (!eng_grps->grp[i].is_enabled)
+ return &eng_grps->grp[i];
+ }
+ return NULL;
+}
+
+static int eng_grp_update_masks(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
+ struct otx2_cpt_bitmap tmp_bmap = { {0} };
+ int i, j, cnt, max_cnt;
+ int bit;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+ if (engs->count <= 0)
+ continue;
+
+ switch (engs->type) {
+ case OTX2_CPT_SE_TYPES:
+ max_cnt = eng_grp->g->avail.max_se_cnt;
+ break;
+
+ case OTX2_CPT_IE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ie_cnt;
+ break;
+
+ case OTX2_CPT_AE_TYPES:
+ max_cnt = eng_grp->g->avail.max_ae_cnt;
+ break;
+
+ default:
+ dev_err(dev, "Invalid engine type %d\n", engs->type);
+ return -EINVAL;
+ }
+
+ cnt = engs->count;
+ WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
+ bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
+ for (j = engs->offset; j < engs->offset + max_cnt; j++) {
+ if (!eng_grp->g->eng_ref_cnt[j]) {
+ bitmap_set(tmp_bmap.bits, j, 1);
+ cnt--;
+ if (!cnt)
+ break;
+ }
+ }
+
+ if (cnt)
+ return -ENOSPC;
+
+ bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
+ }
+
+ if (!eng_grp->mirror.is_ena)
+ return 0;
+
+ for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
+ engs = &eng_grp->engs[i];
+ if (!engs->type)
+ continue;
+
+ mirrored_engs = find_engines_by_type(
+ &eng_grp->g->grp[eng_grp->mirror.idx],
+ engs->type);
+ WARN_ON(!mirrored_engs && engs->count <= 0);
+ if (!mirrored_engs)
+ continue;
+
+ bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ if (engs->count < 0) {
+ bit = find_first_bit(mirrored_engs->bmap,
+ eng_grp->g->engs_num);
+ bitmap_clear(tmp_bmap.bits, bit, -engs->count);
+ }
+ bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
+ eng_grp->g->engs_num);
+ }
+ return 0;
+}
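+
+/*
+ * Mask composition example (hypothetical, assuming the mirrored group
+ * owns contiguous SE engines 0-9): with engs->count == 2 the two
+ * exclusively reserved bits picked in the first loop are simply OR-ed
+ * with bits 0-9 of the mirror; with engs->count == -2 bits 0 and 1 are
+ * cleared from the copied mirror mask first, so only engines 2-9 end up
+ * shared with this group.
+ */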
+
+static int delete_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ int ret;
+
+ if (!eng_grp->is_enabled)
+ return 0;
+
+ if (eng_grp->mirror.ref_count)
+ return -EINVAL;
+
+ /* Remove engine group mirroring if enabled */
+ remove_eng_grp_mirroring(eng_grp);
+
+ /* Disable engine group */
+ ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
+ if (ret)
+ return ret;
+
+ /* Release all engines held by this engine group */
+ ret = release_engines(dev, eng_grp);
+ if (ret)
+ return ret;
+
+ eng_grp->is_enabled = false;
+
+ return 0;
+}
+
+static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
+{
+ struct otx2_cpt_ucode *ucode;
+
+ if (eng_grp->mirror.is_ena)
+ ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
+ else
+ ucode = &eng_grp->ucode[0];
+ WARN_ON(!eng_grp->engs[0].type);
+ eng_grp->engs[0].ucode = ucode;
+
+ if (eng_grp->engs[1].type) {
+ if (is_2nd_ucode_used(eng_grp))
+ eng_grp->engs[1].ucode = &eng_grp->ucode[1];
+ else
+ eng_grp->engs[1].ucode = ucode;
+ }
+}
+
+static int create_engine_group(struct device *dev,
+ struct otx2_cpt_eng_grps *eng_grps,
+ struct otx2_cpt_engines *engs, int ucodes_cnt,
+ void *ucode_data[], int is_print)
+{
+ struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
+ struct otx2_cpt_eng_grp_info *eng_grp;
+ struct otx2_cpt_uc_info_t *uc_info;
+ int i, ret = 0;
+
+ /* Find engine group which is not used */
+ eng_grp = find_unused_eng_grp(eng_grps);
+ if (!eng_grp) {
+ dev_err(dev, "Error all engine groups are being used\n");
+ return -ENOSPC;
+ }
+ /* Load ucode */
+ for (i = 0; i < ucodes_cnt; i++) {
+ uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
+ eng_grp->ucode[i] = uc_info->ucode;
+ ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
+ uc_info->fw->data);
+ if (ret)
+ goto unload_ucode;
+ }
+
+ /* Check if this group mirrors another existing engine group */
+ mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
+ if (mirrored_eng_grp) {
+ /* Setup mirroring */
+ setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
+
+ /*
+ * Update count of requested engines because some
+ * of them might be shared with mirrored group
+ */
+ update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
+ }
+ ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
+ if (ret)
+ goto unload_ucode;
+
+ /* Update ucode pointers used by engines */
+ update_ucode_ptrs(eng_grp);
+
+ /* Update engine masks used by this group */
+ ret = eng_grp_update_masks(dev, eng_grp);
+ if (ret)
+ goto release_engs;
+
+ /* Enable engine group */
+ ret = enable_eng_grp(eng_grp, eng_grps->obj);
+ if (ret)
+ goto release_engs;
+
+ /*
+ * If this engine group mirrors another engine group
+ * then we need to unload ucode as we will use ucode
+ * from mirrored engine group
+ */
+ if (eng_grp->mirror.is_ena)
+ ucode_unload(dev, &eng_grp->ucode[0]);
+
+ eng_grp->is_enabled = true;
+
+ if (!is_print)
+ return 0;
+
+ if (mirrored_eng_grp)
+ dev_info(dev,
+ "Engine_group%d: reuse microcode %s from group %d\n",
+ eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
+ mirrored_eng_grp->idx);
+ else
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[0].ver_str);
+ if (is_2nd_ucode_used(eng_grp))
+ dev_info(dev, "Engine_group%d: microcode loaded %s\n",
+ eng_grp->idx, eng_grp->ucode[1].ver_str);
+
+ return 0;
+
+release_engs:
+ release_engines(dev, eng_grp);
+unload_ucode:
+ ucode_unload(dev, &eng_grp->ucode[0]);
+ ucode_unload(dev, &eng_grp->ucode[1]);
+ return ret;
+}
+
+static void delete_engine_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ int i;
+
+ /* First delete all mirroring engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ if (eng_grps->grp[i].mirror.is_ena)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+
+ /* Delete remaining engine groups */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
+ delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
+}
+
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
+{
+ int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ struct otx2_cpt_eng_grp_info *grp;
+ int i;
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ if (!grp->is_enabled)
+ continue;
+
+ if (eng_type == OTX2_CPT_SE_TYPES) {
+ if (eng_grp_has_eng_type(grp, eng_type) &&
+ !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
+ eng_grp_num = i;
+ break;
+ }
+ } else {
+ if (eng_grp_has_eng_type(grp, eng_type)) {
+ eng_grp_num = i;
+ break;
+ }
+ }
+ }
+ return eng_grp_num;
+}
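+
+/*
+ * Selection note: for OTX2_CPT_SE_TYPES the loop above deliberately
+ * skips the combined SE+IE group created for IPSec and returns the
+ * SE-only group used for kernel crypto; for any other engine type the
+ * first enabled group containing that type is returned.
+ */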
+
+int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct fw_info_t fw_info;
+ int ret;
+
+ /*
+ * We don't create the engine groups if they were already
+ * created (when the user enabled VFs for the first time)
+ */
+ if (eng_grps->is_grps_created)
+ return 0;
+
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret)
+ return ret;
+
+ /*
+ * Create engine group with SE engines for kernel
+ * crypto functionality (symmetric crypto)
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto release_fw;
+
+ /*
+ * Create engine group with SE+IE engines for IPSec.
+ * All SE engines will be shared with engine group 0.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+
+ if (uc_info[1] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for IE");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = eng_grps->avail.max_se_cnt;
+ engs[1].type = OTX2_CPT_IE_TYPES;
+ engs[1].count = eng_grps->avail.max_ie_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ /*
+ * Create engine group with AE engines for asymmetric
+ * crypto functionality.
+ */
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for AE");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = eng_grps->avail.max_ae_cnt;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 1);
+ if (ret)
+ goto delete_eng_grp;
+
+ eng_grps->is_grps_created = true;
+
+ cpt_ucode_release_fw(&fw_info);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+ return ret;
+}
+
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+ int i, ret, busy, total_cores;
+ int timeout = 10;
+ u64 reg = 0;
+
+ total_cores = cptpf->eng_grps.avail.max_se_cnt +
+ cptpf->eng_grps.avail.max_ie_cnt +
+ cptpf->eng_grps.avail.max_ae_cnt;
+
+ /* Disengage the cores from groups */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL2(i), 0x0);
+ if (ret)
+ return ret;
+
+ cptpf->eng_grps.eng_ref_cnt[i] = 0;
+ }
+ ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+ if (ret)
+ return ret;
+
+ /* Wait for cores to become idle */
+ do {
+ busy = 0;
+ usleep_range(10000, 20000);
+ if (timeout-- < 0)
+ return -EBUSY;
+
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
+ cptpf->pdev,
+ CPT_AF_EXEX_STS(i), &reg);
+ if (ret)
+ return ret;
+
+ if (reg & 0x1) {
+ busy = 1;
+ break;
+ }
+ }
+ } while (busy);
+
+ /* Disable the cores */
+ for (i = 0; i < total_cores; i++) {
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x0);
+ if (ret)
+ return ret;
+ }
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
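+
+/*
+ * Shutdown sequence note: the cores are first detached from their
+ * groups via CPT_AF_EXEX_CTL2, then polled through CPT_AF_EXEX_STS
+ * (bit 0 set means busy) for up to ten 10-20 ms intervals, and finally
+ * disabled through CPT_AF_EXEX_CTL.
+ */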
+
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j;
+
+ delete_engine_grps(pdev, eng_grps);
+ /* Release memory */
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ kfree(grp->engs[j].bmap);
+ grp->engs[j].bmap = NULL;
+ }
+ }
+}
+
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_eng_grp_info *grp;
+ int i, j, ret;
+
+ eng_grps->obj = pci_get_drvdata(pdev);
+ eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
+ eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
+ eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
+
+ eng_grps->engs_num = eng_grps->avail.max_se_cnt +
+ eng_grps->avail.max_ie_cnt +
+ eng_grps->avail.max_ae_cnt;
+ if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
+ dev_err(&pdev->dev,
+ "Number of engines %d > than max supported %d\n",
+ eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
+ ret = -EINVAL;
+ goto cleanup_eng_grps;
+ }
+
+ for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
+ grp = &eng_grps->grp[i];
+ grp->g = eng_grps;
+ grp->idx = i;
+
+ for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
+ grp->engs[j].bmap =
+ kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
+ sizeof(long), GFP_KERNEL);
+ if (!grp->engs[j].bmap) {
+ ret = -ENOMEM;
+ goto cleanup_eng_grps;
+ }
+ }
+ }
+ return 0;
+
+cleanup_eng_grps:
+ otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
+ return ret;
+}
+
+static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps)
+{
+ struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
+ struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct fw_info_t fw_info;
+ int ret;
+
+ ret = cpt_ucode_load_fw(pdev, &fw_info);
+ if (ret)
+ return ret;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for AE\n");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ engs[0].type = OTX2_CPT_AE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto release_fw;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for SE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_SE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
+ if (uc_info[0] == NULL) {
+ dev_err(&pdev->dev, "Unable to find firmware for IE\n");
+ ret = -EINVAL;
+ goto delete_eng_grp;
+ }
+ engs[0].type = OTX2_CPT_IE_TYPES;
+ engs[0].count = 2;
+
+ ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
+ (void **) uc_info, 0);
+ if (ret)
+ goto delete_eng_grp;
+
+ cpt_ucode_release_fw(&fw_info);
+ return 0;
+
+delete_eng_grp:
+ delete_engine_grps(pdev, eng_grps);
+release_fw:
+ cpt_ucode_release_fw(&fw_info);
+ return ret;
+}
+
+/*
+ * Get CPT HW capabilities using LOAD_FVC operation.
+ */
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
+{
+ struct otx2_cptlfs_info *lfs = &cptpf->lfs;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_opcode opcode;
+ union otx2_cpt_res_s *result;
+ union otx2_cpt_inst_s inst;
+ dma_addr_t rptr_baddr;
+ struct pci_dev *pdev;
+ u32 len, compl_rlen;
+ int ret, etype;
+ void *rptr;
+
+ /*
+ * We don't discover the engine capabilities again if it was
+ * already done (when the user enabled VFs for the first time)
+ */
+ if (cptpf->is_eng_caps_discovered)
+ return 0;
+
+ pdev = cptpf->pdev;
+ /*
+ * Create an engine group for each engine type to submit the
+ * LOAD_FVC op and read the engines' capabilities.
+ */
+ ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
+ if (ret)
+ goto delete_grps;
+
+ lfs->pdev = pdev;
+ lfs->reg_base = cptpf->reg_base;
+ lfs->mbox = &cptpf->afpf_mbox;
+ ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
+ OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret)
+ goto delete_grps;
+
+ compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
+ len = compl_rlen + LOADFVC_RLEN;
+
+ result = kzalloc(len, GFP_KERNEL);
+ if (!result) {
+ ret = -ENOMEM;
+ goto lf_cleanup;
+ }
+ rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
+ dev_err(&pdev->dev, "DMA mapping failed\n");
+ ret = -EFAULT;
+ goto free_result;
+ }
+ rptr = (u8 *)result + compl_rlen;
+
+ /* Fill in the command */
+ opcode.s.major = LOADFVC_MAJOR_OP;
+ opcode.s.minor = LOADFVC_MINOR_OP;
+
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = 0;
+ iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.cptr.u = 0;
+
+ for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+ iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
+ etype);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ otx2_cpt_send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+
+ while (result->s.compcode == OTX2_CPT_COMPLETION_CODE_INIT)
+ cpu_relax();
+
+ cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
+ }
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+ cptpf->is_eng_caps_discovered = true;
+
+free_result:
+ kfree(result);
+lf_cleanup:
+ otx2_cptlf_shutdown(&cptpf->lfs);
+delete_grps:
+ delete_engine_grps(pdev, &cptpf->eng_grps);
+
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
new file mode 100644
index 000000000000..6b0d432de0af
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTPF_UCODE_H
+#define __OTX2_CPTPF_UCODE_H
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include "otx2_cpt_hw_types.h"
+#include "otx2_cpt_common.h"
+
+/*
+ * On the OcteonTX2 platform IPSec ucode can use both IE and SE engines,
+ * therefore IE and SE engines can be attached to the same engine group.
+ */
+#define OTX2_CPT_MAX_ETYPES_PER_GRP 2
+
+/* CPT ucode signature size */
+#define OTX2_CPT_UCODE_SIGN_LEN 256
+
+/* Microcode version string length */
+#define OTX2_CPT_UCODE_VER_STR_SZ 44
+
+/* Maximum number of supported engines/cores on OcteonTX2 platform */
+#define OTX2_CPT_MAX_ENGINES 128
+
+#define OTX2_CPT_ENGS_BITMASK_LEN BITS_TO_LONGS(OTX2_CPT_MAX_ENGINES)
+
+/* Microcode types */
+enum otx2_cpt_ucode_type {
+ OTX2_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
+ OTX2_CPT_SE_UC_TYPE1 = 20,/* SE-MAIN - combination of 21 and 22 */
+ OTX2_CPT_SE_UC_TYPE2 = 21,/* Fast Path IPSec + AirCrypto */
+ OTX2_CPT_SE_UC_TYPE3 = 22,/*
+ * Hash + HMAC + FlexiCrypto + RNG +
+ * Full Feature IPSec + AirCrypto + Kasumi
+ */
+ OTX2_CPT_IE_UC_TYPE1 = 30, /* IE-MAIN - combination of 31 and 32 */
+ OTX2_CPT_IE_UC_TYPE2 = 31, /* Fast Path IPSec */
+ OTX2_CPT_IE_UC_TYPE3 = 32, /*
+ * Hash + HMAC + FlexiCrypto + RNG +
+ * Full Feature IPSec
+ */
+};
+
+struct otx2_cpt_bitmap {
+ unsigned long bits[OTX2_CPT_ENGS_BITMASK_LEN];
+ int size;
+};
+
+struct otx2_cpt_engines {
+ int type;
+ int count;
+};
+
+/* Microcode version number */
+struct otx2_cpt_ucode_ver_num {
+ u8 nn;
+ u8 xx;
+ u8 yy;
+ u8 zz;
+};
+
+struct otx2_cpt_ucode_hdr {
+ struct otx2_cpt_ucode_ver_num ver_num;
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
+ __be32 code_length;
+ u32 padding[3];
+};
+
+struct otx2_cpt_ucode {
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/*
+ * ucode version in readable
+ * format
+ */
+ struct otx2_cpt_ucode_ver_num ver_num;/* ucode version number */
+ char filename[OTX2_CPT_NAME_LENGTH];/* ucode filename */
+ dma_addr_t dma; /* phys address of ucode image */
+ void *va; /* virt address of ucode image */
+ u32 size; /* ucode image size */
+ int type; /* ucode image type SE, IE, AE or SE+IE */
+};
+
+struct otx2_cpt_uc_info_t {
+ struct list_head list;
+ struct otx2_cpt_ucode ucode;/* microcode information */
+ const struct firmware *fw;
+};
+
+/* Maximum and current number of engines available for all engine groups */
+struct otx2_cpt_engs_available {
+ int max_se_cnt;
+ int max_ie_cnt;
+ int max_ae_cnt;
+ int se_cnt;
+ int ie_cnt;
+ int ae_cnt;
+};
+
+/* Engines reserved to an engine group */
+struct otx2_cpt_engs_rsvd {
+ int type; /* engine type */
+ int count; /* number of engines attached */
+ int offset; /* constant offset of engine type in the bitmap */
+ unsigned long *bmap; /* attached engines bitmap */
+ struct otx2_cpt_ucode *ucode; /* ucode used by these engines */
+};
+
+struct otx2_cpt_mirror_info {
+ int is_ena; /*
+ * is mirroring enabled, it is set only for engine
+ * group which mirrors another engine group
+ */
+ int idx; /*
+ * index of engine group which is mirrored by this
+ * group, set only for engine group which mirrors
+ * another group
+ */
+ int ref_count; /*
+ * number of times this engine group is mirrored by
+ * other groups, this is set only for engine group
+ * which is mirrored by other group(s)
+ */
+};
+
+struct otx2_cpt_eng_grp_info {
+ struct otx2_cpt_eng_grps *g; /* pointer to engine_groups structure */
+ /* engines attached */
+ struct otx2_cpt_engs_rsvd engs[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* ucodes information */
+ struct otx2_cpt_ucode ucode[OTX2_CPT_MAX_ETYPES_PER_GRP];
+ /* engine group mirroring information */
+ struct otx2_cpt_mirror_info mirror;
+ int idx; /* engine group index */
+ bool is_enabled; /*
+ * is engine group enabled, engine group is enabled
+ * when it has engines attached and ucode loaded
+ */
+};
+
+struct otx2_cpt_eng_grps {
+ struct otx2_cpt_eng_grp_info grp[OTX2_CPT_MAX_ENGINE_GROUPS];
+ struct otx2_cpt_engs_available avail;
+ void *obj; /* device specific data */
+ int engs_num; /* total number of engines supported */
+ u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */
+ bool is_grps_created; /* Were the engine groups already created */
+};
+
+struct otx2_cptpf_dev;
+int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
+ struct otx2_cpt_eng_grps *eng_grps);
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
+int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
+int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf);
+
+#endif /* __OTX2_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
new file mode 100644
index 000000000000..4f0a169fddbd
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPTVF_H
+#define __OTX2_CPTVF_H
+
+#include "mbox.h"
+#include "otx2_cptlf.h"
+
+struct otx2_cptvf_dev {
+ void __iomem *reg_base; /* Register start address */
+ void __iomem *pfvf_mbox_base; /* PF-VF mbox start address */
+ struct pci_dev *pdev; /* PCI device handle */
+ struct otx2_cptlfs_info lfs; /* CPT LFs attached to this VF */
+ u8 vf_id; /* Virtual function index */
+
+ /* PF <=> VF mbox */
+ struct otx2_mbox pfvf_mbox;
+ struct work_struct pfvf_mbox_work;
+ struct workqueue_struct *pfvf_mbox_wq;
+};
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
+
+#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
new file mode 100644
index 000000000000..a72723455df7
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -0,0 +1,1758 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/cryptd.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/xts.h>
+#include <crypto/gcm.h>
+#include <crypto/scatterwalk.h>
+#include <linux/rtnetlink.h>
+#include <linux/sort.h>
+#include <linux/module.h>
+#include "otx2_cptvf.h"
+#include "otx2_cptvf_algs.h"
+#include "otx2_cpt_reqmgr.h"
+
+/* Size of salt in AES GCM mode */
+#define AES_GCM_SALT_SIZE 4
+/* Size of IV in AES GCM mode */
+#define AES_GCM_IV_SIZE 8
+/* Size of ICV (Integrity Check Value) in AES GCM mode */
+#define AES_GCM_ICV_SIZE 16
+/* Offset of IV in AES GCM mode */
+#define AES_GCM_IV_OFFSET 8
+#define CONTROL_WORD_LEN 8
+#define KEY2_OFFSET 48
+#define DMA_MODE_FLAG(dma_mode) \
+ (((dma_mode) == OTX2_CPT_DMA_MODE_SG) ? (1 << 7) : 0)
+
+/* Truncated SHA digest size */
+#define SHA1_TRUNC_DIGEST_SIZE 12
+#define SHA256_TRUNC_DIGEST_SIZE 16
+#define SHA384_TRUNC_DIGEST_SIZE 24
+#define SHA512_TRUNC_DIGEST_SIZE 32
+
+static DEFINE_MUTEX(mutex);
+static int is_crypto_registered;
+
+struct cpt_device_desc {
+ struct pci_dev *dev;
+ int num_queues;
+};
+
+struct cpt_device_table {
+ atomic_t count;
+ struct cpt_device_desc desc[OTX2_CPT_MAX_LFS_NUM];
+};
+
+static struct cpt_device_table se_devices = {
+ .count = ATOMIC_INIT(0)
+};
+
+static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
+{
+ int count;
+
+ count = atomic_read(&se_devices.count);
+ if (count < 1)
+ return -ENODEV;
+
+ *cpu_num = get_cpu();
+ /*
+ * On the OcteonTX2 platform a CPT instruction queue is bound to
+ * each local function (LF); LFs in turn can be attached to a PF
+ * or a VF, therefore we always use the first device. We get
+ * maximum performance if one CPT queue is available for each
+ * cpu, otherwise CPT queues have to be shared between cpus.
+ */
+ if (*cpu_num >= se_devices.desc[0].num_queues)
+ *cpu_num %= se_devices.desc[0].num_queues;
+ *pdev = se_devices.desc[0].dev;
+
+ put_cpu();
+
+ return 0;
+}
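+
+/*
+ * Queue selection example (hypothetical numbers): with 9 instruction
+ * queues and the caller running on cpu 20, the modulo above maps the
+ * request to queue 20 % 9 = 2, so cpus share queues once the cpu count
+ * exceeds the number of available queues.
+ */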
+
+static inline int validate_hmac_cipher_null(struct otx2_cpt_req_info *cpt_req)
+{
+ struct otx2_cpt_req_ctx *rctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+
+ req = container_of(cpt_req->areq, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ rctx = aead_request_ctx(req);
+ if (memcmp(rctx->fctx.hmac.s.hmac_calc,
+ rctx->fctx.hmac.s.hmac_recv,
+ crypto_aead_authsize(tfm)) != 0)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct otx2_cpt_req_info *cpt_req;
+ struct pci_dev *pdev;
+
+ if (inst_info) {
+ cpt_req = inst_info->req;
+ if (!status) {
+ /*
+ * When the selected cipher is NULL we need to manually
+ * verify whether the calculated hmac value matches the
+ * received hmac value
+ */
+ if (cpt_req->req_type ==
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ &&
+ !cpt_req->is_enc)
+ status = validate_hmac_cipher_null(cpt_req);
+ }
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ if (areq)
+ areq->complete(areq, status);
+}
+
+static void output_iv_copyback(struct crypto_async_request *areq)
+{
+ struct otx2_cpt_req_info *req_info;
+ struct otx2_cpt_req_ctx *rctx;
+ struct skcipher_request *sreq;
+ struct crypto_skcipher *stfm;
+ struct otx2_cpt_enc_ctx *ctx;
+ u32 start, ivsize;
+
+ sreq = container_of(areq, struct skcipher_request, base);
+ stfm = crypto_skcipher_reqtfm(sreq);
+ ctx = crypto_skcipher_ctx(stfm);
+ if (ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) {
+ rctx = skcipher_request_ctx(sreq);
+ req_info = &rctx->cpt_req;
+ ivsize = crypto_skcipher_ivsize(stfm);
+ start = sreq->cryptlen - ivsize;
+
+ if (req_info->is_enc) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
+ ivsize, 0);
+ } else {
+ if (sreq->src != sreq->dst) {
+ scatterwalk_map_and_copy(sreq->iv, sreq->src,
+ start, ivsize, 0);
+ } else {
+ memcpy(sreq->iv, req_info->iv_out, ivsize);
+ kfree(req_info->iv_out);
+ }
+ }
+ }
+}
+
+static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
+{
+ struct otx2_cpt_inst_info *inst_info = arg2;
+ struct crypto_async_request *areq = arg1;
+ struct pci_dev *pdev;
+
+ if (areq) {
+ if (!status)
+ output_iv_copyback(areq);
+ if (inst_info) {
+ pdev = inst_info->pdev;
+ otx2_cpt_info_destroy(pdev, inst_info);
+ }
+ areq->complete(areq, status);
+ }
+}
+
+static inline void update_input_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *inp_sg,
+ u32 nbytes, u32 *argcnt)
+{
+ req_info->req.dlen += nbytes;
+
+ while (nbytes) {
+ u32 len = (nbytes < inp_sg->length) ? nbytes : inp_sg->length;
+ u8 *ptr = sg_virt(inp_sg);
+
+ req_info->in[*argcnt].vptr = (void *)ptr;
+ req_info->in[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ inp_sg = sg_next(inp_sg);
+ }
+}
+
+static inline void update_output_data(struct otx2_cpt_req_info *req_info,
+ struct scatterlist *outp_sg,
+ u32 offset, u32 nbytes, u32 *argcnt)
+{
+ u32 len, sg_len;
+ u8 *ptr;
+
+ req_info->rlen += nbytes;
+
+ while (nbytes) {
+ sg_len = outp_sg->length - offset;
+ len = (nbytes < sg_len) ? nbytes : sg_len;
+ ptr = sg_virt(outp_sg);
+
+ req_info->out[*argcnt].vptr = (void *) (ptr + offset);
+ req_info->out[*argcnt].size = len;
+ nbytes -= len;
+ ++(*argcnt);
+ offset = 0;
+ outp_sg = sg_next(outp_sg);
+ }
+}
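+
+/*
+ * Scatterlist walk example (hypothetical lengths): for nbytes = 100
+ * spread over two 64-byte SG entries, the helpers above emit two
+ * components - 64 bytes from the first entry and 36 bytes from the
+ * second - and increase dlen/rlen by 100 in total.
+ */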
+
+static inline int create_ctx_hdr(struct skcipher_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int ivsize = crypto_skcipher_ivsize(stfm);
+ u32 start = req->cryptlen - ivsize;
+ gfp_t flags;
+
+ flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ if ((ctx->cipher_type == OTX2_CPT_AES_CBC ||
+ ctx->cipher_type == OTX2_CPT_DES3_CBC) &&
+ req->src == req->dst) {
+ req_info->iv_out = kmalloc(ivsize, flags);
+ if (!req_info->iv_out)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(req_info->iv_out, req->src,
+ start, ivsize, 0);
+ }
+ }
+ /* Encryption data length */
+ req_info->req.param1 = req->cryptlen;
+ /* Authentication data length */
+ req_info->req.param2 = 0;
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+
+ if (ctx->cipher_type == OTX2_CPT_AES_XTS)
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
+ else
+ memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
+
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
+
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+ /*
+ * Store the packet data information in the offset control
+ * word (the first 8 bytes of the input)
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline int create_input_list(struct skcipher_request *req, u32 enc,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+ int ret;
+
+ ret = create_ctx_hdr(req, enc, &argcnt);
+ if (ret)
+ return ret;
+
+ update_input_data(req_info, req->src, req->cryptlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_output_list(struct skcipher_request *req,
+ u32 enc_iv_len)
+{
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0;
+
+ /*
+ * OUTPUT buffer processing:
+ * AES encryption/decryption output is received in the
+ * following format
+ *
+ * |------IV------|-----ENCRYPTED/DECRYPTED DATA-----|
+ * |   16 bytes   |    request enc/dec data length   |
+ */
+ update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static int skcipher_do_fallback(struct skcipher_request *req, bool is_enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+ skcipher_request_set_tfm(&rctx->sk_fbk_req, ctx->fbk_cipher);
+ skcipher_request_set_callback(&rctx->sk_fbk_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->sk_fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_skcipher_encrypt(&rctx->sk_fbk_req) :
+ crypto_skcipher_decrypt(&rctx->sk_fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct otx2_cpt_req_ctx *rctx = skcipher_request_ctx(req);
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ if (!IS_ALIGNED(req->cryptlen, ctx->enc_align_len))
+ return -EINVAL;
+
+ if (req->cryptlen > OTX2_CPT_MAX_REQ_SIZE)
+ return skcipher_do_fallback(req, enc);
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ status = create_input_list(req, enc, enc_iv_len);
+ if (status)
+ return status;
+ create_output_list(req, enc_iv_len);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->callback = otx2_cpt_skcipher_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+ /*
+ * We perform an asynchronous send and, once the request is
+ * completed, the driver notifies us through the registered
+ * callback functions
+ */
+ status = otx2_cpt_do_request(pdev, req_info, cpu_num);
+
+ return status;
+}
+
+static int otx2_cpt_skcipher_encrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, true);
+}
+
+static int otx2_cpt_skcipher_decrypt(struct skcipher_request *req)
+{
+ return cpt_enc_dec(req, false);
+}
+
+static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const u8 *key2 = key + (keylen / 2);
+ const u8 *key1 = key;
+ int ret;
+
+ ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ if (ret)
+ return ret;
+ ctx->key_len = keylen;
+ ctx->enc_align_len = 1;
+ memcpy(ctx->enc_key, key1, keylen / 2);
+ memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
+ ctx->cipher_type = OTX2_CPT_AES_XTS;
+ switch (ctx->key_len) {
+ case 2 * AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case 2 * AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case 2 * AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+ ctx->enc_align_len = 8;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ u32 keylen, u8 cipher_type)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (cipher_type == OTX2_CPT_AES_CBC || cipher_type == OTX2_CPT_AES_ECB)
+ ctx->enc_align_len = 16;
+ else
+ ctx->enc_align_len = 1;
+
+ ctx->key_len = keylen;
+ ctx->cipher_type = cipher_type;
+
+ memcpy(ctx->enc_key, key, keylen);
+
+ return crypto_skcipher_setkey(ctx->fbk_cipher, key, keylen);
+}
+
+static int otx2_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_aes_setkey(tfm, key, keylen, OTX2_CPT_AES_ECB);
+}
+
+static int otx2_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_CBC);
+}
+
+static int otx2_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, u32 keylen)
+{
+ return cpt_des_setkey(tfm, key, keylen, OTX2_CPT_DES3_ECB);
+}
+
+static int cpt_skcipher_fallback_init(struct otx2_cpt_enc_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ memset(ctx, 0, sizeof(*ctx));
+ /*
+ * Additional memory for skcipher_request is
+ * allocated since the cryptd daemon uses
+ * this memory for request_ctx information
+ */
+ crypto_skcipher_set_reqsize(stfm, sizeof(struct otx2_cpt_req_ctx) +
+ sizeof(struct skcipher_request));
+
+ return cpt_skcipher_fallback_init(ctx, alg);
+}
+
+static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_skcipher(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
+ struct crypto_alg *alg)
+{
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->fbk_cipher =
+ crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fbk_cipher)) {
+ pr_err("%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fbk_cipher);
+ }
+ }
+ return 0;
+}
+
+static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(atfm);
+ struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ ctx->cipher_type = cipher_type;
+ ctx->mac_type = mac_type;
+
+ /*
+ * When the selected cipher is NULL we use the HMAC opcode instead of
+ * the FLEXICRYPTO opcode, therefore we don't need HASH algorithms
+ * for calculating ipad and opad
+ */
+ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
+ switch (ctx->mac_type) {
+ case OTX2_CPT_SHA1:
+ ctx->hashalg = crypto_alloc_shash("sha1", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA256:
+ ctx->hashalg = crypto_alloc_shash("sha256", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA384:
+ ctx->hashalg = crypto_alloc_shash("sha384", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+
+ case OTX2_CPT_SHA512:
+ ctx->hashalg = crypto_alloc_shash("sha512", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->hashalg))
+ return PTR_ERR(ctx->hashalg);
+ break;
+ }
+ }
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
+ case OTX2_CPT_AES_ECB:
+ ctx->enc_align_len = 16;
+ break;
+ case OTX2_CPT_DES3_CBC:
+ case OTX2_CPT_DES3_ECB:
+ ctx->enc_align_len = 8;
+ break;
+ case OTX2_CPT_AES_GCM:
+ case OTX2_CPT_CIPHER_NULL:
+ ctx->enc_align_len = 1;
+ break;
+ }
+ crypto_aead_set_reqsize(atfm, sizeof(struct otx2_cpt_req_ctx));
+
+ return cpt_aead_fallback_init(ctx, alg);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_CBC, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA1);
+}
+
+static int otx2_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA256);
+}
+
+static int otx2_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA384);
+}
+
+static int otx2_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_CIPHER_NULL, OTX2_CPT_SHA512);
+}
+
+static int otx2_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
+{
+ return cpt_aead_init(tfm, OTX2_CPT_AES_GCM, OTX2_CPT_MAC_NULL);
+}
+
+static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ kfree(ctx->ipad);
+ kfree(ctx->opad);
+ if (ctx->hashalg)
+ crypto_free_shash(ctx->hashalg);
+ kfree(ctx->sdesc);
+
+ if (ctx->fbk_cipher) {
+ crypto_free_aead(ctx->fbk_cipher);
+ ctx->fbk_cipher = NULL;
+ }
+}
+
+static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (crypto_rfc4106_check_authsize(authsize))
+ return -EINVAL;
+
+ tfm->authsize = authsize;
+ /* Set authsize for fallback case */
+ if (ctx->fbk_cipher)
+ ctx->fbk_cipher->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_null_set_authsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ ctx->is_trunc_hmac = true;
+ tfm->authsize = authsize;
+
+ return 0;
+}
+
+static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+ struct otx2_cpt_sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return NULL;
+
+ sdesc->shash.tfm = alg;
+
+ return sdesc;
+}
+
+static inline void swap_data32(void *buf, u32 len)
+{
+ cpu_to_be32_array(buf, buf, len / 4);
+}
+
+static inline void swap_data64(void *buf, u32 len)
+{
+ u64 *src = buf;
+ int i = 0;
+
+ for (i = 0 ; i < len / 8; i++, src++)
+ cpu_to_be64s(src);
+}
+
+static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
+{
+ struct sha512_state *sha512;
+ struct sha256_state *sha256;
+ struct sha1_state *sha1;
+
+ switch (mac_type) {
+ case OTX2_CPT_SHA1:
+ sha1 = (struct sha1_state *) in_pad;
+ swap_data32(sha1->state, SHA1_DIGEST_SIZE);
+ memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA256:
+ sha256 = (struct sha256_state *) in_pad;
+ swap_data32(sha256->state, SHA256_DIGEST_SIZE);
+ memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
+ break;
+
+ case OTX2_CPT_SHA384:
+ case OTX2_CPT_SHA512:
+ sha512 = (struct sha512_state *) in_pad;
+ swap_data64(sha512->state, SHA512_DIGEST_SIZE);
+ memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aead_hmac_init(struct crypto_aead *cipher)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ int state_size = crypto_shash_statesize(ctx->hashalg);
+ int ds = crypto_shash_digestsize(ctx->hashalg);
+ int bs = crypto_shash_blocksize(ctx->hashalg);
+ int authkeylen = ctx->auth_key_len;
+ u8 *ipad = NULL, *opad = NULL;
+ int ret = 0, icount = 0;
+
+ ctx->sdesc = alloc_sdesc(ctx->hashalg);
+ if (!ctx->sdesc)
+ return -ENOMEM;
+
+ ctx->ipad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ctx->opad = kzalloc(bs, GFP_KERNEL);
+ if (!ctx->opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ ipad = kzalloc(state_size, GFP_KERNEL);
+ if (!ipad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ opad = kzalloc(state_size, GFP_KERNEL);
+ if (!opad) {
+ ret = -ENOMEM;
+ goto calc_fail;
+ }
+
+ if (authkeylen > bs) {
+ ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
+ authkeylen, ipad);
+ if (ret)
+ goto calc_fail;
+
+ authkeylen = ds;
+ } else {
+ memcpy(ipad, ctx->key, authkeylen);
+ }
+
+ memset(ipad + authkeylen, 0, bs - authkeylen);
+ memcpy(opad, ipad, bs);
+
+ for (icount = 0; icount < bs; icount++) {
+ ipad[icount] ^= 0x36;
+ opad[icount] ^= 0x5c;
+ }
+
+ /*
+ * The partial hash calculated by the software
+ * algorithm is retrieved for IPAD & OPAD
+ */
+
+ /* IPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, ipad);
+ ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
+ if (ret)
+ goto calc_fail;
+
+ /* OPAD Calculation */
+ crypto_shash_init(&ctx->sdesc->shash);
+ crypto_shash_update(&ctx->sdesc->shash, opad, bs);
+ crypto_shash_export(&ctx->sdesc->shash, opad);
+ ret = copy_pad(ctx->mac_type, ctx->opad, opad);
+ if (ret)
+ goto calc_fail;
+
+ kfree(ipad);
+ kfree(opad);
+
+ return 0;
+
+calc_fail:
+ kfree(ctx->ipad);
+ ctx->ipad = NULL;
+ kfree(ctx->opad);
+ ctx->opad = NULL;
+ kfree(ipad);
+ kfree(opad);
+ kfree(ctx->sdesc);
+ ctx->sdesc = NULL;
+
+ return ret;
+}
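+
+/*
+ * Precomputation note: for hmac(sha256) the block size is 64 and the
+ * digest size is 32, so the key is zero-padded to 64 bytes, XOR-ed with
+ * 0x36/0x5c, hashed as a single block, and the 32 bytes of exported
+ * hash state are byte-swapped by copy_pad() into ctx->ipad/ctx->opad
+ * for the microcode.
+ */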
+
+static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ int enckeylen = 0, authkeylen = 0;
+ struct rtattr *rta = (void *)key;
+ int status;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < enckeylen)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ authkeylen = keylen - enckeylen;
+ memcpy(ctx->key, key, keylen);
+
+ switch (enckeylen) {
+ case AES_KEYSIZE_128:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ break;
+ default:
+ /* Invalid key length */
+ return -EINVAL;
+ }
+
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = authkeylen;
+
+ status = aead_hmac_init(cipher);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta = (void *)key;
+ int enckeylen = 0;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ return -EINVAL;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (enckeylen != 0)
+ return -EINVAL;
+
+ if (keylen > OTX2_CPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->enc_key_len = enckeylen;
+ ctx->auth_key_len = keylen;
+
+ return 0;
+}
+
+static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
+
+ /*
+ * For AES GCM we expect to get the encryption key (16, 24 or 32
+ * bytes) followed by the salt (4 bytes)
+ */
+ switch (keylen) {
+ case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_128_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_128;
+ break;
+ case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_192_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_192;
+ break;
+ case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
+ ctx->key_type = OTX2_CPT_AES_256_BIT;
+ ctx->enc_key_len = AES_KEYSIZE_256;
+ break;
+ default:
+ /* Invalid key and salt length */
+ return -EINVAL;
+ }
+
+ /* Store encryption key and salt */
+ memcpy(ctx->key, key, keylen);
+
+ return crypto_aead_setkey(ctx->fbk_cipher, key, keylen);
+}
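+
+/*
+ * Key layout example for the setkey above: a 20-byte key buffer is
+ * treated as a 16-byte AES-128 key followed by a 4-byte salt, a 36-byte
+ * buffer as a 32-byte AES-256 key plus salt; any other length is
+ * rejected with -EINVAL.
+ */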
+
+static inline int create_aead_ctx_hdr(struct aead_request *req, u32 enc,
+ u32 *argcnt)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct otx2_cpt_fc_ctx *fctx = &rctx->fctx;
+ int mac_len = crypto_aead_authsize(tfm);
+ int ds;
+
+ rctx->ctrl_word.e.enc_data_offset = req->assoclen;
+
+ switch (ctx->cipher_type) {
+ case OTX2_CPT_AES_CBC:
+ if (req->assoclen > 248 || !IS_ALIGNED(req->assoclen, 8))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_CPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
+ ctx->enc_key_len);
+ /* Copy IV to context */
+ memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
+
+ ds = crypto_shash_digestsize(ctx->hashalg);
+ if (ctx->mac_type == OTX2_CPT_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+ if (ctx->ipad)
+ memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
+ if (ctx->opad)
+ memcpy(fctx->hmac.e.opad, ctx->opad, ds);
+ break;
+
+ case OTX2_CPT_AES_GCM:
+ if (crypto_ipsec_check_assoclen(req->assoclen))
+ return -EINVAL;
+
+ fctx->enc.enc_ctrl.e.iv_source = OTX2_CPT_FROM_DPTR;
+ /* Copy encryption key to context */
+ memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
+ /* Copy salt to context */
+ memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
+ AES_GCM_SALT_SIZE);
+
+ rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
+ break;
+
+ default:
+ /* Unknown cipher type */
+ return -EINVAL;
+ }
+ cpu_to_be64s(&rctx->ctrl_word.flags);
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_FC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ if (enc) {
+ req_info->req.opcode.s.minor = 2;
+ req_info->req.param1 = req->cryptlen;
+ req_info->req.param2 = req->cryptlen + req->assoclen;
+ } else {
+ req_info->req.opcode.s.minor = 3;
+ req_info->req.param1 = req->cryptlen - mac_len;
+ req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
+ }
+
+ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
+ fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
+ fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
+ fctx->enc.enc_ctrl.e.mac_len = mac_len;
+ cpu_to_be64s(&fctx->enc.enc_ctrl.u);
+
+ /*
+ * Store the packet data information in the offset control
+ * word (the first 8 bytes of the input)
+ */
+ req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
+ req_info->in[*argcnt].size = CONTROL_WORD_LEN;
+ req_info->req.dlen += CONTROL_WORD_LEN;
+ ++(*argcnt);
+
+ req_info->in[*argcnt].vptr = (u8 *)fctx;
+ req_info->in[*argcnt].size = sizeof(struct otx2_cpt_fc_ctx);
+ req_info->req.dlen += sizeof(struct otx2_cpt_fc_ctx);
+ ++(*argcnt);
+
+ return 0;
+}
+
+static inline void create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
+ u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+
+ req_info->ctrl.s.dma_mode = OTX2_CPT_DMA_MODE_SG;
+ req_info->ctrl.s.se_req = 1;
+ req_info->req.opcode.s.major = OTX2_CPT_MAJOR_OP_HMAC |
+ DMA_MODE_FLAG(OTX2_CPT_DMA_MODE_SG);
+ req_info->is_trunc_hmac = ctx->is_trunc_hmac;
+
+ req_info->req.opcode.s.minor = 0;
+ req_info->req.param1 = ctx->auth_key_len;
+ req_info->req.param2 = ctx->mac_type << 8;
+
+ /* Add authentication key */
+ req_info->in[*argcnt].vptr = ctx->key;
+ req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
+ req_info->req.dlen += round_up(ctx->auth_key_len, 8);
+ ++(*argcnt);
+}
+
+static inline int create_aead_input_list(struct aead_request *req, u32 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen = req->cryptlen + req->assoclen;
+ u32 status, argcnt = 0;
+
+ status = create_aead_ctx_hdr(req, enc, &argcnt);
+ if (status)
+ return status;
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+
+ return 0;
+}
+
+static inline void create_aead_output_list(struct aead_request *req, u32 enc,
+ u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 argcnt = 0, outputlen = 0;
+
+ if (enc)
+ outputlen = req->cryptlen + req->assoclen + mac_len;
+ else
+ outputlen = req->cryptlen + req->assoclen - mac_len;
+
+ update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
+ req_info->out_cnt = argcnt;
+}
+
+static inline void create_aead_null_input_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ u32 inputlen, argcnt = 0;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ create_hmac_ctx_hdr(req, &argcnt, enc);
+ update_input_data(req_info, req->src, inputlen, &argcnt);
+ req_info->in_cnt = argcnt;
+}
+
+static inline int create_aead_null_output_list(struct aead_request *req,
+ u32 enc, u32 mac_len)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct scatterlist *dst;
+ u8 *ptr = NULL;
+ int argcnt = 0, status, offset;
+ u32 inputlen;
+
+ if (enc)
+ inputlen = req->cryptlen + req->assoclen;
+ else
+ inputlen = req->cryptlen + req->assoclen - mac_len;
+
+ /*
+ * If source and destination are different
+ * then copy payload to destination
+ */
+ if (req->src != req->dst) {
+
+ ptr = kmalloc(inputlen, (req_info->areq->flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
+ inputlen);
+ if (status != inputlen) {
+ status = -EINVAL;
+ goto error_free;
+ }
+ kfree(ptr);
+ }
+
+ if (enc) {
+ /*
+ * In an encryption scenario the hmac needs
+ * to be appended after the payload
+ */
+ dst = req->dst;
+ offset = inputlen;
+ while (offset >= dst->length) {
+ offset -= dst->length;
+ dst = sg_next(dst);
+ if (!dst)
+ return -ENOENT;
+ }
+
+ update_output_data(req_info, dst, offset, mac_len, &argcnt);
+ } else {
+ /*
+ * In a decryption scenario the hmac calculated for the received
+ * payload needs to be compared with the received hmac
+ */
+ status = sg_copy_buffer(req->src, sg_nents(req->src),
+ rctx->fctx.hmac.s.hmac_recv, mac_len,
+ inputlen, true);
+ if (status != mac_len)
+ return -EINVAL;
+
+ req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
+ req_info->out[argcnt].size = mac_len;
+ argcnt++;
+ }
+
+ req_info->out_cnt = argcnt;
+ return 0;
+
+error_free:
+ kfree(ptr);
+ return status;
+}
+
+static int aead_do_fallback(struct aead_request *req, bool is_enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+
+ if (ctx->fbk_cipher) {
+ /* Use the fallback aead tfm for this request */
+ aead_request_set_tfm(&rctx->fbk_req, ctx->fbk_cipher);
+ aead_request_set_callback(&rctx->fbk_req, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(&rctx->fbk_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
+ crypto_aead_decrypt(&rctx->fbk_req);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
+{
+ struct otx2_cpt_req_ctx *rctx = aead_request_ctx(req);
+ struct otx2_cpt_req_info *req_info = &rctx->cpt_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct pci_dev *pdev;
+ int status, cpu_num;
+
+ /* Clear control words */
+ rctx->ctrl_word.flags = 0;
+ rctx->fctx.enc.enc_ctrl.u = 0;
+
+ req_info->callback = otx2_cpt_aead_callback;
+ req_info->areq = &req->base;
+ req_info->req_type = reg_type;
+ req_info->is_enc = enc;
+ req_info->is_trunc_hmac = false;
+
+ switch (reg_type) {
+ case OTX2_CPT_AEAD_ENC_DEC_REQ:
+ status = create_aead_input_list(req, enc);
+ if (status)
+ return status;
+ create_aead_output_list(req, enc, crypto_aead_authsize(tfm));
+ break;
+
+ case OTX2_CPT_AEAD_ENC_DEC_NULL_REQ:
+ create_aead_null_input_list(req, enc,
+ crypto_aead_authsize(tfm));
+ status = create_aead_null_output_list(req, enc,
+ crypto_aead_authsize(tfm));
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(req_info->req.param1, ctx->enc_align_len))
+ return -EINVAL;
+
+ if (!req_info->req.param2 ||
+ (req_info->req.param1 > OTX2_CPT_MAX_REQ_SIZE) ||
+ (req_info->req.param2 > OTX2_CPT_MAX_REQ_SIZE))
+ return aead_do_fallback(req, enc);
+
+ status = get_se_device(&pdev, &cpu_num);
+ if (status)
+ return status;
+
+ req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+
+ /*
+ * We perform an asynchronous send and, once the request is
+ * completed, the driver notifies us through the registered
+ * callback functions
+ */
+ return otx2_cpt_do_request(pdev, req_info, cpu_num);
+}
+
+static int otx2_cpt_aead_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, true);
+}
+
+static int otx2_cpt_aead_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_REQ, false);
+}
+
+static int otx2_cpt_aead_null_encrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, true);
+}
+
+static int otx2_cpt_aead_null_decrypt(struct aead_request *req)
+{
+ return cpt_aead_enc_dec(req, OTX2_CPT_AEAD_ENC_DEC_NULL_REQ, false);
+}
+
+static struct skcipher_alg otx2_cpt_skciphers[] = { {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "cpt_xts_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_xts_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cpt_cbc_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "cpt_ecb_aes",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .ivsize = 0,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = otx2_cpt_skcipher_ecb_aes_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cpt_cbc_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = otx2_cpt_skcipher_cbc_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+}, {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "cpt_ecb_des3_ede",
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
+ .base.cra_alignmask = 7,
+ .base.cra_priority = 4001,
+ .base.cra_module = THIS_MODULE,
+
+ .init = otx2_cpt_enc_dec_init,
+ .exit = otx2_cpt_skcipher_exit,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ .setkey = otx2_cpt_skcipher_ecb_des3_setkey,
+ .encrypt = otx2_cpt_skcipher_encrypt,
+ .decrypt = otx2_cpt_skcipher_decrypt,
+} };
+
+static struct aead_alg otx2_cpt_aeads[] = { {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_cbc_aes_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_cbc_aes_sha_setkey,
+ .setauthsize = otx2_cpt_aead_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha1_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha1_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha256_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha256_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha384_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha384_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .cra_driver_name = "cpt_hmac_sha512_ecb_null",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_ecb_null_sha512_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_ecb_null_sha_setkey,
+ .setauthsize = otx2_cpt_aead_null_set_authsize,
+ .encrypt = otx2_cpt_aead_null_encrypt,
+ .decrypt = otx2_cpt_aead_null_decrypt,
+ .ivsize = 0,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "cpt_rfc4106_gcm_aes",
+ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct otx2_cpt_aead_ctx),
+ .cra_priority = 4001,
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = otx2_cpt_aead_gcm_aes_init,
+ .exit = otx2_cpt_aead_exit,
+ .setkey = otx2_cpt_aead_gcm_aes_setkey,
+ .setauthsize = otx2_cpt_aead_gcm_set_authsize,
+ .encrypt = otx2_cpt_aead_encrypt,
+ .decrypt = otx2_cpt_aead_decrypt,
+ .ivsize = AES_GCM_IV_SIZE,
+ .maxauthsize = AES_GCM_ICV_SIZE,
+} };
+
+static inline int cpt_register_algs(void)
+{
+ int i, err = 0;
+
+ if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_skciphers); i++)
+ otx2_cpt_skciphers[i].base.cra_flags &=
+ ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(otx2_cpt_aeads); i++)
+ otx2_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
+
+ err = crypto_register_aeads(otx2_cpt_aeads,
+ ARRAY_SIZE(otx2_cpt_aeads));
+ if (err) {
+ crypto_unregister_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ return err;
+ }
+
+ return 0;
+}
+
+static inline void cpt_unregister_algs(void)
+{
+ crypto_unregister_skciphers(otx2_cpt_skciphers,
+ ARRAY_SIZE(otx2_cpt_skciphers));
+ crypto_unregister_aeads(otx2_cpt_aeads, ARRAY_SIZE(otx2_cpt_aeads));
+}
+
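+ /* sort() helpers used to keep the SE device table ordered by PCI devfn */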
+static int compare_func(const void *lptr, const void *rptr)
+{
+ const struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
+ const struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
+
+ if (ldesc->dev->devfn < rdesc->dev->devfn)
+ return -1;
+ if (ldesc->dev->devfn > rdesc->dev->devfn)
+ return 1;
+ return 0;
+}
+
+static void swap_func(void *lptr, void *rptr, int size)
+{
+ struct cpt_device_desc *ldesc = lptr;
+ struct cpt_device_desc *rdesc = rptr;
+ struct cpt_device_desc desc;
+
+ desc = *ldesc;
+ *ldesc = *rdesc;
+ *rdesc = desc;
+}
+
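+ /*
+ * Add a device to the SE device table. Crypto algorithms are
+ * registered only once, when the last expected device is added.
+ */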
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices)
+{
+ int ret = 0;
+ int count;
+
+ mutex_lock(&mutex);
+ count = atomic_read(&se_devices.count);
+ if (count >= OTX2_CPT_MAX_LFS_NUM) {
+ dev_err(&pdev->dev, "No space to add a new device\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+ se_devices.desc[count].num_queues = num_queues;
+ se_devices.desc[count++].dev = pdev;
+ atomic_inc(&se_devices.count);
+
+ if (atomic_read(&se_devices.count) == num_devices &&
+ is_crypto_registered == false) {
+ if (cpt_register_algs()) {
+ dev_err(&pdev->dev,
+ "Error in registering crypto algorithms\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ try_module_get(mod);
+ is_crypto_registered = true;
+ }
+ sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
+ compare_func, swap_func);
+
+unlock:
+ mutex_unlock(&mutex);
+ return ret;
+}
+
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod)
+{
+ struct cpt_device_table *dev_tbl;
+ bool dev_found = false;
+ int i, j, count;
+
+ mutex_lock(&mutex);
+
+ dev_tbl = &se_devices;
+ count = atomic_read(&dev_tbl->count);
+ for (i = 0; i < count; i++) {
+ if (pdev == dev_tbl->desc[i].dev) {
+ for (j = i; j < count-1; j++)
+ dev_tbl->desc[j] = dev_tbl->desc[j+1];
+ dev_found = true;
+ break;
+ }
+ }
+
+ if (!dev_found) {
+ dev_err(&pdev->dev, "%s device not found\n", __func__);
+ goto unlock;
+ }
+ if (atomic_dec_and_test(&se_devices.count)) {
+ cpt_unregister_algs();
+ module_put(mod);
+ is_crypto_registered = false;
+ }
+
+unlock:
+ mutex_unlock(&mutex);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
new file mode 100644
index 000000000000..f04184bd1744
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef __OTX2_CPT_ALGS_H
+#define __OTX2_CPT_ALGS_H
+
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <crypto/aead.h>
+#include "otx2_cpt_common.h"
+
+#define OTX2_CPT_MAX_ENC_KEY_SIZE 32
+#define OTX2_CPT_MAX_HASH_KEY_SIZE 64
+#define OTX2_CPT_MAX_KEY_SIZE (OTX2_CPT_MAX_ENC_KEY_SIZE + \
+ OTX2_CPT_MAX_HASH_KEY_SIZE)
+enum otx2_cpt_request_type {
+ OTX2_CPT_ENC_DEC_REQ = 0x1,
+ OTX2_CPT_AEAD_ENC_DEC_REQ = 0x2,
+ OTX2_CPT_AEAD_ENC_DEC_NULL_REQ = 0x3,
+ OTX2_CPT_PASSTHROUGH_REQ = 0x4
+};
+
+enum otx2_cpt_major_opcodes {
+ OTX2_CPT_MAJOR_OP_MISC = 0x01,
+ OTX2_CPT_MAJOR_OP_FC = 0x33,
+ OTX2_CPT_MAJOR_OP_HMAC = 0x35,
+};
+
+enum otx2_cpt_cipher_type {
+ OTX2_CPT_CIPHER_NULL = 0x0,
+ OTX2_CPT_DES3_CBC = 0x1,
+ OTX2_CPT_DES3_ECB = 0x2,
+ OTX2_CPT_AES_CBC = 0x3,
+ OTX2_CPT_AES_ECB = 0x4,
+ OTX2_CPT_AES_CFB = 0x5,
+ OTX2_CPT_AES_CTR = 0x6,
+ OTX2_CPT_AES_GCM = 0x7,
+ OTX2_CPT_AES_XTS = 0x8
+};
+
+enum otx2_cpt_mac_type {
+ OTX2_CPT_MAC_NULL = 0x0,
+ OTX2_CPT_MD5 = 0x1,
+ OTX2_CPT_SHA1 = 0x2,
+ OTX2_CPT_SHA224 = 0x3,
+ OTX2_CPT_SHA256 = 0x4,
+ OTX2_CPT_SHA384 = 0x5,
+ OTX2_CPT_SHA512 = 0x6,
+ OTX2_CPT_GMAC = 0x7
+};
+
+enum otx2_cpt_aes_key_len {
+ OTX2_CPT_AES_128_BIT = 0x1,
+ OTX2_CPT_AES_192_BIT = 0x2,
+ OTX2_CPT_AES_256_BIT = 0x3
+};
+
+union otx2_cpt_encr_ctrl {
+ u64 u;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 enc_cipher:4;
+ u64 reserved_59:1;
+ u64 aes_key:2;
+ u64 iv_source:1;
+ u64 mac_type:4;
+ u64 reserved_49_51:3;
+ u64 auth_input_type:1;
+ u64 mac_len:8;
+ u64 reserved_32_39:8;
+ u64 encr_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 encr_offset:16;
+ u64 reserved_32_39:8;
+ u64 mac_len:8;
+ u64 auth_input_type:1;
+ u64 reserved_49_51:3;
+ u64 mac_type:4;
+ u64 iv_source:1;
+ u64 aes_key:2;
+ u64 reserved_59:1;
+ u64 enc_cipher:4;
+#endif
+ } e;
+};
+
+struct otx2_cpt_cipher {
+ const char *name;
+ u8 value;
+};
+
+struct otx2_cpt_fc_enc_ctx {
+ union otx2_cpt_encr_ctrl enc_ctrl;
+ u8 encr_key[32];
+ u8 encr_iv[16];
+};
+
+union otx2_cpt_fc_hmac_ctx {
+ struct {
+ u8 ipad[64];
+ u8 opad[64];
+ } e;
+ struct {
+ u8 hmac_calc[64]; /* HMAC calculated */
+ u8 hmac_recv[64]; /* HMAC received */
+ } s;
+};
+
+struct otx2_cpt_fc_ctx {
+ struct otx2_cpt_fc_enc_ctx enc;
+ union otx2_cpt_fc_hmac_ctx hmac;
+};
+
+struct otx2_cpt_enc_ctx {
+ u32 key_len;
+ u8 enc_key[OTX2_CPT_MAX_KEY_SIZE];
+ u8 cipher_type;
+ u8 key_type;
+ u8 enc_align_len;
+ struct crypto_skcipher *fbk_cipher;
+};
+
+union otx2_cpt_offset_ctrl {
+ u64 flags;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved:32;
+ u64 enc_data_offset:16;
+ u64 iv_offset:8;
+ u64 auth_offset:8;
+#else
+ u64 auth_offset:8;
+ u64 iv_offset:8;
+ u64 enc_data_offset:16;
+ u64 reserved:32;
+#endif
+ } e;
+};
+
+struct otx2_cpt_req_ctx {
+ struct otx2_cpt_req_info cpt_req;
+ union otx2_cpt_offset_ctrl ctrl_word;
+ struct otx2_cpt_fc_ctx fctx;
+ union {
+ struct skcipher_request sk_fbk_req;
+ struct aead_request fbk_req;
+ };
+};
+
+struct otx2_cpt_sdesc {
+ struct shash_desc shash;
+};
+
+struct otx2_cpt_aead_ctx {
+ u8 key[OTX2_CPT_MAX_KEY_SIZE];
+ struct crypto_shash *hashalg;
+ struct otx2_cpt_sdesc *sdesc;
+ struct crypto_aead *fbk_cipher;
+ u8 *ipad;
+ u8 *opad;
+ u32 enc_key_len;
+ u32 auth_key_len;
+ u8 cipher_type;
+ u8 mac_type;
+ u8 key_type;
+ u8 is_trunc_hmac;
+ u8 enc_align_len;
+};
+int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
+ int num_queues, int num_devices);
+void otx2_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod);
+
+#endif /* __OTX2_CPT_ALGS_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
new file mode 100644
index 000000000000..47f378731024
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include "otx2_cptlf.h"
+#include "otx2_cptvf_algs.h"
+#include <rvu_reg.h>
+
+#define OTX2_CPTVF_DRV_NAME "octeontx2-cptvf"
+
+static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+
+ /* Enable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
+}
+
+static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
+{
+ /* Disable PF-VF interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);
+
+ /* Clear interrupt if any */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
+ 0x1ULL);
+}
+
+static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
+{
+ int ret, irq;
+ int num_vec;
+
+ num_vec = pci_msix_vec_count(cptvf->pdev);
+ if (num_vec <= 0)
+ return -EINVAL;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&cptvf->pdev->dev,
+ "Request for %d msix vectors failed\n", num_vec);
+ return ret;
+ }
+ irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
+ /* Register VF<=>PF mailbox interrupt handler */
+ ret = devm_request_irq(&cptvf->pdev->dev, irq,
+ otx2_cptvf_pfvf_mbox_intr, 0,
+ "CPTPFVF Mbox", cptvf);
+ if (ret)
+ return ret;
+ /* Enable PF-VF mailbox interrupts */
+ cptvf_enable_pfvf_mbox_intrs(cptvf);
+
+ ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
+ if (ret) {
+ dev_warn(&cptvf->pdev->dev,
+ "PF not responding to mailbox, deferring probe\n");
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
+{
+ int ret;
+
+ cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!cptvf->pfvf_mbox_wq)
+ return -ENOMEM;
+
+ ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
+ cptvf->pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
+ if (ret)
+ goto free_wqe;
+
+ INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
+ return 0;
+
+free_wqe:
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ return ret;
+}
+
+static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
+{
+ destroy_workqueue(cptvf->pfvf_mbox_wq);
+ otx2_mbox_destroy(&cptvf->pfvf_mbox);
+}
+
+static void cptlf_work_handler(unsigned long data)
+{
+ otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
+}
+
+static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (!lfs->lf[i].wqe)
+ continue;
+
+ tasklet_kill(&lfs->lf[i].wqe->work);
+ kfree(lfs->lf[i].wqe);
+ lfs->lf[i].wqe = NULL;
+ }
+}
+
+static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_cptlf_wqe *wqe;
+ int i, ret = 0;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
+ if (!wqe) {
+ ret = -ENOMEM;
+ goto cleanup_tasklet;
+ }
+
+ tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
+ wqe->lfs = lfs;
+ wqe->lf_num = i;
+ lfs->lf[i].wqe = wqe;
+ }
+ return 0;
+
+cleanup_tasklet:
+ cleanup_tasklet_work(lfs);
+ return ret;
+}
+
+static void free_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int i;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ kfree(lfs->lf[i].pqueue.head);
+ lfs->lf[i].pqueue.head = NULL;
+ }
+}
+
+static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
+{
+ int size, ret, i;
+
+ if (!lfs->lfs_num)
+ return -EINVAL;
+
+ for (i = 0; i < lfs->lfs_num; i++) {
+ lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
+ size = lfs->lf[i].pqueue.qlen *
+ sizeof(struct otx2_cpt_pending_entry);
+
+ lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
+ if (!lfs->lf[i].pqueue.head) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize spin lock */
+ spin_lock_init(&lfs->lf[i].pqueue.lock);
+ }
+ return 0;
+
+error:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ cleanup_tasklet_work(lfs);
+ free_pending_queues(lfs);
+}
+
+static int lf_sw_init(struct otx2_cptlfs_info *lfs)
+{
+ int ret;
+
+ ret = alloc_pending_queues(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Allocating pending queues failed\n");
+ return ret;
+ }
+ ret = init_tasklet_work(lfs);
+ if (ret) {
+ dev_err(&lfs->pdev->dev,
+ "Tasklet work init failed\n");
+ goto pending_queues_free;
+ }
+ return 0;
+
+pending_queues_free:
+ free_pending_queues(lfs);
+ return ret;
+}
+
+static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
+{
+ atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
+
+ /* Remove interrupts affinity */
+ otx2_cptlf_free_irqs_affinity(lfs);
+ /* Disable instruction queue */
+ otx2_cptlf_disable_iqueues(lfs);
+ /* Unregister crypto algorithms */
+ otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
+ /* Unregister LFs interrupts */
+ otx2_cptlf_unregister_interrupts(lfs);
+ /* Cleanup LFs software side */
+ lf_sw_cleanup(lfs);
+ /* Send request to detach LFs */
+ otx2_cpt_detach_rsrcs_msg(lfs);
+}
+
+static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct device *dev = &cptvf->pdev->dev;
+ int ret, lfs_num;
+ u8 eng_grp_msk;
+
+ /* Get engine group number for symmetric crypto */
+ cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
+ if (ret)
+ return ret;
+
+ if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev, "Engine group for kernel crypto not available\n");
+ ret = -ENOENT;
+ return ret;
+ }
+ eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
+ if (ret)
+ return ret;
+
+ lfs->reg_base = cptvf->reg_base;
+ lfs->pdev = cptvf->pdev;
+ lfs->mbox = &cptvf->pfvf_mbox;
+
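+ /* Use one LF per online CPU unless the PF has provided a kernel VF limit */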
+ lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
+ num_online_cpus();
+ ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
+ lfs_num);
+ if (ret)
+ return ret;
+
+ /* Get msix offsets for attached LFs */
+ ret = otx2_cpt_msix_offset_msg(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Initialize LFs software side */
+ ret = lf_sw_init(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Register LFs interrupts */
+ ret = otx2_cptlf_register_interrupts(lfs);
+ if (ret)
+ goto cleanup_lf_sw;
+
+ /* Set interrupts affinity */
+ ret = otx2_cptlf_set_irqs_affinity(lfs);
+ if (ret)
+ goto unregister_intr;
+
+ atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
+ /* Register crypto algorithms */
+ ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
+ if (ret) {
+ dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
+ goto disable_irqs;
+ }
+ return 0;
+
+disable_irqs:
+ otx2_cptlf_free_irqs_affinity(lfs);
+unregister_intr:
+ otx2_cptlf_unregister_interrupts(lfs);
+cleanup_lf_sw:
+ lf_sw_cleanup(lfs);
+cleanup_lf:
+ otx2_cptlf_shutdown(lfs);
+
+ return ret;
+}
+
+static int otx2_cptvf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t offset, size;
+ struct otx2_cptvf_dev *cptvf;
+ int ret;
+
+ cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
+ if (!cptvf)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto clear_drvdata;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto clear_drvdata;
+ }
+ /* Map VF's configuration registers */
+ ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
+ OTX2_CPTVF_DRV_NAME);
+ if (ret) {
+ dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
+ goto clear_drvdata;
+ }
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, cptvf);
+ cptvf->pdev = pdev;
+
+ cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+
+ offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
+ size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
+ /* Map PF-VF mailbox memory */
+ cptvf->pfvf_mbox_base = devm_ioremap_wc(dev, offset, size);
+ if (!cptvf->pfvf_mbox_base) {
+ dev_err(&pdev->dev, "Unable to map BAR4\n");
+ ret = -ENODEV;
+ goto clear_drvdata;
+ }
+ /* Initialize PF<=>VF mailbox */
+ ret = cptvf_pfvf_mbox_init(cptvf);
+ if (ret)
+ goto clear_drvdata;
+
+ /* Register interrupts */
+ ret = cptvf_register_interrupts(cptvf);
+ if (ret)
+ goto destroy_pfvf_mbox;
+
+ /* Initialize CPT LFs */
+ ret = cptvf_lf_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
+ return 0;
+
+unregister_interrupts:
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+destroy_pfvf_mbox:
+ cptvf_pfvf_mbox_destroy(cptvf);
+clear_drvdata:
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void otx2_cptvf_remove(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ if (!cptvf) {
+ dev_err(&pdev->dev, "Invalid CPT VF device.\n");
+ return;
+ }
+ cptvf_lf_shutdown(&cptvf->lfs);
+ /* Disable PF-VF mailbox interrupt */
+ cptvf_disable_pfvf_mbox_intrs(cptvf);
+ /* Destroy PF-VF mbox */
+ cptvf_pfvf_mbox_destroy(cptvf);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/* Supported devices */
+static const struct pci_device_id otx2_cptvf_id_table[] = {
+ {PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
+ { 0, } /* end of table */
+};
+
+static struct pci_driver otx2_cptvf_pci_driver = {
+ .name = OTX2_CPTVF_DRV_NAME,
+ .id_table = otx2_cptvf_id_table,
+ .probe = otx2_cptvf_probe,
+ .remove = otx2_cptvf_remove,
+};
+
+module_pci_driver(otx2_cptvf_pci_driver);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell OcteonTX2 CPT Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
new file mode 100644
index 000000000000..5d73b711cba6
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cpt_common.h"
+#include "otx2_cptvf.h"
+#include <rvu_reg.h>
+
+irqreturn_t otx2_cptvf_pfvf_mbox_intr(int __always_unused irq, void *arg)
+{
+ struct otx2_cptvf_dev *cptvf = arg;
+ u64 intr;
+
+ /* Read the interrupt bits */
+ intr = otx2_cpt_read64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT);
+
+ if (intr & 0x1ULL) {
+ /* Schedule work queue function to process the MBOX request */
+ queue_work(cptvf->pfvf_mbox_wq, &cptvf->pfvf_mbox_work);
+ /* Clear and ack the interrupt */
+ otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
+ OTX2_RVU_VF_INT, 0x1ULL);
+ }
+ return IRQ_HANDLED;
+}
+
+static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
+ struct mbox_msghdr *msg)
+{
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ struct otx2_cpt_kvf_limits_rsp *rsp_limits;
+ struct otx2_cpt_egrp_num_rsp *rsp_grp;
+ struct cpt_rd_wr_reg_msg *rsp_reg;
+ struct msix_offset_rsp *rsp_msix;
+ int i;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with unknown ID %d\n", msg->id);
+ return;
+ }
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(&cptvf->pdev->dev,
+ "MBOX msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ cptvf->vf_id = ((msg->pcifunc >> RVU_PFVF_FUNC_SHIFT)
+ & RVU_PFVF_FUNC_MASK) - 1;
+ break;
+ case MBOX_MSG_ATTACH_RESOURCES:
+ /* Check if resources were successfully attached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 1;
+ break;
+ case MBOX_MSG_DETACH_RESOURCES:
+ /* Check if resources were successfully detached */
+ if (!msg->rc)
+ lfs->are_lfs_attached = 0;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ rsp_msix = (struct msix_offset_rsp *) msg;
+ for (i = 0; i < rsp_msix->cptlfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
+ break;
+ case MBOX_MSG_CPT_RD_WR_REGISTER:
+ rsp_reg = (struct cpt_rd_wr_reg_msg *) msg;
+ if (msg->rc) {
+ dev_err(&cptvf->pdev->dev,
+ "Reg %llx rd/wr(%d) failed %d\n",
+ rsp_reg->reg_offset, rsp_reg->is_write,
+ msg->rc);
+ return;
+ }
+ if (!rsp_reg->is_write)
+ *rsp_reg->ret_val = rsp_reg->val;
+ break;
+ case MBOX_MSG_GET_ENG_GRP_NUM:
+ rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
+ cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ break;
+ case MBOX_MSG_GET_KVF_LIMITS:
+ rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
+ cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
+ break;
+ default:
+ dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
+ msg->id);
+ break;
+ }
+}
+
+void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_cptvf_dev *cptvf;
+ struct otx2_mbox *pfvf_mbox;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ int offset, i;
+
+ /* sync with mbox memory region */
+ smp_rmb();
+
+ cptvf = container_of(work, struct otx2_cptvf_dev, pfvf_mbox_work);
+ pfvf_mbox = &cptvf->pfvf_mbox;
+ mdev = &pfvf_mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + pfvf_mbox->rx_start);
+ if (rsp_hdr->num_msgs == 0)
+ return;
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+ for (i = 0; i < rsp_hdr->num_msgs; i++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + pfvf_mbox->rx_start +
+ offset);
+ process_pfvf_mbox_mbox_msg(cptvf, msg);
+ offset = msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+ otx2_mbox_reset(pfvf_mbox, 0);
+}
+
+int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct otx2_cpt_egrp_num_msg *req;
+
+ req = (struct otx2_cpt_egrp_num_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_egrp_num_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->eng_type = eng_type;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+
+int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct mbox_msghdr *req;
+ int ret;
+
+ req = (struct mbox_msghdr *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_kvf_limits_rsp));
+ if (req == NULL) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_GET_KVF_LIMITS;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+
+ return ret;
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
new file mode 100644
index 000000000000..d5c1c1b7c7e4
--- /dev/null
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include "otx2_cptvf.h"
+#include "otx2_cpt_common.h"
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+
+/* Default timeout when waiting for free pending entry in us */
+#define CPT_PENTRY_TIMEOUT 1000
+#define CPT_PENTRY_STEP 50
+
+/* Default threshold for stopping and resuming sender requests */
+#define CPT_IQ_STOP_MARGIN 128
+#define CPT_IQ_RESUME_MARGIN 512
+
+/* Default command timeout in seconds */
+#define CPT_COMMAND_TIMEOUT 4
+#define CPT_TIME_IN_RESET_COUNT 5
+
+static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req)
+{
+ int i;
+
+ pr_debug("Gather list size %d\n", req->in_cnt);
+ for (i = 0; i < req->in_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->in[i].size, req->in[i].vptr,
+ (void *) req->in[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n",
+ req->in[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->in[i].vptr, req->in[i].size, false);
+ }
+ pr_debug("Scatter list size %d\n", req->out_cnt);
+ for (i = 0; i < req->out_cnt; i++) {
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ req->out[i].size, req->out[i].vptr,
+ (void *) req->out[i].dma_addr);
+ pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
+ print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
+ req->out[i].vptr, req->out[i].size, false);
+ }
+}
+
+static inline struct otx2_cpt_pending_entry *get_free_pending_entry(
+ struct otx2_cpt_pending_queue *q,
+ int qlen)
+{
+ struct otx2_cpt_pending_entry *ent = NULL;
+
+ ent = &q->head[q->rear];
+ if (unlikely(ent->busy))
+ return NULL;
+
+ q->rear++;
+ if (unlikely(q->rear == qlen))
+ q->rear = 0;
+
+ return ent;
+}
+
+static inline u32 modulo_inc(u32 index, u32 length, u32 inc)
+{
+ if (WARN_ON(inc > length))
+ inc = length;
+
+ index += inc;
+ if (unlikely(index >= length))
+ index -= length;
+
+ return index;
+}
+
+static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
+{
+ pentry->completion_addr = NULL;
+ pentry->info = NULL;
+ pentry->callback = NULL;
+ pentry->areq = NULL;
+ pentry->resume_sender = false;
+ pentry->busy = false;
+}
+
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx2_cpt_sglist_component *sg_ptr = NULL;
+ int ret = 0, i, j;
+ int components;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ ret = -EIO;
+ goto sg_cleanup;
+ }
+ }
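+ /* Pack the buffers into SG components, four pointer/length pairs each */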
+ components = buf_count / 4;
+ sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
+ sg_ptr++;
+ }
+ components = buf_count % 4;
+
+ switch (components) {
+ case 3:
+ sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
+ fallthrough;
+ case 2:
+ sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
+ fallthrough;
+ case 1:
+ sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return ret;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return ret;
+}
+
+static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
+ struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u32 dlen, align_dlen, info_len;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+
+ if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
+ req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
+ dev_err(&pdev->dev, "Error too many sg components\n");
+ return NULL;
+ }
+
+ g_sz_bytes = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
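+ /*
+ * Fill the 8-byte SG list header (scatter count, gather count,
+ * two reserved halfwords) and convert it to big endian.
+ */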
+ ((u16 *)info->in_buffer)[0] = req->out_cnt;
+ ((u16 *)info->in_buffer)[1] = req->in_cnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ cpu_to_be64s((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->in_cnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->out_cnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + align_dlen;
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
+static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ struct otx2_cpt_pending_queue *pqueue,
+ struct otx2_cptlf_info *lf)
+{
+ struct otx2_cptvf_request *cpt_req = &req->req;
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_ctrl_info *ctrl = &req->ctrl;
+ struct otx2_cpt_inst_info *info = NULL;
+ union otx2_cpt_res_s *result = NULL;
+ struct otx2_cpt_iq_command iq_cmd;
+ union otx2_cpt_inst_s cptinst;
+ int retry, ret = 0;
+ u8 resume_sender;
+ gfp_t gfp;
+
+ gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+ if (unlikely(!otx2_cptlf_started(lf->lfs)))
+ return -ENODEV;
+
+ info = info_create(pdev, req, gfp);
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Setting up cpt inst info failed\n");
+ return -ENOMEM;
+ }
+ cpt_req->dlen = info->dlen;
+
+ result = info->completion_addr;
+ result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
+
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ retry = CPT_PENTRY_TIMEOUT / CPT_PENTRY_STEP;
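+ /* Wait up to CPT_PENTRY_TIMEOUT microseconds for a free pending entry */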
+ while (unlikely(!pentry) && retry--) {
+ spin_unlock_bh(&pqueue->lock);
+ udelay(CPT_PENTRY_STEP);
+ spin_lock_bh(&pqueue->lock);
+ pentry = get_free_pending_entry(pqueue, pqueue->qlen);
+ }
+
+ if (unlikely(!pentry)) {
+ ret = -ENOSPC;
+ goto destroy_info;
+ }
+
+ /*
+ * Check if we are close to filling the entire pending queue;
+ * if so, tell the sender to stop/sleep by returning -EBUSY.
+ * We do this only for contexts which can sleep (GFP_KERNEL).
+ */
+ if (gfp == GFP_KERNEL &&
+ pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
+ pentry->resume_sender = true;
+ } else
+ pentry->resume_sender = false;
+ resume_sender = pentry->resume_sender;
+ pqueue->pending_count++;
+
+ pentry->completion_addr = info->completion_addr;
+ pentry->info = info;
+ pentry->callback = req->callback;
+ pentry->areq = req->areq;
+ pentry->busy = true;
+ info->pentry = pentry;
+ info->time_in = jiffies;
+ info->req = req;
+
+ /* Fill in the command */
+ iq_cmd.cmd.u = 0;
+ iq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
+ iq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
+ iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
+ iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
+
+ /* 64-bit swap for microcode data reads, not needed for addresses */
+ cpu_to_be64s(&iq_cmd.cmd.u);
+ iq_cmd.dptr = info->dptr_baddr;
+ iq_cmd.rptr = 0;
+ iq_cmd.cptr.u = 0;
+ iq_cmd.cptr.s.grp = ctrl->s.grp;
+
+ /* Fill in the CPT_INST_S type command for HW interpretation */
+ otx2_cpt_fill_inst(&cptinst, &iq_cmd, info->comp_baddr);
+
+ /* Print debug info if enabled */
+ otx2_cpt_dump_sg_list(pdev, req);
+ pr_debug("Cpt_inst_s hexdump (%d bytes)\n", OTX2_CPT_INST_SIZE);
+ print_hex_dump_debug("", 0, 16, 1, &cptinst, OTX2_CPT_INST_SIZE, false);
+ pr_debug("Dptr hexdump (%d bytes)\n", cpt_req->dlen);
+ print_hex_dump_debug("", 0, 16, 1, info->in_buffer,
+ cpt_req->dlen, false);
+
+ /* Send CPT command */
+ otx2_cpt_send_cmd(&cptinst, 1, lf);
+
+ /*
+ * We allocate and prepare the pending queue entry in the same
+ * critical section in which the CPT instruction is submitted to
+ * the instruction queue, so that the order of CPT requests is the
+ * same in both the pending and instruction queues.
+ */
+ spin_unlock_bh(&pqueue->lock);
+
+ ret = resume_sender ? -EBUSY : -EINPROGRESS;
+ return ret;
+
+destroy_info:
+ spin_unlock_bh(&pqueue->lock);
+ otx2_cpt_info_destroy(pdev, info);
+ return ret;
+}
+
+int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ int cpu_num)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+
+ return process_request(lfs->pdev, req, &lfs->lf[cpu_num].pqueue,
+ &lfs->lf[cpu_num]);
+}
+
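+ /*
+ * Returns 1 if the request has not completed yet (NOTDONE) and must
+ * stay in the pending queue, 0 otherwise.
+ */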
+static int cpt_process_ccode(struct pci_dev *pdev,
+ union otx2_cpt_res_s *cpt_status,
+ struct otx2_cpt_inst_info *info,
+ u32 *res_code)
+{
+ u8 uc_ccode = cpt_status->s.uc_compcode;
+ u8 ccode = cpt_status->s.compcode;
+
+ switch (ccode) {
+ case OTX2_CPT_COMP_E_FAULT:
+ dev_err(&pdev->dev,
+ "Request failed with DMA fault\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_HWERR:
+ dev_err(&pdev->dev,
+ "Request failed with hardware error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_INSTERR:
+ dev_err(&pdev->dev,
+ "Request failed with instruction error\n");
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+
+ case OTX2_CPT_COMP_E_NOTDONE:
+ /* check for timeout */
+ if (time_after_eq(jiffies, info->time_in +
+ CPT_COMMAND_TIMEOUT * HZ))
+ dev_warn(&pdev->dev,
+ "Request timed out 0x%p\n", info->req);
+ else if (info->extra_time < CPT_TIME_IN_RESET_COUNT) {
+ info->time_in = jiffies;
+ info->extra_time++;
+ }
+ return 1;
+
+ case OTX2_CPT_COMP_E_GOOD:
+ /*
+ * Check the microcode completion code; it is only valid
+ * when the hardware completion code is CPT_COMP_E::GOOD.
+ */
+ if (uc_ccode != OTX2_CPT_UCC_SUCCESS) {
+ /*
+ * If the requested hmac is truncated and the ucode returns
+ * an s/g write length error, we report success because the
+ * ucode writes as many bytes of the calculated hmac as fit
+ * in the gather buffer and reports an s/g write length error
+ * when the number of bytes in the gather buffer is less than
+ * the full hmac size.
+ */
+ if (info->req->is_trunc_hmac &&
+ uc_ccode == OTX2_CPT_UCC_SG_WRITE_LENGTH) {
+ *res_code = 0;
+ break;
+ }
+
+ dev_err(&pdev->dev,
+ "Request failed with software error code 0x%x\n",
+ cpt_status->s.uc_compcode);
+ otx2_cpt_dump_sg_list(pdev, info->req);
+ break;
+ }
+ /* Request has been processed with success */
+ *res_code = 0;
+ break;
+
+ default:
+ dev_err(&pdev->dev,
+ "Request returned invalid status %d\n", ccode);
+ break;
+ }
+ return 0;
+}
+
+static inline void process_pending_queue(struct pci_dev *pdev,
+ struct otx2_cpt_pending_queue *pqueue)
+{
+ struct otx2_cpt_pending_entry *resume_pentry = NULL;
+ void (*callback)(int status, void *arg, void *req);
+ struct otx2_cpt_pending_entry *pentry = NULL;
+ union otx2_cpt_res_s *cpt_status = NULL;
+ struct otx2_cpt_inst_info *info = NULL;
+ struct otx2_cpt_req_info *req = NULL;
+ struct crypto_async_request *areq;
+ u32 res_code, resume_index;
+
+ while (1) {
+ spin_lock_bh(&pqueue->lock);
+ pentry = &pqueue->head[pqueue->front];
+
+ if (WARN_ON(!pentry)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ res_code = -EINVAL;
+ if (unlikely(!pentry->busy)) {
+ spin_unlock_bh(&pqueue->lock);
+ break;
+ }
+
+ if (unlikely(!pentry->callback)) {
+ dev_err(&pdev->dev, "Callback NULL\n");
+ goto process_pentry;
+ }
+
+ info = pentry->info;
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Pending entry post arg NULL\n");
+ goto process_pentry;
+ }
+
+ req = info->req;
+ if (unlikely(!req)) {
+ dev_err(&pdev->dev, "Request NULL\n");
+ goto process_pentry;
+ }
+
+ cpt_status = pentry->completion_addr;
+ if (unlikely(!cpt_status)) {
+ dev_err(&pdev->dev, "Completion address NULL\n");
+ goto process_pentry;
+ }
+
+ if (cpt_process_ccode(pdev, cpt_status, info, &res_code)) {
+ spin_unlock_bh(&pqueue->lock);
+ return;
+ }
+ info->pdev = pdev;
+
+process_pentry:
+ /*
+ * Check if we should inform the sending side to resume.
+ * We do it CPT_IQ_RESUME_MARGIN elements in advance, before
+ * the pending queue becomes empty.
+ */
+ resume_index = modulo_inc(pqueue->front, pqueue->qlen,
+ CPT_IQ_RESUME_MARGIN);
+ resume_pentry = &pqueue->head[resume_index];
+ if (resume_pentry &&
+ resume_pentry->resume_sender) {
+ resume_pentry->resume_sender = false;
+ callback = resume_pentry->callback;
+ areq = resume_pentry->areq;
+
+ if (callback) {
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * -EINPROGRESS is an indication to the sending
+ * side that it can resume sending requests.
+ */
+ callback(-EINPROGRESS, areq, info);
+ spin_lock_bh(&pqueue->lock);
+ }
+ }
+
+ callback = pentry->callback;
+ areq = pentry->areq;
+ free_pentry(pentry);
+
+ pqueue->pending_count--;
+ pqueue->front = modulo_inc(pqueue->front, pqueue->qlen, 1);
+ spin_unlock_bh(&pqueue->lock);
+
+ /*
+ * Call the callback after the current pending entry has been
+ * processed; skip it if the callback pointer is invalid.
+ */
+ if (callback)
+ callback(res_code, areq, info);
+ }
+}
+
+void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
+{
+ process_pending_queue(wqe->lfs->pdev,
+ &wqe->lfs->lf[wqe->lf_num].pqueue);
+}
+
+int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+
+ return cptvf->lfs.kcrypto_eng_grp_num;
+}