author	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-16 01:01:47 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-16 01:01:47 +0200
commit	dafa5f6577a9eecd2941add553d1672c30b02364 (patch)
tree	ff9d3d2dffafd6eba1b6ac21ba50623812041b70
parent	Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next (diff)
parent	crypto: arm64/ghash-ce - implement 4-way aggregation (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - Fix dcache flushing crash in skcipher.
   - Add hash finup self-tests.
   - Reschedule during speed tests.

  Algorithms:
   - Remove insecure vmac and replace it with vmac64.
   - Add public key verification for DH/ECDH.

  Drivers:
   - Decrease priority of sha-mb on x86.
   - Improve NEON latency/throughput on ARM64.
   - Add md5/sha384/sha512/des/3des to inside-secure.
   - Support eip197d in inside-secure.
   - Only register algorithms supported by the host in virtio.
   - Add cts and remove incompatible cts1 from ccree.
   - Add hisilicon SEC security accelerator driver.
   - Replace msm hwrng driver with qcom pseudo rng driver.

  Misc:
   - Centralize CRC polynomials"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (121 commits)
  crypto: arm64/ghash-ce - implement 4-way aggregation
  crypto: arm64/ghash-ce - replace NEON yield check with block limit
  crypto: hisilicon - sec_send_request() can be static
  lib/mpi: remove redundant variable esign
  crypto: arm64/aes-ce-gcm - don't reload key schedule if avoidable
  crypto: arm64/aes-ce-gcm - implement 2-way aggregation
  crypto: arm64/aes-ce-gcm - operate on two input blocks at a time
  crypto: dh - make crypto_dh_encode_key() make robust
  crypto: dh - fix calculating encoded key size
  crypto: ccp - Check for NULL PSP pointer at module unload
  crypto: arm/chacha20 - always use vrev for 16-bit rotates
  crypto: ccree - allow bigger than sector XTS op
  crypto: ccree - zero all of request ctx before use
  crypto: ccree - remove cipher ivgen left overs
  crypto: ccree - drop useless type flag during reg
  crypto: ablkcipher - fix crash flushing dcache in error path
  crypto: blkcipher - fix crash flushing dcache in error path
  crypto: skcipher - fix crash flushing dcache in error path
  crypto: skcipher - remove unnecessary setting of walk->nbytes
  crypto: scatterwalk - remove scatterwalk_samebuf()
  ...
-rw-r--r--Documentation/crypto/api-samples.rst2
-rw-r--r--Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt67
-rw-r--r--Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt15
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom,prng.txt (renamed from Documentation/devicetree/bindings/rng/qcom,prng.txt)4
-rw-r--r--MAINTAINERS2
-rw-r--r--arch/arm/crypto/chacha20-neon-core.S10
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c5
-rw-r--r--arch/arm/crypto/sha1-ce-glue.c1
-rw-r--r--arch/arm/crypto/sha1_glue.c1
-rw-r--r--arch/arm/crypto/sha1_neon_glue.c1
-rw-r--r--arch/arm/crypto/sha2-ce-glue.c2
-rw-r--r--arch/arm/crypto/sha256_glue.c2
-rw-r--r--arch/arm/crypto/sha256_neon_glue.c2
-rw-r--r--arch/arm/crypto/sha512-glue.c2
-rw-r--r--arch/arm/crypto/sha512-neon-glue.c2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07.dtsi284
-rw-r--r--arch/arm64/crypto/aes-glue.c3
-rw-r--r--arch/arm64/crypto/ghash-ce-core.S271
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c204
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c1
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c2
-rw-r--r--arch/arm64/crypto/sha256-glue.c8
-rw-r--r--arch/arm64/crypto/sha3-ce-glue.c4
-rw-r--r--arch/arm64/crypto/sha512-ce-glue.c2
-rw-r--r--arch/arm64/crypto/sha512-glue.c2
-rw-r--r--arch/arm64/crypto/sm3-ce-glue.c1
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-md5.c1
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha1.c1
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha256.c2
-rw-r--r--arch/mips/cavium-octeon/crypto/octeon-sha512.c2
-rw-r--r--arch/powerpc/crypto/md5-glue.c1
-rw-r--r--arch/powerpc/crypto/sha1-spe-glue.c1
-rw-r--r--arch/powerpc/crypto/sha1.c1
-rw-r--r--arch/powerpc/crypto/sha256-spe-glue.c2
-rw-r--r--arch/s390/crypto/aes_s390.c1
-rw-r--r--arch/s390/crypto/ghash_s390.c1
-rw-r--r--arch/s390/crypto/sha1_s390.c1
-rw-r--r--arch/s390/crypto/sha256_s390.c2
-rw-r--r--arch/s390/crypto/sha512_s390.c2
-rw-r--r--arch/sparc/crypto/md5_glue.c1
-rw-r--r--arch/sparc/crypto/sha1_glue.c1
-rw-r--r--arch/sparc/crypto/sha256_glue.c2
-rw-r--r--arch/sparc/crypto/sha512_glue.c2
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c6
-rw-r--r--arch/x86/crypto/poly1305_glue.c1
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c17
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c4
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c18
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S2
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c8
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c18
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c6
-rw-r--r--crypto/ablkcipher.c59
-rw-r--r--crypto/aegis128.c1
-rw-r--r--crypto/aegis128l.c3
-rw-r--r--crypto/aegis256.c1
-rw-r--r--crypto/blkcipher.c55
-rw-r--r--crypto/crypto_null.c1
-rw-r--r--crypto/dh.c66
-rw-r--r--crypto/dh_helper.c43
-rw-r--r--crypto/drbg.c39
-rw-r--r--crypto/ecc.c42
-rw-r--r--crypto/ecc_curve_defs.h22
-rw-r--r--crypto/ghash-generic.c1
-rw-r--r--crypto/lrw.c4
-rw-r--r--crypto/md4.c1
-rw-r--r--crypto/md5.c1
-rw-r--r--crypto/morus1280.c1
-rw-r--r--crypto/morus640.c1
-rw-r--r--crypto/poly1305_generic.c1
-rw-r--r--crypto/rmd128.c1
-rw-r--r--crypto/rmd160.c1
-rw-r--r--crypto/rmd256.c11
-rw-r--r--crypto/rmd320.c13
-rw-r--r--crypto/scatterwalk.c2
-rw-r--r--crypto/sha1_generic.c2
-rw-r--r--crypto/sha256_generic.c4
-rw-r--r--crypto/sha3_generic.c4
-rw-r--r--crypto/sha512_generic.c26
-rw-r--r--crypto/skcipher.c57
-rw-r--r--crypto/sm3_generic.c1
-rw-r--r--crypto/tcrypt.c38
-rw-r--r--crypto/testmgr.c59
-rw-r--r--crypto/testmgr.h233
-rw-r--r--crypto/tgr192.c3
-rw-r--r--crypto/vmac.c444
-rw-r--r--crypto/wp512.c3
-rw-r--r--crypto/xts.c4
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/msm-rng.c183
-rw-r--r--drivers/crypto/Kconfig15
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c18
-rw-r--r--drivers/crypto/atmel-ecc.c35
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c28
-rw-r--r--drivers/crypto/bcm/cipher.c8
-rw-r--r--drivers/crypto/caam/caamhash.c3
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/crypto/ccp/psp-dev.c35
-rw-r--r--drivers/crypto/ccp/psp-dev.h19
-rw-r--r--drivers/crypto/ccp/sp-dev.h7
-rw-r--r--drivers/crypto/ccp/sp-pci.c36
-rw-r--r--drivers/crypto/ccree/cc_aead.c16
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c8
-rw-r--r--drivers/crypto/ccree/cc_cipher.c170
-rw-r--r--drivers/crypto/ccree/cc_cipher.h1
-rw-r--r--drivers/crypto/ccree/cc_driver.c4
-rw-r--r--drivers/crypto/ccree/cc_driver.h1
-rw-r--r--drivers/crypto/ccree/cc_hash.c85
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_hw.c2
-rw-r--r--drivers/crypto/hisilicon/Kconfig14
-rw-r--r--drivers/crypto/hisilicon/Makefile2
-rw-r--r--drivers/crypto/hisilicon/sec/Makefile3
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c1122
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c1323
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.h428
-rw-r--r--drivers/crypto/inside-secure/safexcel.c474
-rw-r--r--drivers/crypto/inside-secure/safexcel.h201
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c492
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c560
-rw-r--r--drivers/crypto/inside-secure/safexcel_ring.c63
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c1
-rw-r--r--drivers/crypto/nx/nx-sha256.c1
-rw-r--r--drivers/crypto/nx/nx-sha512.c1
-rw-r--r--drivers/crypto/omap-sham.c36
-rw-r--r--drivers/crypto/padlock-sha.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c2
-rw-r--r--drivers/crypto/qce/sha.c3
-rw-r--r--drivers/crypto/qcom-rng.c229
-rw-r--r--drivers/crypto/s5p-sss.c9
-rw-r--r--drivers/crypto/sahara.c10
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c62
-rw-r--r--drivers/crypto/stm32/stm32-hash.c95
-rw-r--r--drivers/crypto/stm32/stm32_crc32.c71
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c20
-rw-r--r--drivers/crypto/talitos.c37
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c15
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c116
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h25
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c29
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c81
-rw-r--r--drivers/crypto/vmx/ghash.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/apple/bmac.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c3
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c5
-rw-r--r--drivers/staging/skein/skein_generic.c3
-rw-r--r--include/crypto/dh.h4
-rw-r--r--include/crypto/drbg.h3
-rw-r--r--include/crypto/scatterwalk.h15
-rw-r--r--include/crypto/sha.h4
-rw-r--r--include/crypto/vmac.h63
-rw-r--r--include/linux/crc32poly.h20
-rw-r--r--lib/crc32.c11
-rw-r--r--lib/crc32defs.h14
-rw-r--r--lib/decompress_bunzip2.c3
-rw-r--r--lib/gen_crc32table.c5
-rw-r--r--lib/mpi/mpi-pow.c3
-rw-r--r--lib/xz/xz_crc32.c3
-rw-r--r--net/tls/tls_device_fallback.c2
-rw-r--r--security/keys/dh.c2
174 files changed, 6686 insertions, 2009 deletions
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 006827e30d06..0f6ca8b7261e 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -162,7 +162,7 @@ Code Example For Use of Operational State Memory With SHASH
char *hash_alg_name = "sha1-padlock-nano";
int ret;
- alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0);
+ alg = crypto_alloc_shash(hash_alg_name, 0, 0);
if (IS_ERR(alg)) {
pr_info("can't alloc alg %s\n", hash_alg_name);
return PTR_ERR(alg);
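
[Editorial aside: the hunk above reflects the crypto_alloc_shash() calling convention used throughout this merge; the type argument no longer needs CRYPTO_ALG_TYPE_SHASH and both type and mask are simply passed as 0. A minimal, self-contained sketch of that convention follows. It is illustrative only and not part of this patch; the "sha256" name and the trivial cleanup path are placeholder choices.]

	#include <linux/err.h>
	#include <crypto/hash.h>

	/*
	 * Illustrative sketch of the updated allocation call shown in the
	 * api-samples.rst hunk above; algorithm name and cleanup are
	 * placeholders, not taken from this merge.
	 */
	static int demo_alloc_shash(void)
	{
		struct crypto_shash *tfm;

		/* type and mask are both 0; no CRYPTO_ALG_TYPE_SHASH mask needed */
		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_free_shash(tfm);
		return 0;
	}
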
diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
new file mode 100644
index 000000000000..78d2db9d4de5
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
@@ -0,0 +1,67 @@
+* Hisilicon hip07 Security Accelerator (SEC)
+
+Required properties:
+- compatible: Must contain one of
+ - "hisilicon,hip06-sec"
+ - "hisilicon,hip07-sec"
+- reg: Memory addresses and lengths of the memory regions through which
+ this device is controlled.
+ Region 0 has registers to control the backend processing engines.
+ Region 1 has registers for functionality common to all queues.
+ Regions 2-18 have registers for the 16 individual queues which are isolated
+ both in hardware and within the driver.
+- interrupts: Interrupt specifiers.
+ Refer to interrupt-controller/interrupts.txt for generic interrupt client node
+ bindings.
+ Interrupt 0 is for the SEC unit error queue.
+ Interrupt 2N + 1 is the completion interrupt for queue N.
+ Interrupt 2N + 2 is the error interrupt for queue N.
+- dma-coherent: The driver assumes coherent dma is possible.
+
+Optional properties:
+- iommus: The SEC units are behind smmu-v3 iommus.
+ Refer to iommu/arm,smmu-v3.txt for more information.
+
+Example:
+
+p1_sec_a: crypto@400,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x400 0xd0000000 0x0 0x10000
+ 0x400 0xd2000000 0x0 0x10000
+ 0x400 0xd2010000 0x0 0x10000
+ 0x400 0xd2020000 0x0 0x10000
+ 0x400 0xd2030000 0x0 0x10000
+ 0x400 0xd2040000 0x0 0x10000
+ 0x400 0xd2050000 0x0 0x10000
+ 0x400 0xd2060000 0x0 0x10000
+ 0x400 0xd2070000 0x0 0x10000
+ 0x400 0xd2080000 0x0 0x10000
+ 0x400 0xd2090000 0x0 0x10000
+ 0x400 0xd20a0000 0x0 0x10000
+ 0x400 0xd20b0000 0x0 0x10000
+ 0x400 0xd20c0000 0x0 0x10000
+ 0x400 0xd20d0000 0x0 0x10000
+ 0x400 0xd20e0000 0x0 0x10000
+ 0x400 0xd20f0000 0x0 0x10000
+ 0x400 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_a>;
+ iommus = <&p1_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+};
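
[Editorial aside, not part of the binding text: to make the interrupt numbering above concrete, interrupt 0 is the SEC unit error queue, queue 0 then uses interrupts 1 (completion) and 2 (error), and queue 15 uses interrupts 31 and 32, for 33 specifiers in total, which matches the num-pins = <33> intc_sec mbigen nodes added to hip07.dtsi later in this diff. The same 2N + 1 / 2N + 2 scheme restated as hypothetical helpers:]

	/* Hypothetical helpers restating the queue interrupt layout above. */
	static inline unsigned int sec_queue_done_irq_idx(unsigned int q)
	{
		return 2 * q + 1;	/* queue 0 -> 1, queue 15 -> 31 */
	}

	static inline unsigned int sec_queue_err_irq_idx(unsigned int q)
	{
		return 2 * q + 2;	/* queue 0 -> 2, queue 15 -> 32 */
	}
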
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index 5dba55cdfa63..3bbf144c9988 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -1,8 +1,9 @@
Inside Secure SafeXcel cryptographic engine
Required properties:
-- compatible: Should be "inside-secure,safexcel-eip197" or
- "inside-secure,safexcel-eip97".
+- compatible: Should be "inside-secure,safexcel-eip197b",
+ "inside-secure,safexcel-eip197d" or
+ "inside-secure,safexcel-eip97ies".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
@@ -14,10 +15,18 @@ Optional properties:
name must be "core" for the first clock and "reg" for
the second one.
+Backward compatibility:
+Two compatibles are kept for backward compatibility, but shouldn't be used for
+new submissions:
+- "inside-secure,safexcel-eip197" is equivalent to
+ "inside-secure,safexcel-eip197b".
+- "inside-secure,safexcel-eip97" is equivalent to
+ "inside-secure,safexcel-eip97ies".
+
Example:
crypto: crypto@800000 {
- compatible = "inside-secure,safexcel-eip197";
+ compatible = "inside-secure,safexcel-eip197b";
reg = <0x800000 0x200000>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
index 8e5853c2879b..7ee0e9eac973 100644
--- a/Documentation/devicetree/bindings/rng/qcom,prng.txt
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
@@ -2,7 +2,9 @@ Qualcomm MSM pseudo random number generator.
Required properties:
-- compatible : should be "qcom,prng"
+- compatible : should be "qcom,prng" for 8916 etc
+ : should be "qcom,prng-ee" for 8996 and later using EE
+ (Execution Environment) slice of prng
- reg : specifies base physical address and size of the registers map
- clocks : phandle to clock-controller plus clock-specifier pair
- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
diff --git a/MAINTAINERS b/MAINTAINERS
index 7d1282b2e3f9..967ce8cdd1cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7364,7 +7364,7 @@ M: Megha Dey <megha.dey@linux.intel.com>
R: Tim Chen <tim.c.chen@linux.intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
-F: arch/x86/crypto/sha*-mb
+F: arch/x86/crypto/sha*-mb/
F: crypto/mcryptd.c
INTEL TELEMETRY DRIVER
diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha20-neon-core.S
index 3fecb2124c35..451a849ad518 100644
--- a/arch/arm/crypto/chacha20-neon-core.S
+++ b/arch/arm/crypto/chacha20-neon-core.S
@@ -51,9 +51,8 @@ ENTRY(chacha20_block_xor_neon)
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #16
- vsri.u32 q3, q4, #16
+ veor q3, q3, q0
+ vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
@@ -82,9 +81,8 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #16
- vsri.u32 q3, q4, #16
+ veor q3, q3, q0
+ vrev32.16 q3, q3
// x2 += x3, x1 = rotl32(x1 ^ x2, 12)
vadd.i32 q2, q2, q3
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index d9bb52cae2ac..8930fc4e7c22 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -152,7 +152,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__driver-ghash-ce",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_key),
.cra_module = THIS_MODULE,
@@ -308,9 +308,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index 555f72b5e659..b732522e20f8 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -75,7 +75,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 6fc73bf8766d..98ab8239f919 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -67,7 +67,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 4e22f122f966..d15e0ea2c95e 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -83,7 +83,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index df4dcef054ae..1211a5c129fc 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -78,7 +78,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -93,7 +92,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index a84e869ef900..bf8ccff2c9d0 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -71,7 +71,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -86,7 +85,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-asm",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 39ccd658817e..9bbee56fbdc8 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -79,7 +79,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -94,7 +93,6 @@ struct shash_alg sha256_neon_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-neon",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 269a394e4a53..86540cd4a6fa 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -63,7 +63,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-arm",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -78,7 +77,6 @@ static struct shash_alg sha512_arm_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-arm",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 32693684a3ab..8a5642b41fd6 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -75,7 +75,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-neon",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
@@ -91,7 +90,6 @@ struct shash_alg sha512_neon_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-neon",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index 9c10030a07f8..c33adefc3061 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -1049,7 +1049,74 @@
num-pins = <2>;
};
};
+ p0_mbigen_alg_a:interrupt-controller@d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x0 0xd0080000 0x0 0x10000>;
+ p0_mbigen_sec_a: intc_sec {
+ msi-parent = <&p0_its_dsa_a 0x40400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p0_mbigen_smmu_alg_a: intc_smmu_alg {
+ msi-parent = <&p0_its_dsa_a 0x40b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p0_mbigen_alg_b:interrupt-controller@8,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x8 0xd0080000 0x0 0x10000>;
+
+ p0_mbigen_sec_b: intc_sec {
+ msi-parent = <&p0_its_dsa_b 0x42400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p0_mbigen_smmu_alg_b: intc_smmu_alg {
+ msi-parent = <&p0_its_dsa_b 0x42b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p1_mbigen_alg_a:interrupt-controller@400,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x400 0xd0080000 0x0 0x10000>;
+
+ p1_mbigen_sec_a: intc_sec {
+ msi-parent = <&p1_its_dsa_a 0x44400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p1_mbigen_smmu_alg_a: intc_smmu_alg {
+ msi-parent = <&p1_its_dsa_a 0x44b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+ p1_mbigen_alg_b:interrupt-controller@408,d0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x408 0xd0080000 0x0 0x10000>;
+
+ p1_mbigen_sec_b: intc_sec {
+ msi-parent = <&p1_its_dsa_b 0x46400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <33>;
+ };
+ p1_mbigen_smmu_alg_b: intc_smmu_alg {
+ msi-parent = <&p1_its_dsa_b 0x46b1b>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
p0_mbigen_dsa_a: interrupt-controller@c0080000 {
compatible = "hisilicon,mbigen-v2";
reg = <0x0 0xc0080000 0x0 0x10000>;
@@ -1107,6 +1174,58 @@
hisilicon,broken-prefetch-cmd;
status = "disabled";
};
+ p0_smmu_alg_a: smmu_alg@d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p0_mbigen_smmu_alg_a>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p0_smmu_alg_b: smmu_alg@8,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x8 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p0_mbigen_smmu_alg_b>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p1_smmu_alg_a: smmu_alg@400,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x400 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p1_mbigen_smmu_alg_a>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
+ p1_smmu_alg_b: smmu_alg@408,d0040000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x408 0xd0040000 0x0 0x20000>;
+ interrupt-parent = <&p1_mbigen_smmu_alg_b>;
+ interrupts = <733 1>,
+ <734 1>,
+ <735 1>;
+ interrupt-names = "eventq", "gerror", "priq";
+ #iommu-cells = <1>;
+ dma-coherent;
+ hisilicon,broken-prefetch-cmd;
+ /* smmu-cb-memtype = <0x0 0x1>;*/
+ };
soc {
compatible = "simple-bus";
@@ -1603,5 +1722,170 @@
0x0 0 0 4 &mbigen_pcie2_a 671 4>;
status = "disabled";
};
+ p0_sec_a: crypto@d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x0 0xd0000000 0x0 0x10000
+ 0x0 0xd2000000 0x0 0x10000
+ 0x0 0xd2010000 0x0 0x10000
+ 0x0 0xd2020000 0x0 0x10000
+ 0x0 0xd2030000 0x0 0x10000
+ 0x0 0xd2040000 0x0 0x10000
+ 0x0 0xd2050000 0x0 0x10000
+ 0x0 0xd2060000 0x0 0x10000
+ 0x0 0xd2070000 0x0 0x10000
+ 0x0 0xd2080000 0x0 0x10000
+ 0x0 0xd2090000 0x0 0x10000
+ 0x0 0xd20a0000 0x0 0x10000
+ 0x0 0xd20b0000 0x0 0x10000
+ 0x0 0xd20c0000 0x0 0x10000
+ 0x0 0xd20d0000 0x0 0x10000
+ 0x0 0xd20e0000 0x0 0x10000
+ 0x0 0xd20f0000 0x0 0x10000
+ 0x0 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p0_mbigen_sec_a>;
+ iommus = <&p0_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p0_sec_b: crypto@8,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x8 0xd0000000 0x0 0x10000
+ 0x8 0xd2000000 0x0 0x10000
+ 0x8 0xd2010000 0x0 0x10000
+ 0x8 0xd2020000 0x0 0x10000
+ 0x8 0xd2030000 0x0 0x10000
+ 0x8 0xd2040000 0x0 0x10000
+ 0x8 0xd2050000 0x0 0x10000
+ 0x8 0xd2060000 0x0 0x10000
+ 0x8 0xd2070000 0x0 0x10000
+ 0x8 0xd2080000 0x0 0x10000
+ 0x8 0xd2090000 0x0 0x10000
+ 0x8 0xd20a0000 0x0 0x10000
+ 0x8 0xd20b0000 0x0 0x10000
+ 0x8 0xd20c0000 0x0 0x10000
+ 0x8 0xd20d0000 0x0 0x10000
+ 0x8 0xd20e0000 0x0 0x10000
+ 0x8 0xd20f0000 0x0 0x10000
+ 0x8 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p0_mbigen_sec_b>;
+ iommus = <&p0_smmu_alg_b 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p1_sec_a: crypto@400,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x400 0xd0000000 0x0 0x10000
+ 0x400 0xd2000000 0x0 0x10000
+ 0x400 0xd2010000 0x0 0x10000
+ 0x400 0xd2020000 0x0 0x10000
+ 0x400 0xd2030000 0x0 0x10000
+ 0x400 0xd2040000 0x0 0x10000
+ 0x400 0xd2050000 0x0 0x10000
+ 0x400 0xd2060000 0x0 0x10000
+ 0x400 0xd2070000 0x0 0x10000
+ 0x400 0xd2080000 0x0 0x10000
+ 0x400 0xd2090000 0x0 0x10000
+ 0x400 0xd20a0000 0x0 0x10000
+ 0x400 0xd20b0000 0x0 0x10000
+ 0x400 0xd20c0000 0x0 0x10000
+ 0x400 0xd20d0000 0x0 0x10000
+ 0x400 0xd20e0000 0x0 0x10000
+ 0x400 0xd20f0000 0x0 0x10000
+ 0x400 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_a>;
+ iommus = <&p1_smmu_alg_a 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+ p1_sec_b: crypto@408,d2000000 {
+ compatible = "hisilicon,hip07-sec";
+ reg = <0x408 0xd0000000 0x0 0x10000
+ 0x408 0xd2000000 0x0 0x10000
+ 0x408 0xd2010000 0x0 0x10000
+ 0x408 0xd2020000 0x0 0x10000
+ 0x408 0xd2030000 0x0 0x10000
+ 0x408 0xd2040000 0x0 0x10000
+ 0x408 0xd2050000 0x0 0x10000
+ 0x408 0xd2060000 0x0 0x10000
+ 0x408 0xd2070000 0x0 0x10000
+ 0x408 0xd2080000 0x0 0x10000
+ 0x408 0xd2090000 0x0 0x10000
+ 0x408 0xd20a0000 0x0 0x10000
+ 0x408 0xd20b0000 0x0 0x10000
+ 0x408 0xd20c0000 0x0 0x10000
+ 0x408 0xd20d0000 0x0 0x10000
+ 0x408 0xd20e0000 0x0 0x10000
+ 0x408 0xd20f0000 0x0 0x10000
+ 0x408 0xd2100000 0x0 0x10000>;
+ interrupt-parent = <&p1_mbigen_sec_b>;
+ iommus = <&p1_smmu_alg_b 0x600>;
+ dma-coherent;
+ interrupts = <576 4>,
+ <577 1>, <578 4>,
+ <579 1>, <580 4>,
+ <581 1>, <582 4>,
+ <583 1>, <584 4>,
+ <585 1>, <586 4>,
+ <587 1>, <588 4>,
+ <589 1>, <590 4>,
+ <591 1>, <592 4>,
+ <593 1>, <594 4>,
+ <595 1>, <596 4>,
+ <597 1>, <598 4>,
+ <599 1>, <600 4>,
+ <601 1>, <602 4>,
+ <603 1>, <604 4>,
+ <605 1>, <606 4>,
+ <607 1>, <608 4>;
+ };
+
};
};
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index e3e50950a863..adcb83eb683c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -567,7 +567,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cmac(aes)",
.base.cra_driver_name = "cmac-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -583,7 +582,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "xcbc(aes)",
.base.cra_driver_name = "xcbc-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
2 * AES_BLOCK_SIZE,
@@ -599,7 +597,6 @@ static struct shash_alg mac_algs[] = { {
.base.cra_name = "cbcmac(aes)",
.base.cra_driver_name = "cbcmac-aes-" MODE,
.base.cra_priority = PRIO,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct mac_tfm_ctx),
.base.cra_module = THIS_MODULE,
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index c723647b37db..1b319b716d5e 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -46,6 +46,19 @@
ss3 .req v26
ss4 .req v27
+ XL2 .req v8
+ XM2 .req v9
+ XH2 .req v10
+ XL3 .req v11
+ XM3 .req v12
+ XH3 .req v13
+ TT3 .req v14
+ TT4 .req v15
+ HH .req v16
+ HH3 .req v17
+ HH4 .req v18
+ HH34 .req v19
+
.text
.arch armv8-a+crypto
@@ -134,11 +147,25 @@
.endm
.macro __pmull_pre_p64
+ add x8, x3, #16
+ ld1 {HH.2d-HH4.2d}, [x8]
+
+ trn1 SHASH2.2d, SHASH.2d, HH.2d
+ trn2 T1.2d, SHASH.2d, HH.2d
+ eor SHASH2.16b, SHASH2.16b, T1.16b
+
+ trn1 HH34.2d, HH3.2d, HH4.2d
+ trn2 T1.2d, HH3.2d, HH4.2d
+ eor HH34.16b, HH34.16b, T1.16b
+
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57
.endm
.macro __pmull_pre_p8
+ ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ eor SHASH2.16b, SHASH2.16b, SHASH.16b
+
// k00_16 := 0x0000000000000000_000000000000ffff
// k32_48 := 0x00000000ffffffff_0000ffffffffffff
movi k32_48.2d, #0xffffffff
@@ -213,31 +240,88 @@
.endm
.macro __pmull_ghash, pn
- frame_push 5
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
-
-0: ld1 {SHASH.2d}, [x22]
- ld1 {XL.2d}, [x20]
- ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
- eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ ld1 {SHASH.2d}, [x3]
+ ld1 {XL.2d}, [x1]
__pmull_pre_\pn
/* do the head block first, if supplied */
- cbz x23, 1f
- ld1 {T1.2d}, [x23]
- mov x23, xzr
- b 2f
+ cbz x4, 0f
+ ld1 {T1.2d}, [x4]
+ mov x4, xzr
+ b 3f
+
+0: .ifc \pn, p64
+ tbnz w0, #0, 2f // skip until #blocks is a
+ tbnz w0, #1, 2f // round multiple of 4
+
+1: ld1 {XM3.16b-TT4.16b}, [x2], #64
+
+ sub w0, w0, #4
+
+ rev64 T1.16b, XM3.16b
+ rev64 T2.16b, XH3.16b
+ rev64 TT4.16b, TT4.16b
+ rev64 TT3.16b, TT3.16b
+
+ ext IN1.16b, TT4.16b, TT4.16b, #8
+ ext XL3.16b, TT3.16b, TT3.16b, #8
+
+ eor TT4.16b, TT4.16b, IN1.16b
+ pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
+ pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
+ pmull XM2.1q, SHASH2.1d, TT4.1d // (a1 + a0)(b1 + b0)
+
+ eor TT3.16b, TT3.16b, XL3.16b
+ pmull2 XH3.1q, HH.2d, XL3.2d // a1 * b1
+ pmull XL3.1q, HH.1d, XL3.1d // a0 * b0
+ pmull2 XM3.1q, SHASH2.2d, TT3.2d // (a1 + a0)(b1 + b0)
-1: ld1 {T1.2d}, [x21], #16
- sub w19, w19, #1
+ ext IN1.16b, T2.16b, T2.16b, #8
+ eor XL2.16b, XL2.16b, XL3.16b
+ eor XH2.16b, XH2.16b, XH3.16b
+ eor XM2.16b, XM2.16b, XM3.16b
+
+ eor T2.16b, T2.16b, IN1.16b
+ pmull2 XH3.1q, HH3.2d, IN1.2d // a1 * b1
+ pmull XL3.1q, HH3.1d, IN1.1d // a0 * b0
+ pmull XM3.1q, HH34.1d, T2.1d // (a1 + a0)(b1 + b0)
+
+ eor XL2.16b, XL2.16b, XL3.16b
+ eor XH2.16b, XH2.16b, XH3.16b
+ eor XM2.16b, XM2.16b, XM3.16b
+
+ ext IN1.16b, T1.16b, T1.16b, #8
+ ext TT3.16b, XL.16b, XL.16b, #8
+ eor XL.16b, XL.16b, IN1.16b
+ eor T1.16b, T1.16b, TT3.16b
+
+ pmull2 XH.1q, HH4.2d, XL.2d // a1 * b1
+ eor T1.16b, T1.16b, XL.16b
+ pmull XL.1q, HH4.1d, XL.1d // a0 * b0
+ pmull2 XM.1q, HH34.2d, T1.2d // (a1 + a0)(b1 + b0)
+
+ eor XL.16b, XL.16b, XL2.16b
+ eor XH.16b, XH.16b, XH2.16b
+ eor XM.16b, XM.16b, XM2.16b
+
+ eor T2.16b, XL.16b, XH.16b
+ ext T1.16b, XL.16b, XH.16b, #8
+ eor XM.16b, XM.16b, T2.16b
+
+ __pmull_reduce_p64
+
+ eor T2.16b, T2.16b, XH.16b
+ eor XL.16b, XL.16b, T2.16b
+
+ cbz w0, 5f
+ b 1b
+ .endif
-2: /* multiply XL by SHASH in GF(2^128) */
+2: ld1 {T1.2d}, [x2], #16
+ sub w0, w0, #1
+
+3: /* multiply XL by SHASH in GF(2^128) */
CPU_LE( rev64 T1.16b, T1.16b )
ext T2.16b, XL.16b, XL.16b, #8
@@ -250,7 +334,7 @@ CPU_LE( rev64 T1.16b, T1.16b )
__pmull_\pn XL, XL, SHASH // a0 * b0
__pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0)
- eor T2.16b, XL.16b, XH.16b
+4: eor T2.16b, XL.16b, XH.16b
ext T1.16b, XL.16b, XH.16b, #8
eor XM.16b, XM.16b, T2.16b
@@ -259,18 +343,9 @@ CPU_LE( rev64 T1.16b, T1.16b )
eor T2.16b, T2.16b, XH.16b
eor XL.16b, XL.16b, T2.16b
- cbz w19, 3f
-
- if_will_cond_yield_neon
- st1 {XL.2d}, [x20]
- do_cond_yield_neon
- b 0b
- endif_yield_neon
-
- b 1b
+ cbnz w0, 0b
-3: st1 {XL.2d}, [x20]
- frame_pop
+5: st1 {XL.2d}, [x1]
ret
.endm
@@ -286,9 +361,10 @@ ENTRY(pmull_ghash_update_p8)
__pmull_ghash p8
ENDPROC(pmull_ghash_update_p8)
- KS .req v8
- CTR .req v9
- INP .req v10
+ KS0 .req v12
+ KS1 .req v13
+ INP0 .req v14
+ INP1 .req v15
.macro load_round_keys, rounds, rk
cmp \rounds, #12
@@ -322,98 +398,128 @@ ENDPROC(pmull_ghash_update_p8)
.endm
.macro pmull_gcm_do_crypt, enc
- ld1 {SHASH.2d}, [x4]
+ ld1 {SHASH.2d}, [x4], #16
+ ld1 {HH.2d}, [x4]
ld1 {XL.2d}, [x1]
ldr x8, [x5, #8] // load lower counter
- load_round_keys w7, x6
-
movi MASK.16b, #0xe1
- ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+ trn1 SHASH2.2d, SHASH.2d, HH.2d
+ trn2 T1.2d, SHASH.2d, HH.2d
CPU_LE( rev x8, x8 )
shl MASK.2d, MASK.2d, #57
- eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ eor SHASH2.16b, SHASH2.16b, T1.16b
.if \enc == 1
ldr x10, [sp]
- ld1 {KS.16b}, [x10]
+ ld1 {KS0.16b-KS1.16b}, [x10]
.endif
-0: ld1 {CTR.8b}, [x5] // load upper counter
- ld1 {INP.16b}, [x3], #16
+ cbnz x6, 4f
+
+0: ld1 {INP0.16b-INP1.16b}, [x3], #32
+
rev x9, x8
- add x8, x8, #1
- sub w0, w0, #1
- ins CTR.d[1], x9 // set lower counter
+ add x11, x8, #1
+ add x8, x8, #2
.if \enc == 1
- eor INP.16b, INP.16b, KS.16b // encrypt input
- st1 {INP.16b}, [x2], #16
+ eor INP0.16b, INP0.16b, KS0.16b // encrypt input
+ eor INP1.16b, INP1.16b, KS1.16b
.endif
- rev64 T1.16b, INP.16b
+ ld1 {KS0.8b}, [x5] // load upper counter
+ rev x11, x11
+ sub w0, w0, #2
+ mov KS1.8b, KS0.8b
+ ins KS0.d[1], x9 // set lower counter
+ ins KS1.d[1], x11
+
+ rev64 T1.16b, INP1.16b
cmp w7, #12
b.ge 2f // AES-192/256?
-1: enc_round CTR, v21
-
- ext T2.16b, XL.16b, XL.16b, #8
+1: enc_round KS0, v21
ext IN1.16b, T1.16b, T1.16b, #8
- enc_round CTR, v22
+ enc_round KS1, v21
+ pmull2 XH2.1q, SHASH.2d, IN1.2d // a1 * b1
+
+ enc_round KS0, v22
+ eor T1.16b, T1.16b, IN1.16b
+
+ enc_round KS1, v22
+ pmull XL2.1q, SHASH.1d, IN1.1d // a0 * b0
+ enc_round KS0, v23
+ pmull XM2.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+
+ enc_round KS1, v23
+ rev64 T1.16b, INP0.16b
+ ext T2.16b, XL.16b, XL.16b, #8
+
+ enc_round KS0, v24
+ ext IN1.16b, T1.16b, T1.16b, #8
eor T1.16b, T1.16b, T2.16b
- eor XL.16b, XL.16b, IN1.16b
- enc_round CTR, v23
+ enc_round KS1, v24
+ eor XL.16b, XL.16b, IN1.16b
- pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
+ enc_round KS0, v25
eor T1.16b, T1.16b, XL.16b
- enc_round CTR, v24
+ enc_round KS1, v25
+ pmull2 XH.1q, HH.2d, XL.2d // a1 * b1
+
+ enc_round KS0, v26
+ pmull XL.1q, HH.1d, XL.1d // a0 * b0
- pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
- pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+ enc_round KS1, v26
+ pmull2 XM.1q, SHASH2.2d, T1.2d // (a1 + a0)(b1 + b0)
- enc_round CTR, v25
+ enc_round KS0, v27
+ eor XL.16b, XL.16b, XL2.16b
+ eor XH.16b, XH.16b, XH2.16b
+ enc_round KS1, v27
+ eor XM.16b, XM.16b, XM2.16b
ext T1.16b, XL.16b, XH.16b, #8
+
+ enc_round KS0, v28
eor T2.16b, XL.16b, XH.16b
eor XM.16b, XM.16b, T1.16b
- enc_round CTR, v26
-
+ enc_round KS1, v28
eor XM.16b, XM.16b, T2.16b
- pmull T2.1q, XL.1d, MASK.1d
- enc_round CTR, v27
+ enc_round KS0, v29
+ pmull T2.1q, XL.1d, MASK.1d
+ enc_round KS1, v29
mov XH.d[0], XM.d[1]
mov XM.d[1], XL.d[0]
- enc_round CTR, v28
-
+ aese KS0.16b, v30.16b
eor XL.16b, XM.16b, T2.16b
- enc_round CTR, v29
-
+ aese KS1.16b, v30.16b
ext T2.16b, XL.16b, XL.16b, #8
- aese CTR.16b, v30.16b
-
+ eor KS0.16b, KS0.16b, v31.16b
pmull XL.1q, XL.1d, MASK.1d
eor T2.16b, T2.16b, XH.16b
- eor KS.16b, CTR.16b, v31.16b
-
+ eor KS1.16b, KS1.16b, v31.16b
eor XL.16b, XL.16b, T2.16b
.if \enc == 0
- eor INP.16b, INP.16b, KS.16b
- st1 {INP.16b}, [x2], #16
+ eor INP0.16b, INP0.16b, KS0.16b
+ eor INP1.16b, INP1.16b, KS1.16b
.endif
+ st1 {INP0.16b-INP1.16b}, [x2], #32
+
cbnz w0, 0b
CPU_LE( rev x8, x8 )
@@ -421,17 +527,24 @@ CPU_LE( rev x8, x8 )
str x8, [x5, #8] // store lower counter
.if \enc == 1
- st1 {KS.16b}, [x10]
+ st1 {KS0.16b-KS1.16b}, [x10]
.endif
ret
2: b.eq 3f // AES-192?
- enc_round CTR, v17
- enc_round CTR, v18
-3: enc_round CTR, v19
- enc_round CTR, v20
+ enc_round KS0, v17
+ enc_round KS1, v17
+ enc_round KS0, v18
+ enc_round KS1, v18
+3: enc_round KS0, v19
+ enc_round KS1, v19
+ enc_round KS0, v20
+ enc_round KS1, v20
b 1b
+
+4: load_round_keys w7, x6
+ b 0b
.endm
/*
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 8a10f1d7199a..6e9f33d14930 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -33,9 +33,12 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GCM_IV_SIZE 12
struct ghash_key {
- u64 a;
- u64 b;
- be128 k;
+ u64 h[2];
+ u64 h2[2];
+ u64 h3[2];
+ u64 h4[2];
+
+ be128 k;
};
struct ghash_desc_ctx {
@@ -113,6 +116,9 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
}
}
+/* avoid hogging the CPU for too long */
+#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)
+
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
@@ -136,11 +142,16 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
blocks = len / GHASH_BLOCK_SIZE;
len %= GHASH_BLOCK_SIZE;
- ghash_do_update(blocks, ctx->digest, src, key,
- partial ? ctx->buf : NULL);
+ do {
+ int chunk = min(blocks, MAX_BLOCKS);
- src += blocks * GHASH_BLOCK_SIZE;
- partial = 0;
+ ghash_do_update(chunk, ctx->digest, src, key,
+ partial ? ctx->buf : NULL);
+
+ blocks -= chunk;
+ src += chunk * GHASH_BLOCK_SIZE;
+ partial = 0;
+ } while (unlikely(blocks > 0));
}
if (len)
memcpy(ctx->buf + partial, src, len);
@@ -166,23 +177,36 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
return 0;
}
+static void ghash_reflect(u64 h[], const be128 *k)
+{
+ u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;
+
+ h[0] = (be64_to_cpu(k->b) << 1) | carry;
+ h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);
+
+ if (carry)
+ h[1] ^= 0xc200000000000000UL;
+}
+
static int __ghash_setkey(struct ghash_key *key,
const u8 *inkey, unsigned int keylen)
{
- u64 a, b;
+ be128 h;
/* needed for the fallback */
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
- /* perform multiplication by 'x' in GF(2^128) */
- b = get_unaligned_be64(inkey);
- a = get_unaligned_be64(inkey + 8);
+ ghash_reflect(key->h, &key->k);
- key->a = (a << 1) | (b >> 63);
- key->b = (b << 1) | (a >> 63);
+ h = key->k;
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h2, &h);
- if (b >> 63)
- key->b ^= 0xc200000000000000UL;
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h3, &h);
+
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h4, &h);
return 0;
}
@@ -204,7 +228,6 @@ static struct shash_alg ghash_alg = {
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key),
.base.cra_module = THIS_MODULE,
@@ -245,7 +268,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
__aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
num_rounds(&ctx->aes_key));
- return __ghash_setkey(&ctx->ghash_key, key, sizeof(key));
+ return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -349,9 +372,10 @@ static int gcm_encrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
struct skcipher_walk walk;
u8 iv[AES_BLOCK_SIZE];
- u8 ks[AES_BLOCK_SIZE];
+ u8 ks[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
u64 dg[2] = {};
+ int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@@ -360,39 +384,39 @@ static int gcm_encrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
- if (likely(may_use_simd())) {
- kernel_neon_begin();
+ err = skcipher_walk_aead_encrypt(&walk, req, false);
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ u32 const *rk = NULL;
+
+ kernel_neon_begin();
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- pmull_gcm_encrypt_block(ks, iv, NULL,
- num_rounds(&ctx->aes_key));
+ pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
put_unaligned_be32(3, iv + GCM_IV_SIZE);
- kernel_neon_end();
+ pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
+ put_unaligned_be32(4, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_encrypt(&walk, req, false);
+ do {
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ if (rk)
+ kernel_neon_begin();
- kernel_neon_begin();
pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key), ks);
+ iv, rk, nrounds, ks);
kernel_neon_end();
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
- }
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
+
+ rk = ctx->aes_key.key_enc;
+ } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
- num_rounds(&ctx->aes_key));
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_encrypt(&walk, req, false);
-
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
@@ -400,8 +424,7 @@ static int gcm_encrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
- ks, iv,
- num_rounds(&ctx->aes_key));
+ ks, iv, nrounds);
crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -418,19 +441,28 @@ static int gcm_encrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
- num_rounds(&ctx->aes_key));
+ nrounds);
}
/* handle the tail */
if (walk.nbytes) {
u8 buf[GHASH_BLOCK_SIZE];
+ unsigned int nbytes = walk.nbytes;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *head = NULL;
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
walk.nbytes);
- memcpy(buf, walk.dst.virt.addr, walk.nbytes);
- memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ if (walk.nbytes > GHASH_BLOCK_SIZE) {
+ head = dst;
+ dst += GHASH_BLOCK_SIZE;
+ nbytes %= GHASH_BLOCK_SIZE;
+ }
+
+ memcpy(buf, dst, nbytes);
+ memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
err = skcipher_walk_done(&walk, 0);
}
@@ -453,10 +485,11 @@ static int gcm_decrypt(struct aead_request *req)
struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
struct skcipher_walk walk;
- u8 iv[AES_BLOCK_SIZE];
+ u8 iv[2 * AES_BLOCK_SIZE];
u8 tag[AES_BLOCK_SIZE];
- u8 buf[GHASH_BLOCK_SIZE];
+ u8 buf[2 * GHASH_BLOCK_SIZE];
u64 dg[2] = {};
+ int nrounds = num_rounds(&ctx->aes_key);
int err;
if (req->assoclen)
@@ -465,43 +498,53 @@ static int gcm_decrypt(struct aead_request *req)
memcpy(iv, req->iv, GCM_IV_SIZE);
put_unaligned_be32(1, iv + GCM_IV_SIZE);
- if (likely(may_use_simd())) {
- kernel_neon_begin();
+ err = skcipher_walk_aead_decrypt(&walk, req, false);
+
+ if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ u32 const *rk = NULL;
- pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ kernel_neon_begin();
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- kernel_neon_end();
- err = skcipher_walk_aead_decrypt(&walk, req, false);
+ do {
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+ int rem = walk.total - blocks * AES_BLOCK_SIZE;
- while (walk.nbytes >= AES_BLOCK_SIZE) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ if (rk)
+ kernel_neon_begin();
- kernel_neon_begin();
pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
walk.src.virt.addr, &ctx->ghash_key,
- iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
+ iv, rk, nrounds);
+
+ /* check if this is the final iteration of the loop */
+ if (rem < (2 * AES_BLOCK_SIZE)) {
+ u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+ if (rem > AES_BLOCK_SIZE) {
+ memcpy(iv2, iv, AES_BLOCK_SIZE);
+ crypto_inc(iv2, AES_BLOCK_SIZE);
+ }
+
+ pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
+
+ if (rem > AES_BLOCK_SIZE)
+ pmull_gcm_encrypt_block(iv2, iv2, NULL,
+ nrounds);
+ }
+
kernel_neon_end();
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
- }
- if (walk.nbytes) {
- kernel_neon_begin();
- pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
- num_rounds(&ctx->aes_key));
- kernel_neon_end();
- }
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
+ rk = ctx->aes_key.key_enc;
+ } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
} else {
- __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
- num_rounds(&ctx->aes_key));
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- err = skcipher_walk_aead_decrypt(&walk, req, false);
-
while (walk.nbytes >= AES_BLOCK_SIZE) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
@@ -512,8 +555,7 @@ static int gcm_decrypt(struct aead_request *req)
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
- buf, iv,
- num_rounds(&ctx->aes_key));
+ buf, iv, nrounds);
crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
crypto_inc(iv, AES_BLOCK_SIZE);
@@ -526,14 +568,24 @@ static int gcm_decrypt(struct aead_request *req)
}
if (walk.nbytes)
__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
- num_rounds(&ctx->aes_key));
+ nrounds);
}
/* handle the tail */
if (walk.nbytes) {
- memcpy(buf, walk.src.virt.addr, walk.nbytes);
- memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ const u8 *src = walk.src.virt.addr;
+ const u8 *head = NULL;
+ unsigned int nbytes = walk.nbytes;
+
+ if (walk.nbytes > GHASH_BLOCK_SIZE) {
+ head = src;
+ src += GHASH_BLOCK_SIZE;
+ nbytes %= GHASH_BLOCK_SIZE;
+ }
+
+ memcpy(buf, src, nbytes);
+ memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
+ ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head);
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
walk.nbytes);
@@ -558,7 +610,7 @@ static int gcm_decrypt(struct aead_request *req)
static struct aead_alg gcm_aes_alg = {
.ivsize = GCM_IV_SIZE,
- .chunksize = AES_BLOCK_SIZE,
+ .chunksize = 2 * AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_setkey,
.setauthsize = gcm_setauthsize,
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index efbeb3e0dcfb..17fac2889f56 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -99,7 +99,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index fd1ff2b13dfa..261f5195cab7 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -114,7 +114,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -129,7 +128,6 @@ static struct shash_alg algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ce",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index e8880ccdc71f..4aedeaefd61f 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -67,8 +67,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_priority = 125,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -80,8 +79,7 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha256_state),
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64",
- .base.cra_priority = 100,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_priority = 125,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
@@ -153,7 +151,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha256",
.base.cra_driver_name = "sha256-arm64-neon",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -166,7 +163,6 @@ static struct shash_alg neon_algs[] = { {
.base.cra_name = "sha224",
.base.cra_driver_name = "sha224-arm64-neon",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index da8222e528bd..a336feac0f59 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -105,7 +105,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -117,7 +116,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -129,7 +127,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
@@ -141,7 +138,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index a77c8632a589..f2c5f28c622a 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -87,7 +87,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -100,7 +99,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-ce",
.base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index 27db4851e380..325b23b43a9b 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -63,7 +63,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha512",
.base.cra_driver_name = "sha512-arm64",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -76,7 +75,6 @@ static struct shash_alg algs[] = { {
.base.cra_name = "sha384",
.base.cra_driver_name = "sha384-arm64",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 3b4948f7e26f..88938a20d9b2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -72,7 +72,6 @@ static struct shash_alg sm3_alg = {
.descsize = sizeof(struct sm3_state),
.base.cra_name = "sm3",
.base.cra_driver_name = "sm3-ce",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SM3_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
.base.cra_priority = 200,
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index af4c712f7afc..d1ed066e1a17 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -182,7 +182,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "octeon-md5",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
index 2b74b5b67cae..80d71e775936 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c
@@ -215,7 +215,6 @@ static struct shash_alg octeon_sha1_alg = {
.cra_name = "sha1",
.cra_driver_name= "octeon-sha1",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
index 97e96fead08a..8b931e640926 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c
@@ -239,7 +239,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "octeon-sha256",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -252,7 +251,6 @@ static struct shash_alg octeon_sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "octeon-sha224",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
index d5fb3c6f22ae..6c9561496257 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c
@@ -235,7 +235,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha512",
.cra_driver_name= "octeon-sha512",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -249,7 +248,6 @@ static struct shash_alg octeon_sha512_algs[2] = { {
.cra_name = "sha384",
.cra_driver_name= "octeon-sha384",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 92289679b4c4..7e44cec37bdb 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-ppc",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index f9ebc38d3fe7..9e1814d99318 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -185,7 +185,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index c154cebc1041..3911d5c254fa 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -132,7 +132,6 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-powerpc",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index 718a079dcdbf..6227888dcf7e 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -231,7 +231,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha256",
.cra_driver_name= "sha256-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -248,7 +247,6 @@ static struct shash_alg algs[2] = { {
.cra_name = "sha224",
.cra_driver_name= "sha224-ppc-spe",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index ad47abd08630..c54cb26eb7f5 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -1035,7 +1035,6 @@ static struct aead_alg gcm_aes_aead = {
.chunksize = AES_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_priority = 900,
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 3b7f96c9eead..86aed30fad3a 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -128,7 +128,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a00c17f761c1..009572e8276d 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -78,7 +78,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 944aa6b237cd..62833a1d8724 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -71,7 +71,6 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -108,7 +107,6 @@ static struct shash_alg sha224_alg = {
.cra_name = "sha224",
.cra_driver_name= "sha224-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index b17eded532b1..be589c340d15 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -76,7 +76,6 @@ static struct shash_alg sha512_alg = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -115,7 +114,6 @@ static struct shash_alg sha384_alg = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index c9d2b922734b..bc9cc26efa3d 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -144,7 +144,6 @@ static struct shash_alg alg = {
.cra_name = "md5",
.cra_driver_name= "md5-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 1b3e47accc74..4d6d7faf728e 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -139,7 +139,6 @@ static struct shash_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 285268ca9279..54c4de2db188 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -169,7 +169,6 @@ static struct shash_alg sha256 = {
.cra_name = "sha256",
.cra_driver_name= "sha256-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -185,7 +184,6 @@ static struct shash_alg sha224 = {
.cra_name = "sha224",
.cra_driver_name= "sha224-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index 11eb36c3fc8c..4c55e97a4408 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -154,7 +154,6 @@ static struct shash_alg sha512 = {
.cra_name = "sha512",
.cra_driver_name= "sha512-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -170,7 +169,6 @@ static struct shash_alg sha384 = {
.cra_name = "sha384",
.cra_driver_name= "sha384-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 2ddbe3a1868b..3582ae885ee1 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -154,8 +154,7 @@ static struct shash_alg ghash_alg = {
.cra_name = "__ghash",
.cra_driver_name = "__ghash-pclmulqdqni",
.cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
@@ -315,9 +314,8 @@ static struct ahash_alg ghash_async_alg = {
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
.cra_ctxsize = sizeof(struct ghash_async_ctx),
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_init = ghash_async_init_tfm,
.cra_exit = ghash_async_exit_tfm,
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 790377797544..f012b7e28ad1 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -169,7 +169,6 @@ static struct shash_alg alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index e17655ffde79..b93805664c1d 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -746,9 +746,8 @@ static struct ahash_alg sha1_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -871,10 +870,16 @@ static struct ahash_alg sha1_mb_async_alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
.cra_init = sha1_mb_async_init_tfm,
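The priority comment above points callers at explicit selection by driver name. As a rough sketch (the helper name below is illustrative and not part of this patch), a kernel user that specifically wants the multibuffer implementation could allocate it directly rather than relying on priority-based selection for "sha1":

#include <crypto/hash.h>
#include <linux/err.h>

/* Illustrative only: request the multibuffer driver by name instead of
 * letting cra_priority pick the "sha1" implementation. */
static struct crypto_ahash *get_sha1_mb_tfm(void)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("sha1_mb", 0, 0);
	if (IS_ERR(tfm))
		return NULL;
	return tfm;
}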
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739150e7..7391c7de72c7 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -104,7 +104,6 @@ static struct shash_alg sha1_ssse3_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -157,7 +156,6 @@ static struct shash_alg sha1_avx_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -249,7 +247,6 @@ static struct shash_alg sha1_avx2_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -307,7 +304,6 @@ static struct shash_alg sha1_ni_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 4c46ac1b6653..97c5fc43e115 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -745,9 +745,8 @@ static struct ahash_alg sha256_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -870,11 +869,16 @@ static struct ahash_alg sha256_mb_async_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha256_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 16c4ccb1f154..d2364c55bbde 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest(state , idx, 4) , %xmm0
+ vmovd _args_digest+4*32(state, idx, 4), %xmm1
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 9e79baf03a4b..773a873d2b28 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -109,7 +109,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -124,7 +123,6 @@ static struct shash_alg sha256_ssse3_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -177,7 +175,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -192,7 +189,6 @@ static struct shash_alg sha256_avx_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -261,7 +257,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -276,7 +271,6 @@ static struct shash_alg sha256_avx2_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -343,7 +337,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha256",
.cra_driver_name = "sha256-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -358,7 +351,6 @@ static struct shash_alg sha256_ni_algs[] = { {
.cra_name = "sha224",
.cra_driver_name = "sha224-ni",
.cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 39e2bbdc1836..26b85678012d 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -778,9 +778,8 @@ static struct ahash_alg sha512_mb_areq_alg = {
* algo may not have completed before hashing thread
* sleep
*/
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_INTERNAL,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
@@ -904,11 +903,16 @@ static struct ahash_alg sha512_mb_async_alg = {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512_mb",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ /*
+ * Low priority, since with few concurrent hash requests
+ * this is extremely slow due to the flush delay. Users
+ * whose workloads would benefit from this can request
+ * it explicitly by driver name, or can increase its
+ * priority at runtime using NETLINK_CRYPTO.
+ */
+ .cra_priority = 50,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_type = &crypto_ahash_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT
(sha512_mb_async_alg.halg.base.cra_list),
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 2b0e2a6825f3..f1b811b60ba6 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -109,7 +109,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -124,7 +123,6 @@ static struct shash_alg sha512_ssse3_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-ssse3",
.cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -188,7 +186,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -203,7 +200,6 @@ static struct shash_alg sha512_avx_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx",
.cra_priority = 160,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -261,7 +257,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha512",
.cra_driver_name = "sha512-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -276,7 +271,6 @@ static struct shash_alg sha512_avx2_algs[] = { {
.cra_name = "sha384",
.cra_driver_name = "sha384-avx2",
.cra_priority = 170,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index d880a4897159..8882e90e868e 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
- unsigned int bsize)
+static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ unsigned int n)
{
- unsigned int n = bsize;
-
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
n -= len_this_page;
scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
-
- return bsize;
}
-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
- unsigned int n)
+static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
+ unsigned int n)
{
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err)
{
struct crypto_tfm *tfm = req->base.tfm;
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
- n = ablkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = ablkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
+ ablkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ ablkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(req->base.flags);
return ablkcipher_walk_next(req, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
kfree(walk->iv_buffer);
-
return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
@@ -373,6 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -447,6 +443,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
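The two added terminations guard against strncpy() not writing a NUL byte when the source string fills the destination. A minimal userspace sketch of the pitfall and of the fix pattern (buffer size and string chosen purely for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char geniv[8];
	const char *name = "exactly8";	/* 8 chars, same as sizeof(geniv) */

	strncpy(geniv, name, sizeof(geniv));	/* leaves geniv unterminated */
	geniv[sizeof(geniv) - 1] = '\0';	/* the fix: force termination */
	printf("%s\n", geniv);			/* now safe to print */
	return 0;
}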
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 38271303ce16..c22f4414856d 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -429,7 +429,6 @@ static struct aead_alg crypto_aegis128_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 0cc1a7525c85..b6fb21ebdc3e 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -121,7 +121,7 @@ static void crypto_aegis128l_ad(struct aegis_state *state,
(const union aegis_chunk *)src;
while (size >= AEGIS128L_CHUNK_SIZE) {
- crypto_aegis128l_update_a(state, src_chunk);
+ crypto_aegis128l_update_a(state, src_chunk);
size -= AEGIS128L_CHUNK_SIZE;
src_chunk += 1;
@@ -493,7 +493,6 @@ static struct aead_alg crypto_aegis128l_alg = {
.chunksize = AEGIS128L_CHUNK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index a489d741d33a..11f0f8ec9c7c 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -444,7 +444,6 @@ static struct aead_alg crypto_aegis256_alg = {
.chunksize = AEGIS_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx),
.cra_alignmask = 0,
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 01c0d4aa2563..f93abf13b5d4 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
- unsigned int bsize)
+static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
+ unsigned int bsize)
{
u8 *addr;
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
- return bsize;
}
-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
- unsigned int n)
+static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
+ unsigned int n)
{
if (walk->flags & BLKCIPHER_WALK_COPY) {
blkcipher_map_dst(walk);
@@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err)
{
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
- n = blkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = blkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
+ blkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ blkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
-
- if (nbytes) {
+ if (more) {
crypto_yield(desc->flags);
return blkcipher_walk_next(desc, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
-
return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
@@ -512,6 +510,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 20ff2c746e0b..0959b268966c 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -104,7 +104,6 @@ static struct shash_alg digest_null = {
.final = null_final,
.base = {
.cra_name = "digest_null",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/dh.c b/crypto/dh.c
index 5659fe7f446d..09a44de4209d 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -16,14 +16,16 @@
#include <linux/mpi.h>
struct dh_ctx {
- MPI p;
- MPI g;
- MPI xa;
+ MPI p; /* Value is guaranteed to be set. */
+ MPI q; /* Value is optional. */
+ MPI g; /* Value is guaranteed to be set. */
+ MPI xa; /* Value is guaranteed to be set. */
};
static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
+ mpi_free(ctx->q);
mpi_free(ctx->g);
mpi_free(ctx->xa);
memset(ctx, 0, sizeof(*ctx));
@@ -60,6 +62,12 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
if (!ctx->p)
return -EINVAL;
+ if (params->q && params->q_size) {
+ ctx->q = mpi_read_raw_data(params->q, params->q_size);
+ if (!ctx->q)
+ return -EINVAL;
+ }
+
ctx->g = mpi_read_raw_data(params->g, params->g_size);
if (!ctx->g)
return -EINVAL;
@@ -93,6 +101,55 @@ err_clear_ctx:
return -EINVAL;
}
+/*
+ * SP800-56A public key verification:
+ *
+ * * If Q is provided as part of the domain parameters, a full validation
+ * according to SP800-56A section 5.6.2.3.1 is performed.
+ *
+ * * If Q is not provided, a partial validation according to SP800-56A section
+ * 5.6.2.3.2 is performed.
+ */
+static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
+{
+ if (unlikely(!ctx->p))
+ return -EINVAL;
+
+ /*
+ * Step 1: Verify that 2 <= y <= p - 2.
+ *
+ * The upper limit check is actually y < p instead of y < p - 1
+ * as the mpi_sub_ui function is yet missing.
+ */
+ if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
+ return -EINVAL;
+
+ /* Step 2: Verify that 1 = y^q mod p */
+ if (ctx->q) {
+ MPI val = mpi_alloc(0);
+ int ret;
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = mpi_powm(val, y, ctx->q, ctx->p);
+
+ if (ret) {
+ mpi_free(val);
+ return ret;
+ }
+
+ ret = mpi_cmp_ui(val, 1);
+
+ mpi_free(val);
+
+ if (ret != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -115,6 +172,9 @@ static int dh_compute_value(struct kpp_request *req)
ret = -EINVAL;
goto err_free_val;
}
+ ret = dh_is_pubkey_valid(ctx, base);
+ if (ret)
+ goto err_free_base;
} else {
base = ctx->g;
}
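For a sense of what the new dh_is_pubkey_valid() check buys, here is a toy illustration in plain C with small numbers (p = 23, q = 11; values chosen for illustration, not the kernel MPI code): a key in the order-q subgroup satisfies y^q mod p == 1, while one outside it is rejected.

#include <stdio.h>
#include <stdint.h>

/* Square-and-multiply modular exponentiation on toy-sized values. */
static uint64_t modexp(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t result = 1;

	base %= mod;
	while (exp) {
		if (exp & 1)
			result = result * base % mod;
		base = base * base % mod;
		exp >>= 1;
	}
	return result;
}

int main(void)
{
	const uint64_t p = 23, q = 11;

	printf("4^11 mod 23 = %llu (accepted)\n",
	       (unsigned long long)modexp(4, q, p));	/* prints 1 */
	printf("5^11 mod 23 = %llu (rejected)\n",
	       (unsigned long long)modexp(5, q, p));	/* prints 22 */
	return 0;
}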
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 24fdb2ecaa85..edacda5f6a4d 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -14,10 +14,12 @@
#include <crypto/dh.h>
#include <crypto/kpp.h>
-#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
-static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
+static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
{
+ if (!dst || size > end - dst)
+ return NULL;
memcpy(dst, src, size);
return dst + size;
}
@@ -30,7 +32,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
static inline unsigned int dh_data_size(const struct dh *p)
{
- return p->key_size + p->p_size + p->g_size;
+ return p->key_size + p->p_size + p->q_size + p->g_size;
}
unsigned int crypto_dh_key_len(const struct dh *p)
@@ -42,25 +44,27 @@ EXPORT_SYMBOL_GPL(crypto_dh_key_len);
int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
{
u8 *ptr = buf;
+ u8 * const end = ptr + len;
struct kpp_secret secret = {
.type = CRYPTO_KPP_SECRET_TYPE_DH,
.len = len
};
- if (unlikely(!buf))
+ if (unlikely(!len))
return -EINVAL;
- if (len != crypto_dh_key_len(params))
+ ptr = dh_pack_data(ptr, end, &secret, sizeof(secret));
+ ptr = dh_pack_data(ptr, end, &params->key_size,
+ sizeof(params->key_size));
+ ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
+ ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
+ ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
+ ptr = dh_pack_data(ptr, end, params->key, params->key_size);
+ ptr = dh_pack_data(ptr, end, params->p, params->p_size);
+ ptr = dh_pack_data(ptr, end, params->q, params->q_size);
+ ptr = dh_pack_data(ptr, end, params->g, params->g_size);
+ if (ptr != end)
return -EINVAL;
-
- ptr = dh_pack_data(ptr, &secret, sizeof(secret));
- ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
- ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
- ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
- ptr = dh_pack_data(ptr, params->key, params->key_size);
- ptr = dh_pack_data(ptr, params->p, params->p_size);
- dh_pack_data(ptr, params->g, params->g_size);
-
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
@@ -79,6 +83,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
+ ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
@@ -88,7 +93,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
* some drivers assume otherwise.
*/
if (params->key_size > params->p_size ||
- params->g_size > params->p_size)
+ params->g_size > params->p_size || params->q_size > params->p_size)
return -EINVAL;
/* Don't allocate memory. Set pointers to data within
@@ -96,7 +101,9 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
*/
params->key = (void *)ptr;
params->p = (void *)(ptr + params->key_size);
- params->g = (void *)(ptr + params->key_size + params->p_size);
+ params->q = (void *)(ptr + params->key_size + params->p_size);
+ params->g = (void *)(ptr + params->key_size + params->p_size +
+ params->q_size);
/*
* Don't permit 'p' to be 0. It's not a prime number, and it's subject
@@ -106,6 +113,10 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
if (memchr_inv(params->p, 0, params->p_size) == NULL)
return -EINVAL;
+ /* It is permissible to not provide Q. */
+ if (params->q_size == 0)
+ params->q = NULL;
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
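The rewritten dh_pack_data() carries an end pointer and propagates NULL on overflow, so the final ptr != end test catches any size mismatch instead of writing past the buffer. A standalone sketch of the same pattern (the helper and payload names are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

/* Bounded packing: returns the advanced cursor, or NULL once any copy
 * would overrun the buffer; NULL then propagates through later calls. */
static unsigned char *pack(unsigned char *dst, const unsigned char *end,
			   const void *src, size_t size)
{
	if (!dst || size > (size_t)(end - dst))
		return NULL;
	memcpy(dst, src, size);
	return dst + size;
}

int main(void)
{
	unsigned char buf[8], *p = buf;
	const unsigned char *end = buf + sizeof(buf);
	unsigned int a = 1, b = 2;

	p = pack(p, end, &a, sizeof(a));
	p = pack(p, end, &b, sizeof(b));
	printf("%s\n", p == end ? "encoded" : "size mismatch");
	return 0;
}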
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 466a112a4446..bc52d9562611 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -261,8 +261,7 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
-#define DRBG_CTR_NULL_LEN 128
-#define DRBG_OUTSCRATCHLEN DRBG_CTR_NULL_LEN
+#define DRBG_OUTSCRATCHLEN 256
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -555,8 +554,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
}
/* 10.2.1.5.2 step 4.1 */
- ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
- buf, len);
+ ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len);
if (ret)
return ret;
@@ -1644,9 +1642,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
skcipher_request_free(drbg->ctr_req);
drbg->ctr_req = NULL;
- kfree(drbg->ctr_null_value_buf);
- drbg->ctr_null_value = NULL;
-
kfree(drbg->outscratchpadbuf);
drbg->outscratchpadbuf = NULL;
@@ -1697,15 +1692,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
- drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
- GFP_KERNEL);
- if (!drbg->ctr_null_value_buf) {
- drbg_fini_sym_kernel(drbg);
- return -ENOMEM;
- }
- drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
- alignmask + 1);
-
drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
GFP_KERNEL);
if (!drbg->outscratchpadbuf) {
@@ -1715,6 +1701,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
alignmask + 1);
+ sg_init_table(&drbg->sg_in, 1);
+ sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+
return alignmask;
}
@@ -1743,17 +1732,25 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
{
- struct scatterlist sg_in, sg_out;
+ struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
+ u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
int ret;
- sg_init_one(&sg_in, inbuf, inlen);
- sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+ if (inbuf) {
+ /* Use caller-provided input buffer */
+ sg_set_buf(sg_in, inbuf, inlen);
+ } else {
+ /* Use scratchpad for in-place operation */
+ inlen = scratchpad_use;
+ memset(drbg->outscratchpad, 0, scratchpad_use);
+ sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
+ }
while (outlen) {
u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
/* Output buffer may not be valid for SGL, use scratchpad */
- skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
+ skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
cryptlen, drbg->V);
ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
&drbg->ctr_wait);
@@ -1763,6 +1760,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
+ memzero_explicit(drbg->outscratchpad, cryptlen);
outlen -= cryptlen;
outbuf += cryptlen;
@@ -1770,7 +1768,6 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
ret = 0;
out:
- memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
return ret;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 815541309a95..8facafd67802 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1019,6 +1019,36 @@ out:
return ret;
}
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+
+ /* Check 1: Verify key is not the zero point. */
+ if (ecc_point_is_zero(pk))
+ return -EINVAL;
+
+ /* Check 2: Verify key is in the range [1, p-1]. */
+ if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+ return -EINVAL;
+ if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+ return -EINVAL;
+
+ /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+ vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
+ vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
+ vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
+ vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
+ vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
+ vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
+ if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
+ return -EINVAL;
+
+ return 0;
+
+}
+
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret)
@@ -1046,16 +1076,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto out;
}
+ ecc_swap_digits(public_key, pk->x, ndigits);
+ ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
+ ret = ecc_is_pubkey_valid_partial(curve, pk);
+ if (ret)
+ goto err_alloc_product;
+
+ ecc_swap_digits(private_key, priv, ndigits);
+
product = ecc_alloc_point(ndigits);
if (!product) {
ret = -ENOMEM;
goto err_alloc_product;
}
- ecc_swap_digits(public_key, pk->x, ndigits);
- ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
- ecc_swap_digits(private_key, priv, ndigits);
-
ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
ecc_swap_digits(product->x, secret, ndigits);
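The curve-equation step of ecc_is_pubkey_valid_partial() can be sanity-checked on a toy curve. The sketch below uses y^2 = x^3 + 2x + 2 over GF(17) (an illustrative curve, not one of the NIST curves), where (5,1) lies on the curve and (5,2) does not.

#include <stdio.h>

/* Toy version of the check: is y^2 == x^3 + a*x + b (mod p)? */
static int on_curve(long x, long y, long a, long b, long p)
{
	long lhs = y * y % p;
	long rhs = ((x * x % p) * x + a * x + b) % p;

	return lhs == rhs;
}

int main(void)
{
	printf("(5,1): %s\n", on_curve(5, 1, 2, 2, 17) ? "valid" : "invalid");
	printf("(5,2): %s\n", on_curve(5, 2, 2, 2, 17) ? "valid" : "invalid");
	return 0;
}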
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index b80f45da829c..336ab1805639 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -13,9 +13,11 @@ struct ecc_curve {
struct ecc_point g;
u64 *p;
u64 *n;
+ u64 *a;
+ u64 *b;
};
-/* NIST P-192 */
+/* NIST P-192: a = p - 3 */
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
0x188DA80EB03090F6ull };
static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
@@ -24,6 +26,10 @@ static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
0xFFFFFFFFFFFFFFFFull };
static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_a[] = { 0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFFFFFFFFFEull,
+ 0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
+ 0x64210519E59C80E7ull };
static struct ecc_curve nist_p192 = {
.name = "nist_192",
.g = {
@@ -32,10 +38,12 @@ static struct ecc_curve nist_p192 = {
.ndigits = 3,
},
.p = nist_p192_p,
- .n = nist_p192_n
+ .n = nist_p192_n,
+ .a = nist_p192_a,
+ .b = nist_p192_b
};
-/* NIST P-256 */
+/* NIST P-256: a = p - 3 */
static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
@@ -44,6 +52,10 @@ static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
0x0000000000000000ull, 0xFFFFFFFF00000001ull };
static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
+static u64 nist_p256_a[] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
+ 0x0000000000000000ull, 0xFFFFFFFF00000001ull };
+static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
+ 0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
static struct ecc_curve nist_p256 = {
.name = "nist_256",
.g = {
@@ -52,7 +64,9 @@ static struct ecc_curve nist_p256 = {
.ndigits = 4,
},
.p = nist_p256_p,
- .n = nist_p256_n
+ .n = nist_p256_n,
+ .a = nist_p256_a,
+ .b = nist_p256_b
};
#endif
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 1bffb3f712dd..d9f192b953b2 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -132,7 +132,6 @@ static struct shash_alg ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 954a7064a179..393a782679c7 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -188,7 +188,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@@ -265,7 +265,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;
diff --git a/crypto/md4.c b/crypto/md4.c
index 810fefb0a007..9965ec40d9f9 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -217,7 +217,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct md4_ctx),
.base = {
.cra_name = "md4",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/md5.c b/crypto/md5.c
index f776ef43d621..94dd78144ba3 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -229,7 +229,6 @@ static struct shash_alg alg = {
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 6180b2557836..d057cf5ac4a8 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -514,7 +514,6 @@ static struct aead_alg crypto_morus1280_alg = {
.chunksize = MORUS1280_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus1280_ctx),
.cra_alignmask = 0,
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 5eede3749e64..1ca76e54281b 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -511,7 +511,6 @@ static struct aead_alg crypto_morus640_alg = {
.chunksize = MORUS640_BLOCK_SIZE,
.base = {
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct morus640_ctx),
.cra_alignmask = 0,
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b7a3a0613a30..47d3a6b83931 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -279,7 +279,6 @@ static struct shash_alg poly1305_alg = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 40e053b97b69..5f4472256e27 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -303,7 +303,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd128_ctx),
.base = {
.cra_name = "rmd128",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD128_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 5f3e6ea35268..737645344d1c 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -347,7 +347,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index f50c025cc962..0e9d30676a01 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -49,7 +49,7 @@ struct rmd256_ctx {
static void rmd256_transform(u32 *state, const __le32 *in)
{
- u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
+ u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
/* Initialize left lane */
aa = state[0];
@@ -100,7 +100,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
/* Swap contents of "a" registers */
- tmp = aa; aa = aaa; aaa = tmp;
+ swap(aa, aaa);
/* round 2: left lane */
ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -139,7 +139,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
/* Swap contents of "b" registers */
- tmp = bb; bb = bbb; bbb = tmp;
+ swap(bb, bbb);
/* round 3: left lane */
ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
@@ -178,7 +178,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
/* Swap contents of "c" registers */
- tmp = cc; cc = ccc; ccc = tmp;
+ swap(cc, ccc);
/* round 4: left lane */
ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
@@ -217,7 +217,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
/* Swap contents of "d" registers */
- tmp = dd; dd = ddd; ddd = tmp;
+ swap(dd, ddd);
/* combine results */
state[0] += aa;
@@ -322,7 +322,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd256_ctx),
.base = {
.cra_name = "rmd256",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index e1315e4869e8..3ae1df5bb48c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -53,7 +53,7 @@ struct rmd320_ctx {
static void rmd320_transform(u32 *state, const __le32 *in)
{
- u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
+ u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
/* Initialize left lane */
aa = state[0];
@@ -106,7 +106,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* Swap contents of "a" registers */
- tmp = aa; aa = aaa; aaa = tmp;
+ swap(aa, aaa);
/* round 2: left lane" */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -145,7 +145,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* Swap contents of "b" registers */
- tmp = bb; bb = bbb; bbb = tmp;
+ swap(bb, bbb);
/* round 3: left lane" */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
@@ -184,7 +184,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* Swap contents of "c" registers */
- tmp = cc; cc = ccc; ccc = tmp;
+ swap(cc, ccc);
/* round 4: left lane" */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
@@ -223,7 +223,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* Swap contents of "d" registers */
- tmp = dd; dd = ddd; ddd = tmp;
+ swap(dd, ddd);
/* round 5: left lane" */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
@@ -262,7 +262,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* Swap contents of "e" registers */
- tmp = ee; ee = eee; eee = tmp;
+ swap(ee, eee);
/* combine results */
state[0] += aa;
@@ -371,7 +371,6 @@ static struct shash_alg alg = {
.descsize = sizeof(struct rmd320_ctx),
.base = {
.cra_name = "rmd320",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = RMD320_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index c16c94f88733..d0b92c1cd6e9 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -91,7 +91,7 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
sg_init_table(dst, 2);
sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
- scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);
+ scatterwalk_crypto_chain(dst, sg_next(src), 2);
return dst;
}
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 6877cbb9105f..2af64ef81f40 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -76,7 +76,7 @@ static struct shash_alg alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 8f9c47e1a96e..1e5ba6649e8d 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -271,7 +271,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -285,7 +285,7 @@ static struct shash_alg sha256_algs[2] = { {
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7f6735d9003f..7ed98367d4fb 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -250,7 +250,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -261,7 +260,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -272,7 +270,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@@ -283,7 +280,6 @@ static struct shash_alg algs[] = { {
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
- .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index eba965d18bfc..4097cd555eb6 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -23,6 +23,28 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
+const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
+ 0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
+ 0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
+ 0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
+ 0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
+ 0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
+ 0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
+};
+EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
+
+const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
+ 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
+ 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
+ 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
+ 0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
+ 0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
+ 0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
+ 0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
+ 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
+};
+EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
+
static inline u64 Ch(u64 x, u64 y, u64 z)
{
return z ^ (x & (y ^ z));
@@ -171,7 +193,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -185,7 +207,7 @@ static struct shash_alg sha512_algs[2] = { {
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 100,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0fe2a2923ad0..0bd8c6caa498 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
addr = skcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize,
(walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
- return 0;
}
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
- unsigned int n = walk->nbytes - err;
- unsigned int nbytes;
-
- nbytes = walk->total - n;
-
- if (unlikely(err < 0)) {
- nbytes = 0;
- n = 0;
- } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
- SKCIPHER_WALK_SLOW |
- SKCIPHER_WALK_COPY |
- SKCIPHER_WALK_DIFF)))) {
+ unsigned int n; /* bytes processed */
+ bool more;
+
+ if (unlikely(err < 0))
+ goto finish;
+
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+ SKCIPHER_WALK_SLOW |
+ SKCIPHER_WALK_COPY |
+ SKCIPHER_WALK_DIFF)))) {
unmap_src:
skcipher_unmap_src(walk);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
@@ -131,28 +132,28 @@ unmap_src:
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
err = -EINVAL;
- nbytes = 0;
- } else
- n = skcipher_done_slow(walk, n);
+ goto finish;
+ }
+ skcipher_done_slow(walk, n);
+ goto already_advanced;
}
- if (err > 0)
- err = 0;
-
- walk->total = nbytes;
- walk->nbytes = nbytes;
-
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
+already_advanced:
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
CRYPTO_TFM_REQ_MAY_SLEEP : 0);
return skcipher_walk_next(walk);
}
+ err = 0;
+finish:
+ walk->nbytes = 0;
/* Short-circuit for the common/fast path. */
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
@@ -387,7 +388,6 @@ set_phys_lowmem:
}
return err;
}
-EXPORT_SYMBOL_GPL(skcipher_walk_next);
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
@@ -399,7 +399,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
unsigned size;
u8 *iv;
- aligned_bs = ALIGN(bs, alignmask);
+ aligned_bs = ALIGN(bs, alignmask + 1);
/* Minimum size to align buffer by alignmask. */
size = alignmask & ~a;
@@ -437,7 +437,6 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
}
walk->page = NULL;
- walk->nbytes = walk->total;
return skcipher_walk_next(walk);
}
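The skcipher_copy_iv() hunk also fixes an off-by-one in the alignment argument: ALIGN() expects the alignment itself, not the mask, so passing alignmask under-aligned the IV buffer size. A userspace restatement of the macro shows the difference (values chosen for illustration):

#include <stdio.h>

/* Same shape as the kernel ALIGN() macro: round x up to a multiple of a. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int bs = 1, alignmask = 3;	/* e.g. 1-byte blocks, 4-byte alignment */

	printf("ALIGN(bs, mask)     = %u\n", ALIGN_UP(bs, alignmask));     /* 1: under-aligned */
	printf("ALIGN(bs, mask + 1) = %u\n", ALIGN_UP(bs, alignmask + 1)); /* 4: correct */
	return 0;
}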
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9e823d99f095..9a5c60f08aad 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -184,7 +184,6 @@ static struct shash_alg sm3_alg = {
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d5bcdd905007..bdde95e8d369 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -415,12 +415,14 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
}
- if (secs)
+ if (secs) {
ret = test_mb_aead_jiffies(data, enc, *b_size,
secs, num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_aead_cycles(data, enc, *b_size,
num_mb);
+ }
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -660,11 +662,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
*b_size + (enc ? 0 : authsize),
iv);
- if (secs)
+ if (secs) {
ret = test_aead_jiffies(req, enc, *b_size,
secs);
- else
+ cond_resched();
+ } else {
ret = test_aead_cycles(req, enc, *b_size);
+ }
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
@@ -876,11 +880,13 @@ static void test_mb_ahash_speed(const char *algo, unsigned int secs,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
- if (secs)
+ if (secs) {
ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+ }
if (ret) {
@@ -1103,12 +1109,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
ahash_request_set_crypt(req, sg, output, speed[i].plen);
- if (secs)
+ if (secs) {
ret = test_ahash_jiffies(req, speed[i].blen,
speed[i].plen, output, secs);
- else
+ cond_resched();
+ } else {
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
+ }
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
@@ -1367,13 +1375,15 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
iv);
}
- if (secs)
+ if (secs) {
ret = test_mb_acipher_jiffies(data, enc,
*b_size, secs,
num_mb);
- else
+ cond_resched();
+ } else {
ret = test_mb_acipher_cycles(data, enc,
*b_size, num_mb);
+ }
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@@ -1581,12 +1591,14 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
- if (secs)
+ if (secs) {
ret = test_acipher_jiffies(req, enc,
*b_size, secs);
- else
+ cond_resched();
+ } else {
ret = test_acipher_cycles(req, enc,
*b_size);
+ }
if (ret) {
pr_err("%s() failed flags=%x\n", e,
@@ -1939,7 +1951,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 109:
- ret += tcrypt_test("vmac(aes)");
+ ret += tcrypt_test("vmac64(aes)");
break;
case 111:
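
All five timed paths in tcrypt now follow the same shape: after a wall-clock ("secs") run they call cond_resched() so that long speed tests stop tripping the soft-lockup watchdog, while the cycle-counting path stays untouched because it must not be preempted mid-measurement. A hedged sketch of that shape as a stand-alone helper (names hypothetical; the patch keeps the open-coded if/else at each call site):

#include <linux/sched.h>

/* jiffies_fn: wall-clock run; cycles_fn: cycle-accurate run */
static int timed_or_counted(unsigned int secs,
			    int (*jiffies_fn)(void *arg, unsigned int secs),
			    int (*cycles_fn)(void *arg),
			    void *arg)
{
	int ret;

	if (secs) {
		ret = jiffies_fn(arg, secs);
		cond_resched();		/* yield between long timed runs */
	} else {
		ret = cycles_fn(arg);	/* no rescheduling inside a measurement */
	}
	return ret;
}
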
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 11e45352fd0b..a1d42245082a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -259,9 +259,15 @@ out_nostate:
return ret;
}
+enum hash_test {
+ HASH_TEST_DIGEST,
+ HASH_TEST_FINAL,
+ HASH_TEST_FINUP
+};
+
static int __test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template, unsigned int tcount,
- bool use_digest, const int align_offset)
+ enum hash_test test_type, const int align_offset)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
size_t digest_size = crypto_ahash_digestsize(tfm);
@@ -332,14 +338,17 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- if (use_digest) {
+ switch (test_type) {
+ case HASH_TEST_DIGEST:
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- } else {
+ break;
+
+ case HASH_TEST_FINAL:
memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
@@ -371,6 +380,29 @@ static int __test_hash(struct crypto_ahash *tfm,
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ break;
+
+ case HASH_TEST_FINUP:
+ memset(result, 1, digest_size);
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: final failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ break;
}
if (memcmp(result, template[i].digest,
@@ -383,6 +415,9 @@ static int __test_hash(struct crypto_ahash *tfm,
}
}
+ if (test_type)
+ goto out;
+
j = 0;
for (i = 0; i < tcount; i++) {
/* alignment tests are only done with continuous buffers */
@@ -540,24 +575,24 @@ out_nobuf:
static int test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template,
- unsigned int tcount, bool use_digest)
+ unsigned int tcount, enum hash_test test_type)
{
unsigned int alignmask;
int ret;
- ret = __test_hash(tfm, template, tcount, use_digest, 0);
+ ret = __test_hash(tfm, template, tcount, test_type, 0);
if (ret)
return ret;
/* test unaligned buffers, check with one byte offset */
- ret = __test_hash(tfm, template, tcount, use_digest, 1);
+ ret = __test_hash(tfm, template, tcount, test_type, 1);
if (ret)
return ret;
alignmask = crypto_tfm_alg_alignmask(&tfm->base);
if (alignmask) {
/* Check if alignment mask for tfm is correctly set. */
- ret = __test_hash(tfm, template, tcount, use_digest,
+ ret = __test_hash(tfm, template, tcount, test_type,
alignmask + 1);
if (ret)
return ret;
@@ -1803,9 +1838,11 @@ static int __alg_test_hash(const struct hash_testvec *template,
return PTR_ERR(tfm);
}
- err = test_hash(tfm, template, tcount, true);
+ err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
+ if (!err)
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
if (!err)
- err = test_hash(tfm, template, tcount, false);
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
crypto_free_ahash(tfm);
return err;
}
@@ -3478,10 +3515,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(tgr192_tv_template)
}
}, {
- .alg = "vmac(aes)",
+ .alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
- .hash = __VECS(aes_vmac128_tv_template)
+ .hash = __VECS(vmac64_aes_tv_template)
}
}, {
.alg = "wp256",
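
testmgr now drives each hash vector through three entry points, selected by the new hash_test enum: digest(), init()/update()/final(), and init()/update()/finup(); the added "if (test_type) goto out;" restricts the chunked multi-scatterlist pass to the digest case. A minimal sketch of the finup flow being covered, using the synchronous shash interface for brevity (key setup omitted):

#include <crypto/hash.h>

static int demo_finup(struct crypto_shash *tfm,
		      const u8 *msg, unsigned int len, u8 *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_init(desc);
	if (!err)	/* finup() = update() on the tail plus final() in one call */
		err = crypto_shash_finup(desc, msg, len, digest);
	return err;
}
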
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index b950aa234e43..173111c70746 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -641,15 +641,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x11\x02" /* len */
+ "\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x11" /* len */
+ "\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -739,7 +741,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
"\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
"\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
- .secret_size = 529,
+ .secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@@ -748,15 +750,17 @@ static const struct kpp_testvec dh_tv_template[] = {
.secret =
#ifdef __LITTLE_ENDIAN
"\x01\x00" /* type */
- "\x11\x02" /* len */
+ "\x15\x02" /* len */
"\x00\x01\x00\x00" /* key_size */
"\x00\x01\x00\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x01\x00\x00\x00" /* g_size */
#else
"\x00\x01" /* type */
- "\x02\x11" /* len */
+ "\x02\x15" /* len */
"\x00\x00\x01\x00" /* key_size */
"\x00\x00\x01\x00" /* p_size */
+ "\x00\x00\x00\x00" /* q_size */
"\x00\x00\x00\x01" /* g_size */
#endif
/* xa */
@@ -846,7 +850,7 @@ static const struct kpp_testvec dh_tv_template[] = {
"\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
"\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
"\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
- .secret_size = 529,
+ .secret_size = 533,
.b_public_size = 256,
.expected_a_public_size = 256,
.expected_ss_size = 256,
@@ -4603,105 +4607,158 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
- '\x02', '\x03', '\x02', '\x02',
- '\x02', '\x04', '\x01', '\x07',
- '\x04', '\x01', '\x04', '\x03',};
-static const char vmac_string2[128] = {'a', 'b', 'c',};
-static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- 'a', 'b', 'c', 'a', 'b', 'c',
- };
+static const char vmac64_string1[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
+ '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
+};
-static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
- 'i', 'j', 'l', 'm',
- 'o', 'p', 'r', 's',
- 't', 'u', 'w', 'x', 'z'};
+static const char vmac64_string2[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'a', 'b', 'c',
+};
-static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
- 'o', 'l', 'k', ']', '%',
- '9', '2', '7', '!', 'A'};
+static const char vmac64_string3[144] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
+ 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
+ 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
+ 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
+ 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
+};
-static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
- 'i', '!', '#', 'w', '0',
- 'z', '/', '4', 'A', 'n'};
+static const char vmac64_string4[33] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
+ 'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
+ 'z',
+};
-static const struct hash_testvec aes_vmac128_tv_template[] = {
- {
+static const char vmac64_string5[143] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
+ ']', '%', '9', '2', '7', '!', 'A',
+};
+
+static const char vmac64_string6[145] = {
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ 'p', 't', '*', '7', 'l', 'i', '!', '#',
+ 'w', '0', 'z', '/', '4', 'A', 'n',
+};
+
+static const struct hash_testvec vmac64_aes_tv_template[] = {
+ { /* draft-krovetz-vmac-01 test vector 1 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
+ .psize = 16,
+ .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
+ }, { /* draft-krovetz-vmac-01 test vector 2 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
+ .psize = 19,
+ .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
+ }, { /* draft-krovetz-vmac-01 test vector 3 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+ .psize = 64,
+ .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
+ }, { /* draft-krovetz-vmac-01 test vector 4 */
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
+ "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
+ .psize = 316,
+ .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
+ .tap = { 1, 100, 200, 15 },
+ .np = 4,
+ }, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = NULL,
- .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
- .psize = 0,
.ksize = 16,
+ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .psize = 16,
+ .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string1,
- .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string1,
+ .psize = sizeof(vmac64_string1),
+ .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string2,
- .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string2,
+ .psize = sizeof(vmac64_string2),
+ .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
}, {
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string3,
- .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
- .psize = 128,
- .ksize = 16,
+ .ksize = 16,
+ .plaintext = vmac64_string3,
+ .psize = sizeof(vmac64_string3),
+ .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
}, {
.key = "abcdefghijklmnop",
- .plaintext = NULL,
- .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
- .psize = 0,
.ksize = 16,
+ .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .psize = 16,
+ .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
}, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string1,
- .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string2,
- .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "abcdefghijklmnop",
- .plaintext = vmac_string3,
- .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
- .psize = 128,
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string4,
- .digest = "\xab\xa5\x0f\xea\x42\x4e\xa1\x5f",
- .psize = sizeof(vmac_string4),
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string5,
- .digest = "\x25\x31\x98\xbc\x1d\xe8\x67\x60",
- .psize = sizeof(vmac_string5),
- .ksize = 16,
- }, {
- .key = "a09b5cd!f#07K\x00\x00\x00",
- .plaintext = vmac_string6,
- .digest = "\xc4\xae\x9b\x47\x95\x65\xeb\x41",
- .psize = sizeof(vmac_string6),
- .ksize = 16,
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string1,
+ .psize = sizeof(vmac64_string1),
+ .digest = "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
+ }, {
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string2,
+ .psize = sizeof(vmac64_string2),
+ .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
+ }, {
+ .key = "abcdefghijklmnop",
+ .ksize = 16,
+ .plaintext = vmac64_string3,
+ .psize = sizeof(vmac64_string3),
+ .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string4,
+ .psize = sizeof(vmac64_string4),
+ .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string5,
+ .psize = sizeof(vmac64_string5),
+ .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
+ }, {
+ .key = "a09b5cd!f#07K\x00\x00\x00",
+ .ksize = 16,
+ .plaintext = vmac64_string6,
+ .psize = sizeof(vmac64_string6),
+ .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
},
};
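
Two things change in this testmgr.h hunk: the replacement vmac64 vectors above carry the 16-byte nonce as the first bytes of each plaintext (matching the rewritten vmac driver below), and the dh_tv_template secrets near the top grow by four bytes because crypto_dh_encode_key() now writes a q_size field into the packed header. The new secret_size is consistent with the encoded fields:

    header  = type(2) + len(2) + key_size(4) + p_size(4) + q_size(4) + g_size(4) = 20 bytes
    payload = key(0x100) + p(0x100) + g(0x01)                                    = 513 bytes
    total   = 20 + 513                                                           = 533 bytes

Previously the header had no q_size field (16 bytes), hence the old 529; the little-endian len field "\x15\x02" is 0x0215 = 533 as well.
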
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 321bc6ff2a9d..022d3dd76c3b 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -636,7 +636,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr192",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -648,7 +647,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr160",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -660,7 +658,6 @@ static struct shash_alg tgr_algs[3] = { {
.descsize = sizeof(struct tgr192_ctx),
.base = {
.cra_name = "tgr128",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = TGR192_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a816cfb2..5f436dfdfc61 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
/*
- * Modified to interface to the Linux kernel
+ * VMAC: Message Authentication Code using Universal Hashing
+ *
+ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
+ *
* Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2018, Google Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
+/*
+ * Derived from:
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is herby placed in the public domain.
+ *  This implementation is hereby placed in the public domain.
+ *  The authors offer no warranty. Use at your own risk.
+ */
+#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
@@ -31,10 +36,42 @@
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
-#include <crypto/vmac.h>
#include <crypto/internal/hash.h>
/*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN 64
+#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
+#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
+#define VMAC_NONCEBYTES 16
+
+/* per-transform (per-key) context */
+struct vmac_tfm_ctx {
+ struct crypto_cipher *cipher;
+ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+ u64 polykey[2*VMAC_TAG_LEN/64];
+ u64 l3key[2*VMAC_TAG_LEN/64];
+};
+
+/* per-request context */
+struct vmac_desc_ctx {
+ union {
+ u8 partial[VMAC_NHBYTES]; /* partial block */
+ __le64 partial_words[VMAC_NHBYTES / 8];
+ };
+ unsigned int partial_size; /* size of the partial block */
+ bool first_block_processed;
+ u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
+ union {
+ u8 bytes[VMAC_NONCEBYTES];
+ __be64 pads[VMAC_NONCEBYTES / 8];
+ } nonce;
+ unsigned int nonce_size; /* nonce bytes filled so far */
+};
+
+/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
@@ -318,13 +355,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
} while (0)
#endif
-static void vhash_abort(struct vmac_ctx *ctx)
-{
- ctx->polytmp[0] = ctx->polykey[0] ;
- ctx->polytmp[1] = ctx->polykey[1] ;
- ctx->first_block_processed = 0;
-}
-
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@@ -364,280 +394,227 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
return rl;
}
-static void vhash_update(const unsigned char *m,
- unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
- struct vmac_ctx *ctx)
+/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
+static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx,
+ const __le64 *mptr, unsigned int blocks)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- if (!mbytes)
- return;
-
- BUG_ON(mbytes % VMAC_NHBYTES);
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
-
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
-
- if (!ctx->first_block_processed) {
- ctx->first_block_processed = 1;
+ const u64 *kptr = tctx->nhkey;
+ const u64 pkh = tctx->polykey[0];
+ const u64 pkl = tctx->polykey[1];
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+ u64 rh, rl;
+
+ if (!dctx->first_block_processed) {
+ dctx->first_block_processed = true;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
+ blocks--;
}
- while (i--) {
+ while (blocks--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
- ctx->polytmp[0] = ch;
- ctx->polytmp[1] = cl;
+ dctx->polytmp[0] = ch;
+ dctx->polytmp[1] = cl;
}
-static u64 vhash(unsigned char m[], unsigned int mbytes,
- u64 *tagl, struct vmac_ctx *ctx)
+static int vmac_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i, remaining;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES;
- remaining = mbytes % VMAC_NHBYTES;
-
- if (ctx->first_block_processed) {
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
- } else if (i) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
- } else if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- goto do_l3;
- } else {/* Empty String */
- ch = pkh; cl = pkl;
- goto do_l3;
- }
-
- while (i--) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- }
- if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- }
-
-do_l3:
- vhash_abort(ctx);
- remaining *= 8;
- return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
-}
+ struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+ __be64 out[2];
+ u8 in[16] = { 0 };
+ unsigned int i;
+ int err;
-static u64 vmac(unsigned char m[], unsigned int mbytes,
- const unsigned char n[16], u64 *tagl,
- struct vmac_ctx_t *ctx)
-{
- u64 *in_n, *out_p;
- u64 p, h;
- int i;
-
- in_n = ctx->__vmac_ctx.cached_nonce;
- out_p = ctx->__vmac_ctx.cached_aes;
-
- i = n[15] & 1;
- if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
- in_n[0] = *(u64 *)(n);
- in_n[1] = *(u64 *)(n+8);
- ((unsigned char *)in_n)[15] &= 0xFE;
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out_p, (unsigned char *)in_n);
-
- ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+ if (keylen != VMAC_KEY_LEN) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
- p = be64_to_cpup(out_p + i);
- h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
- return le64_to_cpu(p + h);
-}
-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
-{
- u64 in[2] = {0}, out[2];
- unsigned i;
- int err = 0;
-
- err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+ err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
return err;
/* Fill nh key */
- ((unsigned char *)in)[0] = 0x80;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0x80;
+ for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->nhkey[i] = be64_to_cpu(out[0]);
+ tctx->nhkey[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
}
/* Fill poly key */
- ((unsigned char *)in)[0] = 0xC0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.polytmp[i] =
- ctx->__vmac_ctx.polykey[i] =
- be64_to_cpup(out) & mpoly;
- ctx->__vmac_ctx.polytmp[i+1] =
- ctx->__vmac_ctx.polykey[i+1] =
- be64_to_cpup(out+1) & mpoly;
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0xC0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
+ tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
+ in[15]++;
}
/* Fill ip key */
- ((unsigned char *)in)[0] = 0xE0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+ in[0] = 0xE0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
do {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
- } while (ctx->__vmac_ctx.l3key[i] >= p64
- || ctx->__vmac_ctx.l3key[i+1] >= p64);
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->l3key[i] = be64_to_cpu(out[0]);
+ tctx->l3key[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
+ } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
}
- /* Invalidate nonce/aes cache and reset other elements */
- ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
- ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
- ctx->__vmac_ctx.first_block_processed = 0;
-
- return err;
+ return 0;
}
-static int vmac_setkey(struct crypto_shash *parent,
- const u8 *key, unsigned int keylen)
+static int vmac_init(struct shash_desc *desc)
{
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- if (keylen != VMAC_KEY_LEN) {
- crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return vmac_set_key((u8 *)key, ctx);
-}
-
-static int vmac_init(struct shash_desc *pdesc)
-{
+ dctx->partial_size = 0;
+ dctx->first_block_processed = false;
+ memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+ dctx->nonce_size = 0;
return 0;
}
-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
- unsigned int len)
+static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- int expand;
- int min;
-
- expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
- VMAC_NHBYTES - ctx->partial_size : 0;
-
- min = len < expand ? len : expand;
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int n;
+
+ /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
+ if (dctx->nonce_size < VMAC_NONCEBYTES) {
+ n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
+ memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
+ dctx->nonce_size += n;
+ p += n;
+ len -= n;
+ }
- memcpy(ctx->partial + ctx->partial_size, p, min);
- ctx->partial_size += min;
+ if (dctx->partial_size) {
+ n = min(len, VMAC_NHBYTES - dctx->partial_size);
+ memcpy(&dctx->partial[dctx->partial_size], p, n);
+ dctx->partial_size += n;
+ p += n;
+ len -= n;
+ if (dctx->partial_size == VMAC_NHBYTES) {
+ vhash_blocks(tctx, dctx, dctx->partial_words, 1);
+ dctx->partial_size = 0;
+ }
+ }
- if (len < expand)
- return 0;
+ if (len >= VMAC_NHBYTES) {
+ n = round_down(len, VMAC_NHBYTES);
+ /* TODO: 'p' may be misaligned here */
+ vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
+ p += n;
+ len -= n;
+ }
- vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
- ctx->partial_size = 0;
+ if (len) {
+ memcpy(dctx->partial, p, len);
+ dctx->partial_size = len;
+ }
- len -= expand;
- p += expand;
+ return 0;
+}
- if (len % VMAC_NHBYTES) {
- memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
- len % VMAC_NHBYTES);
- ctx->partial_size = len % VMAC_NHBYTES;
+static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx)
+{
+ unsigned int partial = dctx->partial_size;
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+
+ /* L1 and L2-hash the final block if needed */
+ if (partial) {
+ /* Zero-pad to next 128-bit boundary */
+ unsigned int n = round_up(partial, 16);
+ u64 rh, rl;
+
+ memset(&dctx->partial[partial], 0, n - partial);
+ nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
+ rh &= m62;
+ if (dctx->first_block_processed)
+ poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
+ rh, rl);
+ else
+ ADD128(ch, cl, rh, rl);
}
- vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
-
- return 0;
+ /* L3-hash the 128-bit output of L2-hash */
+ return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
-static int vmac_final(struct shash_desc *pdesc, u8 *out)
+static int vmac_final(struct shash_desc *desc, u8 *out)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- vmac_t mac;
- u8 nonce[16] = {};
-
- /* vmac() ends up accessing outside the array bounds that
- * we specify. In appears to access up to the next 2-word
- * boundary. We'll just be uber cautious and zero the
- * unwritten bytes in the buffer.
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ int index;
+ u64 hash, pad;
+
+ if (dctx->nonce_size != VMAC_NONCEBYTES)
+ return -EINVAL;
+
+ /*
+ * The VMAC specification requires a nonce at least 1 bit shorter than
+ * the block cipher's block length, so we actually only accept a 127-bit
+ * nonce. We define the unused bit to be the first one and require that
+ * it be 0, so the needed prepending of a 0 bit is implicit.
*/
- if (ctx->partial_size) {
- memset(ctx->partial + ctx->partial_size, 0,
- VMAC_NHBYTES - ctx->partial_size);
- }
- mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
- memcpy(out, &mac, sizeof(vmac_t));
- memzero_explicit(&mac, sizeof(vmac_t));
- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
- ctx->partial_size = 0;
+ if (dctx->nonce.bytes[0] & 0x80)
+ return -EINVAL;
+
+ /* Finish calculating the VHASH of the message */
+ hash = vhash_final(tctx, dctx);
+
+ /* Generate pseudorandom pad by encrypting the nonce */
+ BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
+ index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
+ dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
+ crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
+ dctx->nonce.bytes);
+ pad = be64_to_cpu(dctx->nonce.pads[index]);
+
+ /* The VMAC is the sum of VHASH and the pseudorandom pad */
+ put_unaligned_be64(hash + pad, out);
return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctx->child = cipher;
+ tctx->cipher = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->child);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,7 +632,11 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(alg))
return PTR_ERR(alg);
- inst = shash_alloc_instance("vmac", alg);
+ err = -EINVAL;
+ if (alg->cra_blocksize != VMAC_NONCEBYTES)
+ goto out_put_alg;
+
+ inst = shash_alloc_instance(tmpl->name, alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
@@ -670,11 +651,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
- inst->alg.digestsize = sizeof(vmac_t);
- inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+ inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
+ inst->alg.descsize = sizeof(struct vmac_desc_ctx);
+ inst->alg.digestsize = VMAC_TAG_LEN / 8;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
@@ -691,8 +673,8 @@ out_put_alg:
return err;
}
-static struct crypto_template vmac_tmpl = {
- .name = "vmac",
+static struct crypto_template vmac64_tmpl = {
+ .name = "vmac64",
.create = vmac_create,
.free = shash_free_instance,
.module = THIS_MODULE,
@@ -700,12 +682,12 @@ static struct crypto_template vmac_tmpl = {
static int __init vmac_module_init(void)
{
- return crypto_register_template(&vmac_tmpl);
+ return crypto_register_template(&vmac64_tmpl);
}
static void __exit vmac_module_exit(void)
{
- crypto_unregister_template(&vmac_tmpl);
+ crypto_unregister_template(&vmac64_tmpl);
}
module_init(vmac_module_init);
@@ -713,4 +695,4 @@ module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac");
+MODULE_ALIAS_CRYPTO("vmac64");
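
The rewritten vmac.c registers a "vmac64" template: instances such as vmac64(aes) return an 8-byte tag, and, as the new test vectors show, the 16-byte nonce is passed as the first VMAC_NONCEBYTES bytes of the message stream rather than being hard-coded to zero in final(). A hedged usage sketch (hypothetical caller, error handling trimmed; the top bit of the nonce's first byte must be clear):

#include <crypto/hash.h>

static int demo_vmac64(const u8 key[16], const u8 nonce[16],
		       const u8 *msg, unsigned int len, u8 tag[8])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* nonce goes in first, then the message; digest is the 64-bit tag */
		err = crypto_shash_init(desc) ?:
		      crypto_shash_update(desc, nonce, 16) ?:
		      crypto_shash_finup(desc, msg, len, tag);
	}
	crypto_free_shash(tfm);
	return err;
}
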
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 7ee5a043a988..149e577fb772 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1127,7 +1127,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1139,7 +1138,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -1151,7 +1149,6 @@ static struct shash_alg wp_algs[3] = { {
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/crypto/xts.c b/crypto/xts.c
index 12284183bd20..ccf55fbb8bc2 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -138,7 +138,7 @@ static int post_crypt(struct skcipher_request *req)
if (rctx->dst != sg) {
rctx->dst[0] = *sg;
sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
}
rctx->dst[0].length -= offset - sg->offset;
rctx->dst[0].offset = offset;
@@ -204,7 +204,7 @@ static int pre_crypt(struct skcipher_request *req)
if (rctx->src != sg) {
rctx->src[0] = *sg;
sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+ scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
}
rctx->src[0].length -= offset - sg->offset;
rctx->src[0].offset = offset;
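
The xts call sites here adapt to scatterwalk_crypto_chain() losing its unused "chain" argument; the helper now only decides between chaining and terminating the list. For reference, a sketch of the post-change inline, assuming it mirrors include/crypto/scatterwalk.h after this series:

#include <linux/scatterlist.h>

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);	/* append the remainder of the list */
	else
		sg_mark_end(head);		/* nothing follows: terminate here */
}
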
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c34b257d852d..dac895dc01b9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -307,19 +307,6 @@ config HW_RANDOM_HISI
If unsure, say Y.
-config HW_RANDOM_MSM
- tristate "Qualcomm SoCs Random Number Generator support"
- depends on HW_RANDOM && ARCH_QCOM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Qualcomm SoCs.
-
- To compile this driver as a module, choose M here. the
- module will be called msm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && ARCH_STI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 533e913c93d1..e35ec3ce3a20 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
-obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
deleted file mode 100644
index 841fee845ec9..000000000000
--- a/drivers/char/hw_random/msm-rng.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/hw_random.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-/* Device specific register offsets */
-#define PRNG_DATA_OUT 0x0000
-#define PRNG_STATUS 0x0004
-#define PRNG_LFSR_CFG 0x0100
-#define PRNG_CONFIG 0x0104
-
-/* Device specific register masks and config values */
-#define PRNG_LFSR_CFG_MASK 0x0000ffff
-#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
-#define PRNG_CONFIG_HW_ENABLE BIT(1)
-#define PRNG_STATUS_DATA_AVAIL BIT(0)
-
-#define MAX_HW_FIFO_DEPTH 16
-#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4)
-#define WORD_SZ 4
-
-struct msm_rng {
- void __iomem *base;
- struct clk *clk;
- struct hwrng hwrng;
-};
-
-#define to_msm_rng(p) container_of(p, struct msm_rng, hwrng)
-
-static int msm_rng_enable(struct hwrng *hwrng, int enable)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- u32 val;
- int ret;
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- if (enable) {
- /* Enable PRNG only if it is not already enabled */
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- if (val & PRNG_CONFIG_HW_ENABLE)
- goto already_enabled;
-
- val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
- val &= ~PRNG_LFSR_CFG_MASK;
- val |= PRNG_LFSR_CFG_CLOCKS;
- writel(val, rng->base + PRNG_LFSR_CFG);
-
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val |= PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- } else {
- val = readl_relaxed(rng->base + PRNG_CONFIG);
- val &= ~PRNG_CONFIG_HW_ENABLE;
- writel(val, rng->base + PRNG_CONFIG);
- }
-
-already_enabled:
- clk_disable_unprepare(rng->clk);
- return 0;
-}
-
-static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
-{
- struct msm_rng *rng = to_msm_rng(hwrng);
- size_t currsize = 0;
- u32 *retdata = data;
- size_t maxsize;
- int ret;
- u32 val;
-
- /* calculate max size bytes to transfer back to caller */
- maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
-
- ret = clk_prepare_enable(rng->clk);
- if (ret)
- return ret;
-
- /* read random data from hardware */
- do {
- val = readl_relaxed(rng->base + PRNG_STATUS);
- if (!(val & PRNG_STATUS_DATA_AVAIL))
- break;
-
- val = readl_relaxed(rng->base + PRNG_DATA_OUT);
- if (!val)
- break;
-
- *retdata++ = val;
- currsize += WORD_SZ;
-
- /* make sure we stay on 32bit boundary */
- if ((maxsize - currsize) < WORD_SZ)
- break;
- } while (currsize < maxsize);
-
- clk_disable_unprepare(rng->clk);
-
- return currsize;
-}
-
-static int msm_rng_init(struct hwrng *hwrng)
-{
- return msm_rng_enable(hwrng, 1);
-}
-
-static void msm_rng_cleanup(struct hwrng *hwrng)
-{
- msm_rng_enable(hwrng, 0);
-}
-
-static int msm_rng_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct msm_rng *rng;
- int ret;
-
- rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
- if (!rng)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, rng);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(rng->base))
- return PTR_ERR(rng->base);
-
- rng->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(rng->clk))
- return PTR_ERR(rng->clk);
-
- rng->hwrng.name = KBUILD_MODNAME,
- rng->hwrng.init = msm_rng_init,
- rng->hwrng.cleanup = msm_rng_cleanup,
- rng->hwrng.read = msm_rng_read,
-
- ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register hwrng\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct of_device_id msm_rng_of_match[] = {
- { .compatible = "qcom,prng", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_rng_of_match);
-
-static struct platform_driver msm_rng_driver = {
- .probe = msm_rng_probe,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(msm_rng_of_match),
- }
-};
-module_platform_driver(msm_rng_driver);
-
-MODULE_ALIAS("platform:" KBUILD_MODNAME);
-MODULE_AUTHOR("The Linux Foundation");
-MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43cccf6aff61..a8c4ce07fc9d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -585,6 +585,17 @@ config CRYPTO_DEV_QCE
hardware. To compile this driver as a module, choose M here. The
module will be called qcrypto.
+config CRYPTO_DEV_QCOM_RNG
+ tristate "Qualcomm Random Number Generator Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select CRYPTO_RNG
+ help
+ This driver provides support for the Random Number
+ Generator hardware found on Qualcomm SoCs.
+
+ To compile this driver as a module, choose M here. The
+ module will be called qcom-rng. If unsure, say N.
+
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
depends on PPC64 && VSX
@@ -689,8 +700,10 @@ config CRYPTO_DEV_SAFEXCEL
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
select CRYPTO_HASH
select CRYPTO_HMAC
+ select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -746,4 +759,6 @@ config CRYPTO_DEV_CCREE
cryptographic operations on the system REE.
If unsure say Y.
+source "drivers/crypto/hisilicon/Kconfig"
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7ae87b4f6c8d..c23396f32c8a 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
@@ -45,3 +46,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-y += hisilicon/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 05981ccd9901..6eaec9ba0f68 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1132,8 +1132,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1153,8 +1152,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "cfb(aes)",
.cra_driver_name = "cfb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1174,8 +1172,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1196,8 +1193,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1217,8 +1213,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1237,8 +1232,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.cra_name = "ofb(aes)",
.cra_driver_name = "ofb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index e66f18a0ddd0..74f083f45e97 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -186,7 +186,10 @@ static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
* always be the same. Use a macro for the key size to avoid unnecessary
* computations.
*/
- copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ copied = sg_copy_to_buffer(pubkey,
+ sg_nents_for_len(pubkey,
+ ATMEL_ECC_PUBKEY_SIZE),
+ cmd->data, ATMEL_ECC_PUBKEY_SIZE);
if (copied != ATMEL_ECC_PUBKEY_SIZE)
return -EINVAL;
@@ -268,15 +271,17 @@ static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
struct kpp_request *req = areq;
struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_ecc_cmd *cmd = &work_data->cmd;
- size_t copied;
- size_t n_sz = ctx->n_sz;
+ size_t copied, n_sz;
if (status)
goto free_work_data;
+ /* might want less than we've got */
+ n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+
/* copy the shared secret */
- copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
- n_sz);
+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
+ &cmd->data[RSP_DATA_IDX], n_sz);
if (copied != n_sz)
status = -EINVAL;
@@ -440,7 +445,7 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
- size_t copied;
+ size_t copied, nbytes;
int ret = 0;
if (ctx->do_fallback) {
@@ -448,10 +453,14 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req)
return crypto_kpp_generate_public_key(req);
}
+ /* might want less than we've got */
+ nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len);
+
/* public key was saved at private key generation */
- copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
- ATMEL_ECC_PUBKEY_SIZE);
- if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, nbytes),
+ ctx->public_key, nbytes);
+ if (copied != nbytes)
ret = -EINVAL;
return ret;
@@ -470,6 +479,10 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
return crypto_kpp_compute_shared_secret(req);
}
+ /* must have exactly two points to be on the curve */
+ if (req->src_len != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
GFP_ATOMIC;
@@ -554,10 +567,6 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
}
crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
-
- dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
-
ctx->fallback = fallback;
return 0;
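
The atmel-ecc fixes stop assuming the destination scatterlists are single entries: the copy length is clamped to what the caller asked for (req->dst_len), and the copy helpers are told how many entries that length spans via sg_nents_for_len(). The contract being relied on, as a short illustrative helper (names hypothetical):

#include <linux/scatterlist.h>

/* Copy 'nbytes' of 'buf' into the caller's destination scatterlist,
 * walking as many entries as that length spans.
 */
static int demo_copy_to_sg(struct scatterlist *dst, const void *buf,
			   size_t nbytes)
{
	int nents = sg_nents_for_len(dst, nbytes);

	if (nents < 0)			/* list shorter than nbytes */
		return nents;
	if (sg_copy_from_buffer(dst, nents, buf, nbytes) != nbytes)
		return -EINVAL;
	return 0;
}
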
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 4d43081120db..8a19df2fba6a 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2316,9 +2316,7 @@ struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
goto error;
}
- tfm = crypto_alloc_ahash(name,
- CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(name, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
goto error;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0fb8bbf41a8d..7f07a5085e9b 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2704,7 +2704,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha1",
.cra_driver_name = "artpec-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2727,7 +2727,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha256",
.cra_driver_name = "artpec-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2751,7 +2751,7 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "artpec-hmac-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2777,7 +2777,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha384",
.cra_driver_name = "artpec-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2801,7 +2801,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "artpec-hmac-sha384",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2824,7 +2824,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "sha512",
.cra_driver_name = "artpec-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2848,7 +2848,7 @@ static struct ahash_alg artpec7_hash_algos[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "artpec-hmac-sha512",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2867,8 +2867,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2888,8 +2887,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "artpec6-ctr-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2911,8 +2909,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2933,8 +2930,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2964,7 +2960,7 @@ static struct aead_alg aead_algos[] = {
.cra_name = "gcm(aes)",
.cra_driver_name = "artpec-gcm-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 309c67c7012f..2d1f1db9f807 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3914,8 +3914,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.cipher_info = {
@@ -4649,8 +4648,7 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
- hash->halg.base.cra_type = &crypto_ahash_type;
- hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+ hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4691,7 +4689,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
INIT_LIST_HEAD(&aead->base.cra_list);
- aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0beb28196e20..43975ab5f09c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1846,8 +1846,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
t_alg->alg_type = template->alg_type;
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index df21d996db7e..600336d169a9 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -351,7 +351,7 @@ static int cvm_enc_dec_init(struct crypto_tfm *tfm)
return 0;
}
-struct crypto_alg algs[] = { {
+static struct crypto_alg algs[] = { {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cvm_enc_ctx),
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4fdc921ba611..ebe267379ac9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -148,7 +148,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_ATOMIC | __GFP_ZERO), &dma);
+ vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
if (!vaddr)
return NULL;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 26687f318de6..3c6fe57f91f8 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -399,13 +399,12 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_aes_cmac_cra_init;
base->cra_exit = ccp_aes_cmac_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 871c9628a2ee..2ca64bb57d2e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -497,13 +497,12 @@ static int ccp_register_sha_alg(struct list_head *head,
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
+ base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
base->cra_ctxsize = sizeof(struct ccp_ctx);
base->cra_priority = CCP_CRA_PRIORITY;
- base->cra_type = &crypto_ahash_type;
base->cra_init = ccp_sha_cra_init;
base->cra_exit = ccp_sha_cra_exit;
base->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index ff478d826d7d..218739b961fe 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -62,14 +62,14 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
int reg;
/* Read the interrupt status: */
- status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+ status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
/* Check if it is command completion: */
- if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+ if (!(status & PSP_CMD_COMPLETE))
goto done;
/* Check if it is SEV command completion: */
- reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
if (reg & PSP_CMDRESP_RESP) {
psp->sev_int_rcvd = 1;
wake_up(&psp->sev_int_queue);
@@ -77,17 +77,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
done:
/* Clear the interrupt status by writing the same value we read. */
- iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
{
- psp->sev_int_rcvd = 0;
-
wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
- *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
}
static int sev_cmd_buffer_len(int cmd)
@@ -145,13 +143,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
sev_cmd_buffer_len(cmd), false);
- iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
- iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+ iowrite32(phys_lsb, psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg);
+ iowrite32(phys_msb, psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg);
+
+ psp->sev_int_rcvd = 0;
reg = cmd;
reg <<= PSP_CMDRESP_CMD_SHIFT;
reg |= PSP_CMDRESP_IOC;
- iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+ iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
/* wait for command completion */
sev_wait_cmd_ioc(psp, &reg);
@@ -789,7 +789,7 @@ static int sev_misc_init(struct psp_device *psp)
static int sev_init(struct psp_device *psp)
{
/* Check if device supports SEV feature */
- if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+ if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
dev_dbg(psp->dev, "device does not support SEV\n");
return 1;
}
@@ -817,11 +817,11 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
- psp->io_regs = sp->io_map + psp->vdata->offset;
+ psp->io_regs = sp->io_map;
/* Disable and clear interrupts until ready */
- iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+ iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
+ iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
/* Request an irq */
ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
@@ -838,7 +838,9 @@ int psp_dev_init(struct sp_device *sp)
sp->set_psp_master_device(sp);
/* Enable interrupt */
- iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
+
+ dev_notice(dev, "psp enabled\n");
return 0;
@@ -856,6 +858,9 @@ void psp_dev_destroy(struct sp_device *sp)
{
struct psp_device *psp = sp->psp_data;
+ if (!psp)
+ return;
+
if (psp->sev_misc)
kref_put(&misc_dev->refcount, sev_exit);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index c7e9098a233c..8b53a9674ecb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -30,24 +30,7 @@
#include "sp-dev.h"
-#define PSP_C2PMSG(_num) ((_num) << 2)
-#define PSP_CMDRESP PSP_C2PMSG(32)
-#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56)
-#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
-#define PSP_FEATURE_REG PSP_C2PMSG(63)
-
-#define PSP_P2CMSG(_num) ((_num) << 2)
-#define PSP_CMD_COMPLETE_REG 1
-#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
-
-#define PSP_P2CMSG_INTEN 0x0110
-#define PSP_P2CMSG_INTSTS 0x0114
-
-#define PSP_C2PMSG_ATTR_0 0x0118
-#define PSP_C2PMSG_ATTR_1 0x011c
-#define PSP_C2PMSG_ATTR_2 0x0120
-#define PSP_C2PMSG_ATTR_3 0x0124
-#define PSP_P2CMSG_ATTR_0 0x0128
+#define PSP_CMD_COMPLETE BIT(1)
#define PSP_CMDRESP_CMD_SHIFT 16
#define PSP_CMDRESP_IOC BIT(0)
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index acb197b66ced..14398cad1625 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -44,7 +44,12 @@ struct ccp_vdata {
};
struct psp_vdata {
- const unsigned int offset;
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int feature_reg;
+ const unsigned int inten_reg;
+ const unsigned int intsts_reg;
};
/* Structure to hold SP device data */
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f5f43c50698a..7da93e9bebed 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -269,38 +269,62 @@ static int sp_pci_resume(struct pci_dev *pdev)
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
-static const struct psp_vdata psp_entry = {
- .offset = 0x10500,
+static const struct psp_vdata pspv1 = {
+ .cmdresp_reg = 0x10580,
+ .cmdbuff_addr_lo_reg = 0x105e0,
+ .cmdbuff_addr_hi_reg = 0x105e4,
+ .feature_reg = 0x105fc,
+ .inten_reg = 0x10610,
+ .intsts_reg = 0x10614,
+};
+
+static const struct psp_vdata pspv2 = {
+ .cmdresp_reg = 0x10980,
+ .cmdbuff_addr_lo_reg = 0x109e0,
+ .cmdbuff_addr_hi_reg = 0x109e4,
+ .feature_reg = 0x109fc,
+ .inten_reg = 0x10690,
+ .intsts_reg = 0x10694,
};
#endif
static const struct sp_dev_vdata dev_vdata[] = {
- {
+ { /* 0 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv3,
#endif
},
- {
+ { /* 1 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
- .psp_vdata = &psp_entry
+ .psp_vdata = &pspv1,
#endif
},
- {
+ { /* 2 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5b,
#endif
},
+ { /* 3 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv2,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
/* Last entry must be zero */
{ 0, }
};
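
The PSP rework above replaces a single hard-coded register layout (one base offset plus fixed #defines) with a per-version psp_vdata table of register offsets, selected through the PCI device table; the common code then only ever dereferences psp->vdata. A small standalone sketch of the same pattern, reusing a subset of the pspv1/pspv2 offsets purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the psp_vdata change: each device generation
 * carries its own table of register offsets, and generation-agnostic
 * code indexes through the table instead of using constants. */
struct psp_regmap {
	unsigned int cmdresp;
	unsigned int inten;
	unsigned int intsts;
};

static const struct psp_regmap pspv1 = { 0x10580, 0x10610, 0x10614 };
static const struct psp_regmap pspv2 = { 0x10980, 0x10690, 0x10694 };

/* Common code only ever goes through the regmap pointer. */
static void describe(const struct psp_regmap *map, const char *name)
{
	printf("%s: cmdresp=0x%x inten=0x%x intsts=0x%x\n",
	       name, map->cmdresp, map->inten, map->intsts);
}

int main(void)
{
	describe(&pspv1, "pspv1");
	describe(&pspv2, "pspv2");
	return 0;
}

Supporting a new PSP generation then only means adding another table entry and wiring it into the device table; the interrupt and command paths stay untouched.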
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 03f4b9fce556..01b82b82f8b8 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2344,7 +2344,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2364,7 +2363,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2384,7 +2382,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2404,7 +2401,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2424,7 +2420,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),cbc(aes))",
.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2444,7 +2439,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2464,7 +2458,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2484,7 +2477,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_aead_setauthsize,
@@ -2504,7 +2496,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "ccm(aes)",
.driver_name = "ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_ccm_setauthsize,
@@ -2524,7 +2515,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4309(ccm(aes))",
.driver_name = "rfc4309-ccm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4309_ccm_setkey,
.setauthsize = cc_rfc4309_ccm_setauthsize,
@@ -2544,7 +2534,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "gcm(aes)",
.driver_name = "gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_aead_setkey,
.setauthsize = cc_gcm_setauthsize,
@@ -2564,7 +2553,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4106(gcm(aes))",
.driver_name = "rfc4106-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4106_gcm_setkey,
.setauthsize = cc_rfc4106_gcm_setauthsize,
@@ -2584,7 +2572,6 @@ static struct cc_alg_template aead_algs[] = {
.name = "rfc4543(gcm(aes))",
.driver_name = "rfc4543-gcm-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
.setkey = cc_rfc4543_gcm_setkey,
.setauthsize = cc_rfc4543_gcm_setauthsize,
@@ -2621,8 +2608,7 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg->base.cra_priority = CC_CRA_PRIO;
alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- tmpl->type;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = cc_aead_init;
alg->exit = cc_aead_exit;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index b32577477b4c..dd948e1df9e5 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -454,9 +454,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -498,9 +496,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL :
- DMA_TO_DEVICE);
+ ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d2810c183b73..7623b29911af 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -19,8 +19,6 @@
#define template_skcipher template_u.skcipher
-#define CC_MIN_AES_XTS_SIZE 0x10
-#define CC_MAX_AES_XTS_SIZE 0x2000
struct cc_cipher_handle {
struct list_head alg_list;
};
@@ -98,8 +96,7 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
case S_DIN_to_AES:
switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
- if (size >= CC_MIN_AES_XTS_SIZE &&
- size <= CC_MAX_AES_XTS_SIZE &&
+ if (size >= AES_BLOCK_SIZE &&
IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
@@ -593,34 +590,82 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
+/*
+ * Update a CTR-AES 128 bit counter
+ */
+static void cc_update_ctr(u8 *ctr, unsigned int increment)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ IS_ALIGNED((unsigned long)ctr, 8)) {
+
+ __be64 *high_be = (__be64 *)ctr;
+ __be64 *low_be = high_be + 1;
+ u64 orig_low = __be64_to_cpu(*low_be);
+ u64 new_low = orig_low + (u64)increment;
+
+ *low_be = __cpu_to_be64(new_low);
+
+ if (new_low < orig_low)
+ *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+ } else {
+ u8 *pos = (ctr + AES_BLOCK_SIZE);
+ u8 val;
+ unsigned int size;
+
+ for (; increment; increment--)
+ for (size = AES_BLOCK_SIZE; size; size--) {
+ val = *--pos + 1;
+ *pos = val;
+ if (val)
+ break;
+ }
+ }
+}
+
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
- kzfree(req_ctx->iv);
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CBC:
+ /*
+ * The crypto API expects us to set the req->iv to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->iv, req_ctx->backup_info, ivsize);
+ kzfree(req_ctx->backup_info);
+ } else if (!err) {
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req->iv, req->dst, len,
+ ivsize, 0);
+ }
+ break;
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- scatterwalk_map_and_copy(req->iv, req->dst,
- (req->cryptlen - ivsize),
- ivsize, 0);
+ case DRV_CIPHER_CTR:
+ /* Compute the counter of the last block */
+ len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
+ cc_update_ctr((u8 *)req->iv, len);
+ break;
+
+ default:
+ break;
}
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ kzfree(req_ctx->iv);
+
skcipher_request_complete(req, err);
}
@@ -639,7 +684,7 @@ static int cc_cipher_process(struct skcipher_request *req,
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
struct cc_crypto_req cc_req = {};
- int rc, cts_restore_flag = 0;
+ int rc;
unsigned int seq_len = 0;
gfp_t flags = cc_gfp_flags(&req->base);
@@ -671,23 +716,10 @@ static int cc_cipher_process(struct skcipher_request *req,
goto exit_process;
}
- /*For CTS in case of data size aligned to 16 use CBC mode*/
- if (((nbytes % AES_BLOCK_SIZE) == 0) &&
- ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
- ctx_p->cipher_mode = DRV_CIPHER_CBC;
- cts_restore_flag = 1;
- }
-
/* Setup request structure */
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
- cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
- STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
-
-#endif
-
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -708,14 +740,6 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
&seq_len);
- /* do we need to generate IV? */
- if (req_ctx->is_giv) {
- cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
- cc_req.ivgen_dma_addr_len = 1;
- /* set the IV size (8/16 B long)*/
- cc_req.ivgen_size = ivsize;
- }
-
/* STAT_PHASE_3: Lock HW and push sequence */
rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
@@ -728,9 +752,6 @@ static int cc_cipher_process(struct skcipher_request *req,
}
exit_process:
- if (cts_restore_flag)
- ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
-
if (rc != -EINPROGRESS && rc != -EBUSY) {
kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
@@ -743,8 +764,7 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
{
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- req_ctx->is_giv = false;
- req_ctx->backup_info = NULL;
+ memset(req_ctx, 0, sizeof(*req_ctx));
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
}
@@ -752,21 +772,28 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
gfp_t flags = cc_gfp_flags(&req->base);
+ unsigned int len;
- /*
- * Allocate and save the last IV sized bytes of the source, which will
- * be lost in case of in-place decryption and might be needed for CTS.
- */
- req_ctx->backup_info = kmalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+
+ /* Allocate and save the last IV sized bytes of the source,
+ * which will be lost in case of in-place decryption.
+ */
+ req_ctx->backup_info = kzalloc(ivsize, flags);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
- (req->cryptlen - ivsize), ivsize, 0);
- req_ctx->is_giv = false;
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
+ ivsize, 0);
+ }
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -927,7 +954,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(paes)",
.driver_name = "ecb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -944,7 +970,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(paes)",
.driver_name = "cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -961,7 +986,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(paes)",
.driver_name = "ofb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -975,10 +999,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_712,
},
{
- .name = "cts1(cbc(paes))",
- .driver_name = "cts1-cbc-paes-ccree",
+ .name = "cts(cbc(paes))",
+ .driver_name = "cts-cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -995,7 +1018,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(paes)",
.driver_name = "ctr-paes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_sethkey,
.encrypt = cc_cipher_encrypt,
@@ -1162,7 +1184,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1179,7 +1200,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1196,7 +1216,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1210,10 +1229,9 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_630,
},
{
- .name = "cts1(cbc(aes))",
- .driver_name = "cts1-cbc-aes-ccree",
+ .name = "cts(cbc(aes))",
+ .driver_name = "cts-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1230,7 +1248,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccree",
.blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1247,7 +1264,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des3_ede)",
.driver_name = "cbc-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1264,7 +1280,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des3_ede)",
.driver_name = "ecb-3des-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1281,7 +1296,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "cbc(des)",
.driver_name = "cbc-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1298,7 +1312,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.name = "ecb(des)",
.driver_name = "ecb-des-ccree",
.blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
@@ -1338,8 +1351,7 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
alg->base.cra_init = cc_cipher_init;
alg->base.cra_exit = cc_cipher_exit;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_TYPE_SKCIPHER;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_alg->cipher_mode = tmpl->cipher_mode;
t_alg->flow_mode = tmpl->flow_mode;
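
Besides moving the CBC IV copy-back into the completion handler, the ccree changes add cc_update_ctr() so that CTR mode also hands the caller an updated IV: the counter is advanced by ALIGN(cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE blocks, treating the 16-byte IV as a 128-bit big-endian integer and carrying from the low 64 bits into the high 64 bits on wrap-around. A standalone sketch of that carry logic (not the driver code itself, which also keeps a byte-wise path for unaligned buffers):

#include <stdint.h>
#include <stdio.h>

/* Read/write a 64-bit big-endian value from/to a byte buffer. */
static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static void put_be64(uint8_t *p, uint64_t v)
{
	for (int i = 7; i >= 0; i--) {
		p[i] = (uint8_t)v;
		v >>= 8;
	}
}

/* Add 'increment' to a 128-bit big-endian counter, mirroring the carry
 * handling in cc_update_ctr(): bump the low 64 bits and propagate a
 * carry into the high 64 bits when the low word wraps. */
static void ctr128_add(uint8_t ctr[16], uint64_t increment)
{
	uint64_t hi = get_be64(ctr);
	uint64_t lo = get_be64(ctr + 8);
	uint64_t new_lo = lo + increment;

	if (new_lo < lo)	/* low word wrapped: carry into high word */
		hi++;

	put_be64(ctr, hi);
	put_be64(ctr + 8, new_lo);
}

int main(void)
{
	/* Counter one block away from wrapping its low 64 bits. */
	uint8_t ctr[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
			    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	ctr128_add(ctr, 2);	/* wraps the low word, carries into the high word */

	for (int i = 0; i < 16; i++)
		printf("%02x", ctr[i]);
	printf("\n");	/* expected: 00000000000000010000000000000001 */
	return 0;
}

With the counter one block away from wrapping, adding two blocks yields 00000000000000010000000000000001, i.e. the carry has propagated into the high word.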
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 68444cfa936b..4dbc0a1e6d5c 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -22,7 +22,6 @@ struct cipher_req_ctx {
u32 out_mlli_nents;
u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
- bool is_giv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index bd974fef05e4..1ff229c2aeab 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -131,8 +131,8 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
if (irr) {
- dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
- irr);
+ dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
/* Just warning */
}
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 95f82b2d1e70..d608a4faf662 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -148,7 +148,6 @@ struct cc_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- u32 type;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 96ff777474d7..b9313306c36f 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -602,66 +602,7 @@ static int cc_hash_update(struct ahash_request *req)
return rc;
}
-static int cc_hash_finup(struct ahash_request *req)
-{
- struct ahash_req_ctx *state = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- u32 digestsize = crypto_ahash_digestsize(tfm);
- struct scatterlist *src = req->src;
- unsigned int nbytes = req->nbytes;
- u8 *result = req->result;
- struct device *dev = drvdata_to_dev(ctx->drvdata);
- bool is_hmac = ctx->is_hmac;
- struct cc_crypto_req cc_req = {};
- struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- unsigned int idx = 0;
- int rc;
- gfp_t flags = cc_gfp_flags(&req->base);
-
- dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
-
- if (cc_map_req(dev, state, ctx)) {
- dev_err(dev, "map_ahash_source() failed\n");
- return -EINVAL;
- }
-
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
- flags)) {
- dev_err(dev, "map_ahash_request_final() failed\n");
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
- if (cc_map_result(dev, state, digestsize)) {
- dev_err(dev, "map_ahash_digest() failed\n");
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_req(dev, state, ctx);
- return -ENOMEM;
- }
-
- /* Setup request structure */
- cc_req.user_cb = cc_hash_complete;
- cc_req.user_arg = req;
-
- idx = cc_restore_hash(desc, ctx, state, idx);
-
- if (is_hmac)
- idx = cc_fin_hmac(desc, req, idx);
-
- idx = cc_fin_result(desc, req, idx);
-
- rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS && rc != -EBUSY) {
- dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- cc_unmap_hash_request(dev, state, src, true);
- cc_unmap_result(dev, state, digestsize, result);
- cc_unmap_req(dev, state, ctx);
- }
- return rc;
-}
-
-static int cc_hash_final(struct ahash_request *req)
+static int cc_do_finup(struct ahash_request *req, bool update)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -678,21 +619,20 @@ static int cc_hash_final(struct ahash_request *req)
int rc;
gfp_t flags = cc_gfp_flags(&req->base);
- dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
- nbytes);
+ dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
+ update ? "finup" : "final", nbytes);
if (cc_map_req(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -EINVAL;
}
- if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
-
if (cc_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
cc_unmap_hash_request(dev, state, src, true);
@@ -706,7 +646,7 @@ static int cc_hash_final(struct ahash_request *req)
idx = cc_restore_hash(desc, ctx, state, idx);
- /* "DO-PAD" must be enabled only when writing current length to HW */
+ /* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
set_cipher_mode(&desc[idx], ctx->hw_mode);
@@ -731,6 +671,17 @@ static int cc_hash_final(struct ahash_request *req)
return rc;
}
+static int cc_hash_finup(struct ahash_request *req)
+{
+ return cc_do_finup(req, true);
+}
+
+
+static int cc_hash_final(struct ahash_request *req)
+{
+ return cc_do_finup(req, false);
+}
+
static int cc_hash_init(struct ahash_request *req)
{
struct ahash_req_ctx *state = ahash_request_ctx(req);
@@ -1813,9 +1764,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
alg->cra_exit = cc_cra_exit;
alg->cra_init = cc_cra_init;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg->cra_type = &crypto_ahash_type;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
t_crypto_alg->hash_mode = template->hash_mode;
t_crypto_alg->hw_mode = template->hw_mode;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index b916c4eb608c..5c539af8ed60 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -4203,7 +4203,6 @@ static int chcr_unregister_alg(void)
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
-#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
* chcr_register_alg - Register crypto algorithms with kernel framework.
@@ -4237,8 +4236,7 @@ static int chcr_register_alg(void)
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK;
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4258,10 +4256,9 @@ static int chcr_register_alg(void)
a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
a_hash->halg.base.cra_module = THIS_MODULE;
- a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
+ a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
a_hash->halg.base.cra_alignmask = 0;
a_hash->halg.base.cra_exit = NULL;
- a_hash->halg.base.cra_type = &crypto_ahash_type;
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 55d50140f9e5..490960755864 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -97,7 +97,7 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
- val << bit_pos);
+ (u64)val << bit_pos);
}
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
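
The chtls one-liner is an integer-promotion fix: val is an int, so val << bit_pos is evaluated in 32-bit arithmetic and any bit position of 32 or above is lost (the shift itself is undefined behaviour); casting to u64 before shifting makes the whole expression 64-bit. A tiny standalone illustration of the corrected pattern:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the fixed expression: widen to 64 bits
 * before shifting so bit positions >= 32 survive. */
static uint64_t flag_at(int val, unsigned int bit_pos)
{
	return (uint64_t)val << bit_pos;
}

int main(void)
{
	/* Without the cast, 1 << 40 would be undefined/truncated. */
	printf("bit 40: 0x%llx\n", (unsigned long long)flag_at(1, 40));
	return 0;
}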
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
new file mode 100644
index 000000000000..8ca9c503bcb0
--- /dev/null
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CRYPTO_DEV_HISI_SEC
+ tristate "Support for Hisilicon SEC crypto block cipher accelerator"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ALGAPI
+ select SG_SPLIT
+ depends on ARM64 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Support for Hisilicon SEC Engine in Hip06 and Hip07
+
+ To compile this as a module, choose M here: the module
+ will be called hisi_sec.
diff --git a/drivers/crypto/hisilicon/Makefile b/drivers/crypto/hisilicon/Makefile
new file mode 100644
index 000000000000..463f46ace182
--- /dev/null
+++ b/drivers/crypto/hisilicon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += sec/
diff --git a/drivers/crypto/hisilicon/sec/Makefile b/drivers/crypto/hisilicon/sec/Makefile
new file mode 100644
index 000000000000..a55b698e0c27
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_HISI_SEC) += hisi_sec.o
+hisi_sec-y = sec_algs.o sec_drv.o
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
new file mode 100644
index 000000000000..f7d6d690116e
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -0,0 +1,1122 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/skcipher.h>
+#include <crypto/xts.h>
+#include <crypto/internal/skcipher.h>
+
+#include "sec_drv.h"
+
+#define SEC_MAX_CIPHER_KEY 64
+#define SEC_REQ_LIMIT SZ_32M
+
+struct sec_c_alg_cfg {
+ unsigned c_alg : 3;
+ unsigned c_mode : 3;
+ unsigned key_len : 2;
+ unsigned c_width : 2;
+};
+
+static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
+ [SEC_C_DES_ECB_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_DES_CBC_64] = {
+ .c_alg = SEC_C_ALG_DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_DES,
+ },
+ [SEC_C_3DES_ECB_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_ECB_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_3DES_CBC_192_3KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_3_KEY,
+ },
+ [SEC_C_3DES_CBC_192_2KEY] = {
+ .c_alg = SEC_C_ALG_3DES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_3DES_2_KEY,
+ },
+ [SEC_C_AES_ECB_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_ECB_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_ECB_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_ECB,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CBC_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CBC_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CBC_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CBC,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_CTR_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_CTR_192] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_192,
+ },
+ [SEC_C_AES_CTR_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_CTR,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_AES_XTS_128] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_128,
+ },
+ [SEC_C_AES_XTS_256] = {
+ .c_alg = SEC_C_ALG_AES,
+ .c_mode = SEC_C_MODE_XTS,
+ .key_len = SEC_KEY_LEN_AES_256,
+ },
+ [SEC_C_NULL] = {
+ },
+};
+
+/*
+ * Mutex used to ensure safe operation of reference count of
+ * alg providers
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
+ struct sec_bd_info *req,
+ enum sec_cipher_alg alg)
+{
+ const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];
+
+ memset(req, 0, sizeof(*req));
+ req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
+ req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
+ req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
+ req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;
+
+ req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
+ req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
+}
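
sec_alg_skcipher_init_template() builds the per-transform BD (buffer descriptor) template by packing the cipher mode, algorithm and key-length fields into 32-bit words with shift-and-mask constants. A standalone sketch of that packing style, using made-up field positions rather than the real SEC_BD_W* layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only (not the real SEC BD word): pack small
 * configuration fields into one 32-bit descriptor word with shifts and
 * masks, the same pattern the BD template builder uses. */
#define F_MODE_S	0
#define F_MODE_M	(0x7u << F_MODE_S)
#define F_ALG_S		3
#define F_ALG_M		(0x7u << F_ALG_S)
#define F_KEYLEN_S	6
#define F_KEYLEN_M	(0x3u << F_KEYLEN_S)

int main(void)
{
	uint32_t w0 = 0;

	w0 |= (2u << F_MODE_S) & F_MODE_M;	/* e.g. CBC */
	w0 |= (1u << F_ALG_S) & F_ALG_M;	/* e.g. AES */
	w0 |= (3u << F_KEYLEN_S) & F_KEYLEN_M;	/* e.g. 256-bit key */

	printf("w0 = 0x%08x\n", w0);
	printf("mode=%u alg=%u keylen=%u\n",
	       (w0 & F_MODE_M) >> F_MODE_S,
	       (w0 & F_ALG_M) >> F_ALG_S,
	       (w0 & F_KEYLEN_M) >> F_KEYLEN_S);
	return 0;
}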
+
+static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
+ const u8 *key,
+ unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->cipher_alg = alg;
+ memcpy(ctx->key, key, keylen);
+ sec_alg_skcipher_init_template(ctx, &ctx->req_template,
+ ctx->cipher_alg);
+}
+
+static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ dma_addr_t *psec_sgl,
+ struct scatterlist *sgl,
+ int count,
+ struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current = NULL;
+ struct sec_hw_sgl *sgl_next;
+ dma_addr_t sgl_next_dma;
+ struct scatterlist *sg;
+ int ret, sge_index, i;
+
+ if (!count)
+ return -EINVAL;
+
+ for_each_sg(sgl, sg, count, i) {
+ sge_index = i % SEC_MAX_SGE_NUM;
+ if (sge_index == 0) {
+ sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
+ GFP_KERNEL, &sgl_next_dma);
+ if (!sgl_next) {
+ ret = -ENOMEM;
+ goto err_free_hw_sgls;
+ }
+
+ if (!sgl_current) { /* First one */
+ *psec_sgl = sgl_next_dma;
+ *sec_sgl = sgl_next;
+ } else { /* Chained */
+ sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = sgl_next_dma;
+ sgl_current->next = sgl_next;
+ }
+ sgl_current = sgl_next;
+ }
+ sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
+ sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
+ sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
+ }
+ sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
+ sgl_current->next_sgl = 0;
+ (*sec_sgl)->entry_sum_in_chain = count;
+
+ return 0;
+
+err_free_hw_sgls:
+ sgl_current = *sec_sgl;
+ while (sgl_current) {
+ sgl_next = sgl_current->next;
+ dma_pool_free(info->hw_sgl_pool, sgl_current,
+ sgl_current->next_sgl);
+ sgl_current = sgl_next;
+ }
+ *psec_sgl = 0;
+
+ return ret;
+}
+
+static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+ dma_addr_t psec_sgl, struct sec_dev_info *info)
+{
+ struct sec_hw_sgl *sgl_current, *sgl_next;
+
+ if (!hw_sgl)
+ return;
+ sgl_current = hw_sgl;
+ while (sgl_current->next) {
+ sgl_next = sgl_current->next;
+ dma_pool_free(info->hw_sgl_pool, sgl_current,
+ sgl_current->next_sgl);
+ sgl_current = sgl_next;
+ }
+ dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
+}
+
+static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ enum sec_cipher_alg alg)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ mutex_lock(&ctx->lock);
+ if (ctx->key) {
+ /* rekeying */
+ memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
+ } else {
+ /* new key */
+ ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
+ &ctx->pkey, GFP_KERNEL);
+ if (!ctx->key) {
+ mutex_unlock(&ctx->lock);
+ return -ENOMEM;
+ }
+ }
+ mutex_unlock(&ctx->lock);
+ sec_alg_skcipher_init_context(tfm, key, keylen, alg);
+
+ return 0;
+}
+
+static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_ECB_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_ECB_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_ECB_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CBC_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CBC_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CBC_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ alg = SEC_C_AES_CTR_128;
+ break;
+ case AES_KEYSIZE_192:
+ alg = SEC_C_AES_CTR_192;
+ break;
+ case AES_KEYSIZE_256:
+ alg = SEC_C_AES_CTR_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ enum sec_cipher_alg alg;
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128 * 2:
+ alg = SEC_C_AES_XTS_128;
+ break;
+ case AES_KEYSIZE_256 * 2:
+ alg = SEC_C_AES_XTS_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
+}
+
+static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
+}
+
+static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
+}
+
+static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE * 3)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_ECB_192_3KEY);
+}
+
+static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ return sec_alg_skcipher_setkey(tfm, key, keylen,
+ SEC_C_3DES_CBC_192_3KEY);
+}
+
+static void sec_alg_free_el(struct sec_request_el *el,
+ struct sec_dev_info *info)
+{
+ sec_free_hw_sgl(el->out, el->dma_out, info);
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+ kfree(el->sgl_in);
+ kfree(el->sgl_out);
+ kfree(el);
+}
+
+/* queuelock must be held */
+static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
+{
+ struct sec_request_el *el, *temp;
+ int ret = 0;
+
+ mutex_lock(&sec_req->lock);
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ /*
+		 * Add to the hardware queue only under the following circumstances:
+		 * 1) Software and hardware queues are empty (no chain dependencies).
+		 * 2) No dependencies because of a new IV (check that the software
+		 *    queue is empty to maintain ordering).
+		 * 3) No dependencies because the mode does no chaining.
+		 *
+		 * In other cases, first insert onto the software queue, which is
+		 * then emptied as requests complete.
+ */
+ if (!queue->havesoftqueue ||
+ (kfifo_is_empty(&queue->softqueue) &&
+ sec_queue_empty(queue))) {
+ ret = sec_queue_send(queue, &el->req, sec_req);
+ if (ret == -EAGAIN) {
+				/* Wait until we can send, then try again */
+ /* DEAD if here - should not happen */
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+ } else {
+ kfifo_put(&queue->softqueue, el);
+ }
+ }
+err_unlock:
+ mutex_unlock(&sec_req->lock);
+
+ return ret;
+}
+
+static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
+ struct crypto_async_request *req_base)
+{
+ struct skcipher_request *skreq = container_of(req_base,
+ struct skcipher_request,
+ base);
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_request *backlog_req;
+ struct sec_request_el *sec_req_el, *nextrequest;
+ struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct device *dev = ctx->queue->dev_info->dev;
+ int icv_or_skey_en, ret;
+ bool done;
+
+ sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
+ head);
+ icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
+ SEC_BD_W0_ICV_OR_SKEY_EN_S;
+ if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
+ dev_err(dev, "Got an invalid answer %lu %d\n",
+ sec_resp->w1 & SEC_BD_W1_BD_INVALID,
+ icv_or_skey_en);
+ sec_req->err = -EINVAL;
+ /*
+ * We need to muddle on to avoid getting stuck with elements
+		 * on the queue. The error will be reported to the requester so
+		 * it should be able to handle it appropriately.
+ */
+ }
+
+ mutex_lock(&ctx->queue->queuelock);
+ /* Put the IV in place for chained cases */
+ switch (ctx->cipher_alg) {
+ case SEC_C_AES_CBC_128:
+ case SEC_C_AES_CBC_192:
+ case SEC_C_AES_CBC_256:
+ if (sec_req_el->req.w0 & SEC_BD_W0_DE)
+ sg_pcopy_to_buffer(sec_req_el->sgl_out,
+ sg_nents(sec_req_el->sgl_out),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ else
+ sg_pcopy_to_buffer(sec_req_el->sgl_in,
+ sg_nents(sec_req_el->sgl_in),
+ skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ sec_req_el->el_length -
+ crypto_skcipher_ivsize(atfm));
+ /* No need to sync to the device as coherent DMA */
+ break;
+ case SEC_C_AES_CTR_128:
+ case SEC_C_AES_CTR_192:
+ case SEC_C_AES_CTR_256:
+ crypto_inc(skreq->iv, 16);
+ break;
+ default:
+ /* Do not update */
+ break;
+ }
+
+ if (ctx->queue->havesoftqueue &&
+ !kfifo_is_empty(&ctx->queue->softqueue) &&
+ sec_queue_empty(ctx->queue)) {
+ ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
+ if (ret <= 0)
+ dev_err(dev,
+ "Error getting next element from kfifo %d\n",
+ ret);
+ else
+ /* We know there is space so this cannot fail */
+ sec_queue_send(ctx->queue, &nextrequest->req,
+ nextrequest->sec_req);
+ } else if (!list_empty(&ctx->backlog)) {
+ /* Need to verify there is room first */
+ backlog_req = list_first_entry(&ctx->backlog,
+ typeof(*backlog_req),
+ backlog_head);
+ if (sec_queue_can_enqueue(ctx->queue,
+ backlog_req->num_elements) ||
+ (ctx->queue->havesoftqueue &&
+ kfifo_avail(&ctx->queue->softqueue) >
+ backlog_req->num_elements)) {
+ sec_send_request(backlog_req, ctx->queue);
+ backlog_req->req_base->complete(backlog_req->req_base,
+ -EINPROGRESS);
+ list_del(&backlog_req->backlog_head);
+ }
+ }
+ mutex_unlock(&ctx->queue->queuelock);
+
+ mutex_lock(&sec_req->lock);
+ list_del(&sec_req_el->head);
+ mutex_unlock(&sec_req->lock);
+ sec_alg_free_el(sec_req_el, ctx->queue->dev_info);
+
+ /*
+ * Request is done.
+ * The dance is needed as the lock is freed in the completion
+ */
+ mutex_lock(&sec_req->lock);
+ done = list_empty(&sec_req->elements);
+ mutex_unlock(&sec_req->lock);
+ if (done) {
+ if (crypto_skcipher_ivsize(atfm)) {
+ dma_unmap_single(dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ }
+ dma_unmap_sg(dev, skreq->src, sec_req->len_in,
+ DMA_BIDIRECTIONAL);
+ if (skreq->src != skreq->dst)
+ dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
+ DMA_BIDIRECTIONAL);
+ skreq->base.complete(&skreq->base, sec_req->err);
+ }
+}
+
+void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
+{
+ struct sec_request *sec_req = shadow;
+
+ sec_req->cb(resp, sec_req->req_base);
+}
+
+static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
+ int *steps)
+{
+ size_t *sizes;
+ int i;
+
+ /* Split into suitable sized blocks */
+ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
+ sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+ if (!sizes)
+ return -ENOMEM;
+
+ for (i = 0; i < *steps - 1; i++)
+ sizes[i] = SEC_REQ_LIMIT;
+ sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
+ *split_sizes = sizes;
+
+ return 0;
+}
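
Each skcipher request is split into elements of at most SEC_REQ_LIMIT (32 MiB): the element count is the total length rounded up to the limit, every element but the last carries the full limit, and the last takes the remainder. A standalone sketch of that computation:

#include <stddef.h>
#include <stdio.h>

#define REQ_LIMIT (32UL * 1024 * 1024)	/* mirrors SEC_REQ_LIMIT: 32 MiB per element */

/* Sketch of the driver's split-size computation. The zero-length guard
 * is added here for the sketch; the driver assumes a non-zero length. */
static int calc_split_sizes(size_t length, size_t sizes[], int max_steps)
{
	int steps = (length + REQ_LIMIT - 1) / REQ_LIMIT;	/* roundup */

	if (length == 0 || steps > max_steps)
		return -1;
	for (int i = 0; i < steps - 1; i++)
		sizes[i] = REQ_LIMIT;
	sizes[steps - 1] = length - REQ_LIMIT * (steps - 1);
	return steps;
}

int main(void)
{
	size_t sizes[8];
	int steps = calc_split_sizes(70UL * 1024 * 1024, sizes, 8);

	for (int i = 0; i < steps; i++)
		printf("element %d: %zu bytes\n", i, sizes[i]);
	/* 70 MiB -> 32 MiB, 32 MiB, 6 MiB */
	return 0;
}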
+
+static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+ int steps, struct scatterlist ***splits,
+ int **splits_nents,
+ int sgl_len_in,
+ struct device *dev)
+{
+ int ret, count;
+
+ count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+ if (!count)
+ return -EINVAL;
+
+ *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+ if (!*splits) {
+ ret = -ENOMEM;
+ goto err_unmap_sg;
+ }
+ *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+ if (!*splits_nents) {
+ ret = -ENOMEM;
+ goto err_free_splits;
+ }
+
+ /* output the scatter list before and after this */
+ ret = sg_split(sgl, count, 0, steps, split_sizes,
+ *splits, *splits_nents, GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_free_splits_nents;
+ }
+
+ return 0;
+
+err_free_splits_nents:
+ kfree(*splits_nents);
+err_free_splits:
+ kfree(*splits);
+err_unmap_sg:
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+
+ return ret;
+}
+
+/*
+ * Reverses the sec_map_and_split_sg call for messages not yet added to
+ * the queues.
+ */
+static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
+ struct scatterlist **splits, int *splits_nents,
+ int sgl_len_in, struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < steps; i++)
+ kfree(splits[i]);
+ kfree(splits_nents);
+ kfree(splits);
+
+ dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
+}
+
+static struct sec_request_el
+*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
+ int el_size, bool different_dest,
+ struct scatterlist *sgl_in, int n_ents_in,
+ struct scatterlist *sgl_out, int n_ents_out,
+ struct sec_dev_info *info)
+{
+ struct sec_request_el *el;
+ struct sec_bd_info *req;
+ int ret;
+
+ el = kzalloc(sizeof(*el), GFP_KERNEL);
+ if (!el)
+ return ERR_PTR(-ENOMEM);
+ el->el_length = el_size;
+ req = &el->req;
+ memcpy(req, template, sizeof(*req));
+
+ req->w0 &= ~SEC_BD_W0_CIPHER_M;
+ if (encrypt)
+ req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
+ else
+ req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+ req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
+ SEC_BD_W0_C_GRAN_SIZE_19_16_M;
+
+ req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+ req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
+ SEC_BD_W0_C_GRAN_SIZE_21_20_M;
+
+ /* Writing whole u32 so no need to take care of masking */
+ req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
+ ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
+ SEC_BD_W2_C_GRAN_SIZE_15_0_M);
+
+ req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
+ req->w1 |= SEC_BD_W1_ADDR_TYPE;
+
+ el->sgl_in = sgl_in;
+
+ ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
+ n_ents_in, info);
+ if (ret)
+ goto err_free_el;
+
+ req->data_addr_lo = lower_32_bits(el->dma_in);
+ req->data_addr_hi = upper_32_bits(el->dma_in);
+
+ if (different_dest) {
+ el->sgl_out = sgl_out;
+ ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
+ el->sgl_out,
+ n_ents_out, info);
+ if (ret)
+ goto err_free_hw_sgl_in;
+
+ req->w0 |= SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
+
+ } else {
+ req->w0 &= ~SEC_BD_W0_DE;
+ req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
+ req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
+ }
+
+ return el;
+
+err_free_hw_sgl_in:
+ sec_free_hw_sgl(el->in, el->dma_in, info);
+err_free_el:
+ kfree(el);
+
+ return ERR_PTR(ret);
+}
+
+static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ bool encrypt)
+{
+ struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
+ struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sec_queue *queue = ctx->queue;
+ struct sec_request *sec_req = skcipher_request_ctx(skreq);
+ struct sec_dev_info *info = queue->dev_info;
+ int i, ret, steps;
+ size_t *split_sizes;
+ struct scatterlist **splits_in;
+ struct scatterlist **splits_out = NULL;
+ int *splits_in_nents;
+ int *splits_out_nents = NULL;
+ struct sec_request_el *el, *temp;
+
+ mutex_init(&sec_req->lock);
+ sec_req->req_base = &skreq->base;
+ sec_req->err = 0;
+ /* SGL mapping out here to allow us to break it up as necessary */
+ sec_req->len_in = sg_nents(skreq->src);
+
+ ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
+ &steps);
+ if (ret)
+ return ret;
+ sec_req->num_elements = steps;
+ ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
+ &splits_in_nents, sec_req->len_in,
+ info->dev);
+ if (ret)
+ goto err_free_split_sizes;
+
+ if (skreq->src != skreq->dst) {
+ sec_req->len_out = sg_nents(skreq->dst);
+ ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
+ &splits_out, &splits_out_nents,
+ sec_req->len_out, info->dev);
+ if (ret)
+ goto err_unmap_in_sg;
+ }
+	/* Shared info stored in sec_req - applies to all BDs */
+ sec_req->tfm_ctx = ctx;
+ sec_req->cb = sec_skcipher_alg_callback;
+ INIT_LIST_HEAD(&sec_req->elements);
+
+ /*
+ * Future optimization.
+	 * In the chaining case we can't use a dma pool bounce buffer,
+	 * but in the case where we know there is no chaining we could.
+ */
+ if (crypto_skcipher_ivsize(atfm)) {
+ sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
+ ret = -ENOMEM;
+ goto err_unmap_out_sg;
+ }
+ }
+
+ /* Set them all up then queue - cleaner error handling. */
+ for (i = 0; i < steps; i++) {
+ el = sec_alg_alloc_and_fill_el(&ctx->req_template,
+ encrypt ? 1 : 0,
+ split_sizes[i],
+ skreq->src != skreq->dst,
+ splits_in[i], splits_in_nents[i],
+ splits_out[i],
+ splits_out_nents[i], info);
+ if (IS_ERR(el)) {
+ ret = PTR_ERR(el);
+ goto err_free_elements;
+ }
+ el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
+ el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
+ el->sec_req = sec_req;
+ list_add_tail(&el->head, &sec_req->elements);
+ }
+
+ /*
+ * Only attempt to queue if the whole lot can fit in the queue -
+	 * we can't successfully clean up after a partial queueing so this
+ * must succeed or fail atomically.
+ *
+ * Big hammer test of both software and hardware queues - could be
+ * more refined but this is unlikely to happen so no need.
+ */
+
+	/* Cleanup - all elements in the pointer arrays have been copied */
+ kfree(splits_in_nents);
+ kfree(splits_in);
+ kfree(splits_out_nents);
+ kfree(splits_out);
+ kfree(split_sizes);
+
+ /* Grab a big lock for a long time to avoid concurrency issues */
+ mutex_lock(&queue->queuelock);
+
+ /*
+ * Can go on to queue if we have space in either:
+ * 1) The hardware queue and no software queue
+ * 2) The software queue
+	 * AND there is nothing in the backlog. If there is a backlog we
+	 * have to queue only to the backlog queue and return busy.
+ */
+ if ((!sec_queue_can_enqueue(queue, steps) &&
+ (!queue->havesoftqueue ||
+ kfifo_avail(&queue->softqueue) > steps)) ||
+ !list_empty(&ctx->backlog)) {
+ if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ list_add_tail(&sec_req->backlog_head, &ctx->backlog);
+ mutex_unlock(&queue->queuelock);
+ return -EBUSY;
+ }
+
+ ret = -EBUSY;
+ mutex_unlock(&queue->queuelock);
+ goto err_free_elements;
+ }
+ ret = sec_send_request(sec_req, queue);
+ mutex_unlock(&queue->queuelock);
+ if (ret)
+ goto err_free_elements;
+
+ return -EINPROGRESS;
+
+err_free_elements:
+ list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
+ list_del(&el->head);
+ sec_alg_free_el(el, info);
+ }
+ if (crypto_skcipher_ivsize(atfm))
+ dma_unmap_single(info->dev, sec_req->dma_iv,
+ crypto_skcipher_ivsize(atfm),
+ DMA_BIDIRECTIONAL);
+err_unmap_out_sg:
+ if (skreq->src != skreq->dst)
+ sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
+ splits_out_nents, sec_req->len_out,
+ info->dev);
+err_unmap_in_sg:
+ sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
+ sec_req->len_in, info->dev);
+err_free_split_sizes:
+ kfree(split_sizes);
+
+ return ret;
+}
+
+static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, true);
+}
+
+static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+ return sec_alg_skcipher_crypto(req, false);
+}
+
+static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->backlog);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));
+
+ ctx->queue = sec_queue_alloc_start_safe();
+ if (IS_ERR(ctx->queue))
+ return PTR_ERR(ctx->queue);
+
+ mutex_init(&ctx->queue->queuelock);
+ ctx->queue->havesoftqueue = false;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct device *dev = ctx->queue->dev_info->dev;
+
+ if (ctx->key) {
+ memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
+ dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
+ ctx->pkey);
+ }
+ sec_queue_stop_release(ctx->queue);
+}
+
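+/*
+ * Variant of init used by the IV chaining modes (cbc, ctr): in addition to
+ * claiming a hardware queue it allocates a software queue so that elements
+ * of a chained request can be held back until the hardware queue drains.
+ */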
+static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = sec_alg_skcipher_init(tfm);
+ if (ret)
+ return ret;
+
+ INIT_KFIFO(ctx->queue->softqueue);
+ ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
+ if (ret) {
+ sec_alg_skcipher_exit(tfm);
+ return ret;
+ }
+ ctx->queue->havesoftqueue = true;
+
+ return 0;
+}
+
+static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
+{
+ struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ kfifo_free(&ctx->queue->softqueue);
+ sec_alg_skcipher_exit(tfm);
+}
+
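+/*
+ * Algorithms offloaded to the hardware. Modes that need IV chaining across
+ * split requests (cbc, ctr) use the *_with_queue init/exit pair so a
+ * software queue is available; ecb and xts use the plain init/exit pair.
+ */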
+static struct skcipher_alg sec_algs[] = {
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "hisi_sec_aes_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "hisi_sec_aes_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "hisi_sec_aes_ctr",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_aes_ctr,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "hisi_sec_aes_xts",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_aes_xts,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ }, {
+ /* Unable to find any test vectors so untested */
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "hisi_sec_des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ }, {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "hisi_sec_des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init_with_queue,
+ .exit = sec_alg_skcipher_exit_with_queue,
+ .setkey = sec_alg_skcipher_setkey_3des_cbc,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }, {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "hisi_sec_3des_ecb",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .init = sec_alg_skcipher_init,
+ .exit = sec_alg_skcipher_exit,
+ .setkey = sec_alg_skcipher_setkey_3des_ecb,
+ .decrypt = sec_alg_skcipher_decrypt,
+ .encrypt = sec_alg_skcipher_encrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ }
+};
+
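+/*
+ * Register the algorithms with the crypto subsystem only when the first SEC
+ * device probes; active_devs reference counts probed devices so that the
+ * last remove unregisters them again.
+ */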
+int sec_algs_register(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs != 1)
+ goto unlock;
+
+ ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+ if (ret)
+ --active_devs;
+unlock:
+ mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+void sec_algs_unregister(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs != 0)
+ goto unlock;
+ crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
+
+unlock:
+ mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
new file mode 100644
index 000000000000..c1ee4e7bf996
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Hisilicon SEC units found on Hip06 and Hip07
+ *
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ */
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "sec_drv.h"
+
+#define SEC_QUEUE_AR_FROCE_ALLOC 0
+#define SEC_QUEUE_AR_FROCE_NOALLOC 1
+#define SEC_QUEUE_AR_FROCE_DIS 2
+
+#define SEC_QUEUE_AW_FROCE_ALLOC 0
+#define SEC_QUEUE_AW_FROCE_NOALLOC 1
+#define SEC_QUEUE_AW_FROCE_DIS 2
+
+/* SEC_ALGSUB registers */
+#define SEC_ALGSUB_CLK_EN_REG 0x03b8
+#define SEC_ALGSUB_CLK_DIS_REG 0x03bc
+#define SEC_ALGSUB_CLK_ST_REG 0x535c
+#define SEC_ALGSUB_RST_REQ_REG 0x0aa8
+#define SEC_ALGSUB_RST_DREQ_REG 0x0aac
+#define SEC_ALGSUB_RST_ST_REG 0x5a54
+#define SEC_ALGSUB_RST_ST_IS_RST BIT(0)
+
+#define SEC_ALGSUB_BUILD_RST_REQ_REG 0x0ab8
+#define SEC_ALGSUB_BUILD_RST_DREQ_REG 0x0abc
+#define SEC_ALGSUB_BUILD_RST_ST_REG 0x5a5c
+#define SEC_ALGSUB_BUILD_RST_ST_IS_RST BIT(0)
+
+#define SEC_SAA_BASE 0x00001000UL
+
+/* SEC_SAA registers */
+#define SEC_SAA_CTRL_REG(x) ((x) * SEC_SAA_ADDR_SIZE)
+#define SEC_SAA_CTRL_GET_QM_EN BIT(0)
+
+#define SEC_ST_INTMSK1_REG 0x0200
+#define SEC_ST_RINT1_REG 0x0400
+#define SEC_ST_INTSTS1_REG 0x0600
+#define SEC_BD_MNG_STAT_REG 0x0800
+#define SEC_PARSING_STAT_REG 0x0804
+#define SEC_LOAD_TIME_OUT_CNT_REG 0x0808
+#define SEC_CORE_WORK_TIME_OUT_CNT_REG 0x080c
+#define SEC_BACK_TIME_OUT_CNT_REG 0x0810
+#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG 0x0814
+#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG 0x0818
+#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG 0x081c
+#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG 0x0820
+#define SEC_SAA_ACC_REG 0x083c
+#define SEC_BD_NUM_CNT_IN_SEC_REG 0x0858
+#define SEC_LOAD_WORK_TIME_CNT_REG 0x0860
+#define SEC_CORE_WORK_WORK_TIME_CNT_REG 0x0864
+#define SEC_BACK_WORK_TIME_CNT_REG 0x0868
+#define SEC_SAA_IDLE_TIME_CNT_REG 0x086c
+#define SEC_SAA_CLK_CNT_REG 0x0870
+
+/* SEC_COMMON registers */
+#define SEC_CLK_EN_REG 0x0000
+#define SEC_CTRL_REG 0x0004
+
+#define SEC_COMMON_CNT_CLR_CE_REG 0x0008
+#define SEC_COMMON_CNT_CLR_CE_CLEAR BIT(0)
+#define SEC_COMMON_CNT_CLR_CE_SNAP_EN BIT(1)
+
+#define SEC_SECURE_CTRL_REG 0x000c
+#define SEC_AXI_CACHE_CFG_REG 0x0010
+#define SEC_AXI_QOS_CFG_REG 0x0014
+#define SEC_IPV4_MASK_TABLE_REG 0x0020
+#define SEC_IPV6_MASK_TABLE_X_REG(x) (0x0024 + (x) * 4)
+#define SEC_FSM_MAX_CNT_REG 0x0064
+
+#define SEC_CTRL2_REG 0x0068
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M GENMASK(3, 0)
+#define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S 0
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M GENMASK(6, 4)
+#define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S 4
+#define SEC_CTRL2_CLK_GATE_EN BIT(7)
+#define SEC_CTRL2_ENDIAN_BD BIT(8)
+#define SEC_CTRL2_ENDIAN_BD_TYPE BIT(9)
+
+#define SEC_CNT_PRECISION_CFG_REG 0x006c
+#define SEC_DEBUG_BD_CFG_REG 0x0070
+#define SEC_DEBUG_BD_CFG_WB_NORMAL BIT(0)
+#define SEC_DEBUG_BD_CFG_WB_EN BIT(1)
+
+#define SEC_Q_SIGHT_SEL 0x0074
+#define SEC_Q_SIGHT_HIS_CLR 0x0078
+#define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4)
+#define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4)
+#define SEC_STAT_CLR_REG 0x0a00
+#define SEC_SAA_IDLE_CNT_CLR_REG 0x0a04
+#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG 0x0b00
+#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG 0x0b04
+#define SEC_QM_BD_DFX_CFG_REG 0x0b08
+#define SEC_QM_BD_DFX_RESULT_REG 0x0b0c
+#define SEC_QM_BDID_DFX_RESULT_REG 0x0b10
+#define SEC_QM_BD_DFIFO_STATUS_REG 0x0b14
+#define SEC_QM_BD_DFX_CFG2_REG 0x0b1c
+#define SEC_QM_BD_DFX_RESULT2_REG 0x0b20
+#define SEC_QM_BD_IDFIFO_STATUS_REG 0x0b18
+#define SEC_QM_BD_DFIFO_STATUS2_REG 0x0b28
+#define SEC_QM_BD_IDFIFO_STATUS2_REG 0x0b2c
+
+#define SEC_HASH_IPV4_MASK 0xfff00000
+#define SEC_MAX_SAA_NUM 0xa
+#define SEC_SAA_ADDR_SIZE 0x1000
+
+#define SEC_Q_INIT_REG 0x0
+#define SEC_Q_INIT_WO_STAT_CLEAR 0x2
+#define SEC_Q_INIT_AND_STAT_CLEAR 0x3
+
+#define SEC_Q_CFG_REG 0x8
+#define SEC_Q_CFG_REORDER BIT(0)
+
+#define SEC_Q_PROC_NUM_CFG_REG 0x10
+#define SEC_QUEUE_ENB_REG 0x18
+
+#define SEC_Q_DEPTH_CFG_REG 0x50
+#define SEC_Q_DEPTH_CFG_DEPTH_M GENMASK(11, 0)
+#define SEC_Q_DEPTH_CFG_DEPTH_S 0
+
+#define SEC_Q_BASE_HADDR_REG 0x54
+#define SEC_Q_BASE_LADDR_REG 0x58
+#define SEC_Q_WR_PTR_REG 0x5c
+#define SEC_Q_OUTORDER_BASE_HADDR_REG 0x60
+#define SEC_Q_OUTORDER_BASE_LADDR_REG 0x64
+#define SEC_Q_OUTORDER_RD_PTR_REG 0x68
+#define SEC_Q_OT_TH_REG 0x6c
+
+#define SEC_Q_ARUSER_CFG_REG 0x70
+#define SEC_Q_ARUSER_CFG_FA BIT(0)
+#define SEC_Q_ARUSER_CFG_FNA BIT(1)
+#define SEC_Q_ARUSER_CFG_RINVLD BIT(2)
+#define SEC_Q_ARUSER_CFG_PKG BIT(3)
+
+#define SEC_Q_AWUSER_CFG_REG 0x74
+#define SEC_Q_AWUSER_CFG_FA BIT(0)
+#define SEC_Q_AWUSER_CFG_FNA BIT(1)
+#define SEC_Q_AWUSER_CFG_PKG BIT(2)
+
+#define SEC_Q_ERR_BASE_HADDR_REG 0x7c
+#define SEC_Q_ERR_BASE_LADDR_REG 0x80
+#define SEC_Q_CFG_VF_NUM_REG 0x84
+#define SEC_Q_SOFT_PROC_PTR_REG 0x88
+#define SEC_Q_FAIL_INT_MSK_REG 0x300
+#define SEC_Q_FLOW_INT_MKS_REG 0x304
+#define SEC_Q_FAIL_RINT_REG 0x400
+#define SEC_Q_FLOW_RINT_REG 0x404
+#define SEC_Q_FAIL_INT_STATUS_REG 0x500
+#define SEC_Q_FLOW_INT_STATUS_REG 0x504
+#define SEC_Q_STATUS_REG 0x600
+#define SEC_Q_RD_PTR_REG 0x604
+#define SEC_Q_PRO_PTR_REG 0x608
+#define SEC_Q_OUTORDER_WR_PTR_REG 0x60c
+#define SEC_Q_OT_CNT_STATUS_REG 0x610
+#define SEC_Q_INORDER_BD_NUM_ST_REG 0x650
+#define SEC_Q_INORDER_GET_FLAG_ST_REG 0x654
+#define SEC_Q_INORDER_ADD_FLAG_ST_REG 0x658
+#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG 0x65c
+#define SEC_Q_RD_DONE_PTR_REG 0x660
+#define SEC_Q_CPL_Q_BD_NUM_ST_REG 0x700
+#define SEC_Q_CPL_Q_PTR_ST_REG 0x704
+#define SEC_Q_CPL_Q_H_ADDR_ST_REG 0x708
+#define SEC_Q_CPL_Q_L_ADDR_ST_REG 0x70c
+#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG 0x710
+#define SEC_Q_WRR_ID_CHECK_REG 0x714
+#define SEC_Q_CPLQ_FULL_CHECK_REG 0x718
+#define SEC_Q_SUCCESS_BD_CNT_REG 0x800
+#define SEC_Q_FAIL_BD_CNT_REG 0x804
+#define SEC_Q_GET_BD_CNT_REG 0x808
+#define SEC_Q_IVLD_CNT_REG 0x80c
+#define SEC_Q_BD_PROC_GET_CNT_REG 0x810
+#define SEC_Q_BD_PROC_DONE_CNT_REG 0x814
+#define SEC_Q_LAT_CLR_REG 0x850
+#define SEC_Q_PKT_LAT_MAX_REG 0x854
+#define SEC_Q_PKT_LAT_AVG_REG 0x858
+#define SEC_Q_PKT_LAT_MIN_REG 0x85c
+#define SEC_Q_ID_CLR_CFG_REG 0x900
+#define SEC_Q_1ST_BD_ERR_ID_REG 0x904
+#define SEC_Q_1ST_AUTH_FAIL_ID_REG 0x908
+#define SEC_Q_1ST_RD_ERR_ID_REG 0x90c
+#define SEC_Q_1ST_ECC2_ERR_ID_REG 0x910
+#define SEC_Q_1ST_IVLD_ID_REG 0x914
+#define SEC_Q_1ST_BD_WR_ERR_ID_REG 0x918
+#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG 0x91c
+#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG 0x920
+
+struct sec_debug_bd_info {
+#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M GENMASK(22, 0)
+ u32 soft_err_check;
+#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M GENMASK(9, 0)
+ u32 hard_err_check;
+ u32 icv_mac1st_word;
+#define SEC_DEBUG_BD_INFO_GET_ID_M GENMASK(19, 0)
+ u32 sec_get_id;
+ /* W4---W15 */
+ u32 reserv_left[12];
+};
+
+struct sec_out_bd_info {
+#define SEC_OUT_BD_INFO_Q_ID_M GENMASK(11, 0)
+#define SEC_OUT_BD_INFO_ECC_2BIT_ERR BIT(14)
+ u16 data;
+};
+
+#define SEC_MAX_DEVICES 8
+static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
+static DEFINE_MUTEX(sec_id_lock);
+
+static int sec_queue_map_io(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct resource *res;
+
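+	/*
+	 * MEM resources 0 and 1 are the shared SEC_COMMON and SEC_SAA
+	 * register regions (mapped in sec_map_io()); the per-queue register
+	 * windows follow, hence the offset of 2.
+	 */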
+ res = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM,
+ 2 + queue->queue_id);
+ if (!res) {
+ dev_err(dev, "Failed to get queue %d memory resource\n",
+ queue->queue_id);
+ return -ENOMEM;
+ }
+ queue->regs = ioremap(res->start, resource_size(res));
+ if (!queue->regs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void sec_queue_unmap_io(struct sec_queue *queue)
+{
+ iounmap(queue->regs);
+}
+
+static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (ar_pkg)
+ regval |= SEC_Q_ARUSER_CFG_PKG;
+ else
+ regval &= ~SEC_Q_ARUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval |= SEC_Q_AWUSER_CFG_PKG;
+ writel_relaxed(regval, addr);
+
+ return 0;
+}
+
+static int sec_clk_en(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
+ return 0;
+ i++;
+ } while (i < 10);
+ dev_err(info->dev, "sec clock enable fail!\n");
+
+ return -EIO;
+}
+
+static int sec_clk_dis(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ u32 i = 0;
+
+ writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
+ do {
+ usleep_range(1000, 10000);
+ if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
+ return 0;
+ i++;
+ } while (i < 10);
+ dev_err(info->dev, "sec clock disable fail!\n");
+
+ return -EIO;
+}
+
+static int sec_reset_whole_module(struct sec_dev_info *info)
+{
+ void __iomem *base = info->regs[SEC_COMMON];
+ bool is_reset, b_is_reset;
+ u32 i = 0;
+
+ writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (is_reset && b_is_reset)
+ break;
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset req failed\n");
+ return -EIO;
+ }
+ }
+
+ i = 0;
+ writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
+ writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
+ while (1) {
+ usleep_range(1000, 10000);
+ is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
+ SEC_ALGSUB_RST_ST_IS_RST;
+ b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
+ SEC_ALGSUB_BUILD_RST_ST_IS_RST;
+ if (!is_reset && !b_is_reset)
+ break;
+
+ i++;
+ if (i > 10) {
+ dev_err(info->dev, "Reset dreq failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static void sec_bd_endian_little(struct sec_dev_info *info)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
+ writel_relaxed(regval, addr);
+}
+
+/*
+ * sec_cache_config - configure optimum cache placement
+ */
+static void sec_cache_config(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /* Check that translation is occurring */
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ writel_relaxed(0x44cf9e, addr);
+ else
+ writel_relaxed(0x4cfd9, addr);
+}
+
+static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
+ SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clkgate)
+ regval |= SEC_CTRL2_CLK_GATE_EN;
+ else
+ regval &= ~SEC_CTRL2_CLK_GATE_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (clr_ce)
+ regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (snap_en)
+ regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ else
+ regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
+{
+ void __iomem *base = info->regs[SEC_SAA];
+ int i;
+
+ for (i = 0; i < 10; i++)
+ writel_relaxed(hash_mask[0],
+ base + SEC_IPV6_MASK_TABLE_X_REG(i));
+}
+
+static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
+{
+ if (hash_mask & SEC_HASH_IPV4_MASK) {
+ dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n ");
+ return -EINVAL;
+ }
+
+ writel_relaxed(hash_mask,
+ info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);
+
+ return 0;
+}
+
+static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ /* Always disable write back of normal bd */
+ regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;
+
+ if (cfg)
+ regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
+ else
+ regval |= SEC_DEBUG_BD_CFG_WB_EN;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
+{
+ void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
+ SEC_SAA_CTRL_REG(saa_indx);
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (en)
+ regval |= SEC_SAA_CTRL_GET_QM_EN;
+ else
+ regval &= ~SEC_SAA_CTRL_GET_QM_EN;
+ writel_relaxed(regval, addr);
+}
+
+static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
+ u32 saa_int_mask)
+{
+ writel_relaxed(saa_int_mask,
+ info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
+ saa_indx * SEC_SAA_ADDR_SIZE);
+}
+
+static void sec_streamid(struct sec_dev_info *info, int i)
+{
+ #define SEC_SID 0x600
+ #define SEC_VMID 0
+
+ writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
+ info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
+}
+
+static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AR_FROCE_ALLOC) {
+ regval |= SEC_Q_ARUSER_CFG_FA;
+ regval &= ~SEC_Q_ARUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_ARUSER_CFG_FA;
+ regval |= SEC_Q_ARUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
+{
+ void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ if (alloc == SEC_QUEUE_AW_FROCE_ALLOC) {
+ regval |= SEC_Q_AWUSER_CFG_FA;
+ regval &= ~SEC_Q_AWUSER_CFG_FNA;
+ } else {
+ regval &= ~SEC_Q_AWUSER_CFG_FA;
+ regval |= SEC_Q_AWUSER_CFG_FNA;
+ }
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
+{
+ void __iomem *base = queue->regs;
+ u32 regval;
+
+ regval = readl_relaxed(base + SEC_Q_CFG_REG);
+ if (reorder)
+ regval |= SEC_Q_CFG_REORDER;
+ else
+ regval &= ~SEC_Q_CFG_REORDER;
+ writel_relaxed(regval, base + SEC_Q_CFG_REG);
+}
+
+static void sec_queue_depth(struct sec_queue *queue, u32 depth)
+{
+ void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
+ u32 regval;
+
+ regval = readl_relaxed(addr);
+ regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
+ regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;
+
+ writel_relaxed(regval, addr);
+}
+
+static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
+}
+
+static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
+}
+
+static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
+{
+ writel_relaxed(upper_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
+ writel_relaxed(lower_32_bits(addr),
+ queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
+}
+
+static void sec_queue_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_irq_enable(struct sec_queue *queue)
+{
+ writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
+}
+
+static void sec_queue_abn_irq_disable(struct sec_queue *queue)
+{
+ writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
+}
+
+static void sec_queue_stop(struct sec_queue *queue)
+{
+ disable_irq(queue->task_irq);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static void sec_queue_start(struct sec_queue *queue)
+{
+ sec_queue_irq_enable(queue);
+ enable_irq(queue->task_irq);
+ queue->expected = 0;
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+ writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
+}
+
+static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
+{
+ int i;
+
+ mutex_lock(&info->dev_lock);
+
+ /* Get the first idle queue in SEC device */
+ for (i = 0; i < SEC_Q_NUM; i++)
+ if (!info->queues[i].in_use) {
+ info->queues[i].in_use = true;
+ info->queues_in_use++;
+ mutex_unlock(&info->dev_lock);
+
+ return &info->queues[i];
+ }
+ mutex_unlock(&info->dev_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int sec_queue_free(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+
+ if (queue->queue_id >= SEC_Q_NUM) {
+ dev_err(info->dev, "No queue %d\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ if (!queue->in_use) {
+ dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&info->dev_lock);
+ queue->in_use = false;
+ info->queues_in_use--;
+ mutex_unlock(&info->dev_lock);
+
+ return 0;
+}
+
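+/*
+ * Hard irq handler: mask further queue interrupts and defer the completion
+ * processing to the threaded handler below.
+ */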
+static irqreturn_t sec_isr_handle_th(int irq, void *q)
+{
+ sec_queue_irq_disable(q);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t sec_isr_handle(int irq, void *q)
+{
+ struct sec_queue *queue = q;
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
+ struct sec_out_bd_info *outorder_msg;
+ struct sec_bd_info *msg;
+ u32 ooo_read, ooo_write;
+ void __iomem *base = queue->regs;
+ int q_id;
+
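+	/*
+	 * Walk the out of order completion ring: each entry gives the index
+	 * of the finished command ring element. Mark it in the unprocessed
+	 * bitmap, then run callbacks in submission order starting from
+	 * 'expected' so upper layers always see in-order completion.
+	 */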
+ ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+
+ while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) {
+ /*
+ * Must be before callback otherwise blocks adding other chained
+ * elements
+ */
+ set_bit(q_id, queue->unprocessed);
+ if (q_id == queue->expected)
+ while (test_bit(queue->expected, queue->unprocessed)) {
+ clear_bit(queue->expected, queue->unprocessed);
+ msg = msg_ring->vaddr + queue->expected;
+ msg->w0 &= ~SEC_BD_W0_DONE;
+ msg_ring->callback(msg,
+ queue->shadow[queue->expected]);
+ queue->shadow[queue->expected] = NULL;
+ queue->expected = (queue->expected + 1) %
+ SEC_QUEUE_LEN;
+ atomic_dec(&msg_ring->used);
+ }
+
+ ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
+ writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
+ ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
+ outorder_msg = cq_ring->vaddr + ooo_read;
+ q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
+ msg = msg_ring->vaddr + q_id;
+ }
+
+ sec_queue_irq_enable(queue);
+
+ return IRQ_HANDLED;
+}
+
+static int sec_queue_irq_init(struct sec_queue *queue)
+{
+ struct sec_dev_info *info = queue->dev_info;
+ int irq = queue->task_irq;
+ int ret;
+
+ ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
+ IRQF_TRIGGER_RISING, queue->name, queue);
+ if (ret) {
+ dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
+ return ret;
+ }
+ disable_irq(irq);
+
+ return 0;
+}
+
+static int sec_queue_irq_uninit(struct sec_queue *queue)
+{
+ free_irq(queue->task_irq, queue);
+
+ return 0;
+}
+
+static struct sec_dev_info *sec_device_get(void)
+{
+ struct sec_dev_info *sec_dev = NULL;
+ struct sec_dev_info *this_sec_dev;
+ int least_busy_n = SEC_Q_NUM + 1;
+ int i;
+
+ /* Find which one is least busy and use that first */
+ for (i = 0; i < SEC_MAX_DEVICES; i++) {
+ this_sec_dev = sec_devices[i];
+ if (this_sec_dev &&
+ this_sec_dev->queues_in_use < least_busy_n) {
+ least_busy_n = this_sec_dev->queues_in_use;
+ sec_dev = this_sec_dev;
+ }
+ }
+
+ return sec_dev;
+}
+
+static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
+{
+ struct sec_queue *queue;
+
+ queue = sec_alloc_queue(info);
+ if (IS_ERR(queue)) {
+ dev_err(info->dev, "alloc sec queue failed! %ld\n",
+ PTR_ERR(queue));
+ return queue;
+ }
+
+ sec_queue_start(queue);
+
+ return queue;
+}
+
+/**
+ * sec_queue_alloc_start_safe() - get a hw queue from an appropriate instance
+ *
+ * This function does extremely simplistic load balancing. It does not take into
+ * account NUMA locality of the accelerator, or which cpu has requested the
+ * queue. Future work may focus on optimizing this in order to improve full
+ * machine throughput.
+ */
+struct sec_queue *sec_queue_alloc_start_safe(void)
+{
+ struct sec_dev_info *info;
+ struct sec_queue *queue = ERR_PTR(-ENODEV);
+
+ mutex_lock(&sec_id_lock);
+ info = sec_device_get();
+ if (!info)
+ goto unlock;
+
+ queue = sec_queue_alloc_start(info);
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return queue;
+}
+
+/**
+ * sec_queue_stop_release() - free up a hw queue for reuse
+ * @queue: The queue we are done with.
+ *
+ * This will stop the current queue, terminating any transactions
+ * that are in flight, and return it to the pool of available hw queues.
+ */
+int sec_queue_stop_release(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ int ret;
+
+ sec_queue_stop(queue);
+
+ ret = sec_queue_free(queue);
+ if (ret)
+ dev_err(dev, "Releasing queue failed %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to check.
+ *
+ * We need to know if we have an empty queue for some of the chaining modes
+ * as if it is not empty we may need to hold the message in a software queue
+ * until the hw queue is drained.
+ */
+bool sec_queue_empty(struct sec_queue *queue)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return !atomic_read(&msg_ring->used);
+}
+
+/**
+ * sec_queue_send() - queue up a single operation in the hw queue
+ * @queue: The queue in which to put the message
+ * @msg: The message
+ * @ctx: Context to be put in the shadow array and passed back to cb on result.
+ *
+ * This function will return -EAGAIN if the queue is currently full.
+ */
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+ void __iomem *base = queue->regs;
+ u32 write, read;
+
+ mutex_lock(&msg_ring->lock);
+ read = readl(base + SEC_Q_RD_PTR_REG);
+ write = readl(base + SEC_Q_WR_PTR_REG);
+ if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
+ mutex_unlock(&msg_ring->lock);
+ return -EAGAIN;
+ }
+ memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
+ queue->shadow[write] = ctx;
+ write = (write + 1) % SEC_QUEUE_LEN;
+
+ /* Ensure content updated before queue advance */
+ wmb();
+ writel(write, base + SEC_Q_WR_PTR_REG);
+
+ atomic_inc(&msg_ring->used);
+ mutex_unlock(&msg_ring->lock);
+
+ return 0;
+}
+
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
+{
+ struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
+
+ return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
+}
+
+static void sec_queue_hw_init(struct sec_queue *queue)
+{
+ sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
+	sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC);
+ sec_queue_ar_pkgattr(queue, 1);
+ sec_queue_aw_pkgattr(queue, 1);
+
+ /* Enable out of order queue */
+ sec_queue_reorder(queue, true);
+
+ /* Interrupt after a single complete element */
+ writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);
+
+ sec_queue_depth(queue, SEC_QUEUE_LEN - 1);
+
+ sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
+
+ sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
+
+ sec_queue_errbase_addr(queue, queue->ring_db.paddr);
+
+ writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);
+
+ sec_queue_abn_irq_disable(queue);
+ sec_queue_irq_disable(queue);
+ writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
+}
+
+static int sec_hw_init(struct sec_dev_info *info)
+{
+ struct iommu_domain *domain;
+ u32 sec_ipv4_mask = 0;
+ u32 sec_ipv6_mask[10] = {};
+	u32 i;
+	int ret;
+
+ domain = iommu_get_domain_for_dev(info->dev);
+
+ /*
+ * Enable all available processing unit clocks.
+ * Only the first cluster is usable with translations.
+ */
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
+ info->num_saas = 5;
+
+ else
+ info->num_saas = 10;
+
+ writel_relaxed(GENMASK(info->num_saas - 1, 0),
+ info->regs[SEC_SAA] + SEC_CLK_EN_REG);
+
+ /* 32 bit little endian */
+ sec_bd_endian_little(info);
+
+ sec_cache_config(info);
+
+ /* Data axi port write and read outstanding config as per datasheet */
+ sec_data_axiwr_otsd_cfg(info, 0x7);
+ sec_data_axird_otsd_cfg(info, 0x7);
+
+ /* Enable clock gating */
+ sec_clk_gate_en(info, true);
+
+ /* Set CNT_CYC register not read clear */
+ sec_comm_cnt_cfg(info, false);
+
+ /* Enable CNT_CYC */
+ sec_commsnap_en(info, false);
+
+ writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);
+
+ ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
+ if (ret) {
+ dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
+ return -EIO;
+ }
+
+ sec_ipv6_hashmask(info, sec_ipv6_mask);
+
+ /* do not use debug bd */
+ sec_set_dbg_bd_cfg(info, 0);
+
+ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_streamid(info, i);
+ /* Same QoS for all queues */
+ writel_relaxed(0x3f,
+ info->regs[SEC_SAA] +
+ SEC_Q_WEIGHT_CFG_REG(i));
+ }
+ }
+
+ for (i = 0; i < info->num_saas; i++) {
+ sec_saa_getqm_en(info, i, 1);
+ sec_saa_int_mask(info, i, 0);
+ }
+
+ return 0;
+}
+
+static void sec_hw_exit(struct sec_dev_info *info)
+{
+ int i;
+
+ for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
+ sec_saa_int_mask(info, i, (u32)~0);
+ sec_saa_getqm_en(info, i, 0);
+ }
+}
+
+static void sec_queue_base_init(struct sec_dev_info *info,
+ struct sec_queue *queue, int queue_id)
+{
+ queue->dev_info = info;
+ queue->queue_id = queue_id;
+ snprintf(queue->name, sizeof(queue->name),
+ "%s_%d", dev_name(info->dev), queue->queue_id);
+}
+
+static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
+{
+ struct resource *res;
+ int i;
+
+ for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+
+ if (!res) {
+ dev_err(info->dev, "Memory resource %d not found\n", i);
+ return -EINVAL;
+ }
+
+ info->regs[i] = devm_ioremap(info->dev, res->start,
+ resource_size(res));
+ if (!info->regs[i]) {
+ dev_err(info->dev,
+ "Memory resource %d could not be remapped\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sec_base_init(struct sec_dev_info *info,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ret = sec_map_io(info, pdev);
+ if (ret)
+ return ret;
+
+ ret = sec_clk_en(info);
+ if (ret)
+ return ret;
+
+ ret = sec_reset_whole_module(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ ret = sec_hw_init(info);
+ if (ret)
+ goto sec_clk_disable;
+
+ return 0;
+
+sec_clk_disable:
+ sec_clk_dis(info);
+
+ return ret;
+}
+
+static void sec_base_exit(struct sec_dev_info *info)
+{
+ sec_hw_exit(info);
+ sec_clk_dis(info);
+}
+
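+/*
+ * Each queue owns three DMA coherent rings (command, completion and debug),
+ * each sized for SEC_QUEUE_LEN entries and rounded up to whole pages.
+ */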
+#define SEC_Q_CMD_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
+#define SEC_Q_CQ_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
+#define SEC_Q_DB_SIZE \
+ round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)
+
+static int sec_queue_res_cfg(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+ struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
+ struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
+ struct sec_queue_ring_db *ring_db = &queue->ring_db;
+ int ret;
+
+ ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE,
+ &ring_cmd->paddr,
+ GFP_KERNEL);
+ if (!ring_cmd->vaddr)
+ return -ENOMEM;
+
+ atomic_set(&ring_cmd->used, 0);
+ mutex_init(&ring_cmd->lock);
+ ring_cmd->callback = sec_alg_callback;
+
+ ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE,
+ &ring_cq->paddr,
+ GFP_KERNEL);
+ if (!ring_cq->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cmd;
+ }
+
+ ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE,
+ &ring_db->paddr,
+ GFP_KERNEL);
+ if (!ring_db->vaddr) {
+ ret = -ENOMEM;
+ goto err_free_ring_cq;
+ }
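+	/*
+	 * Per-queue interrupts come in pairs; only the completion ("task")
+	 * interrupt, the second of each pair, is requested by this driver.
+	 */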
+ queue->task_irq = platform_get_irq(to_platform_device(dev),
+ queue->queue_id * 2 + 1);
+ if (queue->task_irq <= 0) {
+ ret = -EINVAL;
+ goto err_free_ring_db;
+ }
+
+ return 0;
+
+err_free_ring_db:
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+err_free_ring_cq:
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+err_free_ring_cmd:
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+
+ return ret;
+}
+
+static void sec_queue_free_ring_pages(struct sec_queue *queue)
+{
+ struct device *dev = queue->dev_info->dev;
+
+ dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
+ queue->ring_db.paddr);
+ dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
+ queue->ring_cq.paddr);
+ dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
+ queue->ring_cmd.paddr);
+}
+
+static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
+ int queue_id)
+{
+ int ret;
+
+ sec_queue_base_init(info, queue, queue_id);
+
+ ret = sec_queue_res_cfg(queue);
+ if (ret)
+ return ret;
+
+ ret = sec_queue_map_io(queue);
+ if (ret) {
+ dev_err(info->dev, "Queue map failed %d\n", ret);
+ sec_queue_free_ring_pages(queue);
+ return ret;
+ }
+
+ sec_queue_hw_init(queue);
+
+ return 0;
+}
+
+static void sec_queue_unconfig(struct sec_dev_info *info,
+ struct sec_queue *queue)
+{
+ sec_queue_unmap_io(queue);
+ sec_queue_free_ring_pages(queue);
+}
+
+static int sec_id_alloc(struct sec_dev_info *info)
+{
+ int ret = 0;
+ int i;
+
+ mutex_lock(&sec_id_lock);
+
+ for (i = 0; i < SEC_MAX_DEVICES; i++)
+ if (!sec_devices[i])
+ break;
+ if (i == SEC_MAX_DEVICES) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ info->sec_id = i;
+ sec_devices[info->sec_id] = info;
+
+unlock:
+ mutex_unlock(&sec_id_lock);
+
+ return ret;
+}
+
+static void sec_id_free(struct sec_dev_info *info)
+{
+ mutex_lock(&sec_id_lock);
+ sec_devices[info->sec_id] = NULL;
+ mutex_unlock(&sec_id_lock);
+}
+
+static int sec_probe(struct platform_device *pdev)
+{
+ struct sec_dev_info *info;
+ struct device *dev = &pdev->dev;
+ int i, j;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+		dev_err(dev, "Failed to set 64 bit dma mask %d\n", ret);
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(dev, (sizeof(*info)), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ mutex_init(&info->dev_lock);
+
+ info->hw_sgl_pool = dmam_pool_create("sgl", dev,
+ sizeof(struct sec_hw_sgl), 64, 0);
+ if (!info->hw_sgl_pool) {
+ dev_err(dev, "Failed to create sec sgl dma pool\n");
+ return -ENOMEM;
+ }
+
+ ret = sec_base_init(info, pdev);
+ if (ret) {
+ dev_err(dev, "Base initialization fail! %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ ret = sec_queue_config(info, &info->queues[i], i);
+ if (ret)
+ goto queues_unconfig;
+
+ ret = sec_queue_irq_init(&info->queues[i]);
+ if (ret) {
+ sec_queue_unconfig(info, &info->queues[i]);
+ goto queues_unconfig;
+ }
+ }
+
+ ret = sec_algs_register();
+ if (ret) {
+ dev_err(dev, "Failed to register algorithms with crypto %d\n",
+ ret);
+ goto queues_unconfig;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = sec_id_alloc(info);
+ if (ret)
+ goto algs_unregister;
+
+ return 0;
+
+algs_unregister:
+ sec_algs_unregister();
+queues_unconfig:
+ for (j = i - 1; j >= 0; j--) {
+ sec_queue_irq_uninit(&info->queues[j]);
+ sec_queue_unconfig(info, &info->queues[j]);
+ }
+ sec_base_exit(info);
+
+ return ret;
+}
+
+static int sec_remove(struct platform_device *pdev)
+{
+ struct sec_dev_info *info = platform_get_drvdata(pdev);
+ int i;
+
+ /* Unexpose as soon as possible, reuse during remove is fine */
+ sec_id_free(info);
+
+ sec_algs_unregister();
+
+ for (i = 0; i < SEC_Q_NUM; i++) {
+ sec_queue_irq_uninit(&info->queues[i]);
+ sec_queue_unconfig(info, &info->queues[i]);
+ }
+
+ sec_base_exit(info);
+
+ return 0;
+}
+
+static const __maybe_unused struct of_device_id sec_match[] = {
+ { .compatible = "hisilicon,hip06-sec" },
+ { .compatible = "hisilicon,hip07-sec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sec_match);
+
+static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
+ { "HISI02C1", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
+
+static struct platform_driver sec_driver = {
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .driver = {
+ .name = "hisi_sec_platform_driver",
+ .of_match_table = sec_match,
+ .acpi_match_table = ACPI_PTR(sec_acpi_match),
+ },
+};
+module_platform_driver(sec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
new file mode 100644
index 000000000000..2d2f186674ba
--- /dev/null
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef _SEC_DRV_H_
+#define _SEC_DRV_H_
+
+#include <crypto/algapi.h>
+#include <linux/kfifo.h>
+
+#define SEC_MAX_SGE_NUM 64
+#define SEC_HW_RING_NUM 3
+
+#define SEC_CMD_RING 0
+#define SEC_OUTORDER_RING 1
+#define SEC_DBG_RING 2
+
+/* A reasonable length to balance memory use against flexibility */
+#define SEC_QUEUE_LEN 512
+
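+/*
+ * Hardware block descriptor (BD): sixteen 32-bit words laid out as below.
+ * The per-word field masks and shifts are defined inline next to the word
+ * they apply to.
+ */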
+struct sec_bd_info {
+#define SEC_BD_W0_T_LEN_M GENMASK(4, 0)
+#define SEC_BD_W0_T_LEN_S 0
+
+#define SEC_BD_W0_C_WIDTH_M GENMASK(6, 5)
+#define SEC_BD_W0_C_WIDTH_S 5
+#define SEC_C_WIDTH_AES_128BIT 0
+#define SEC_C_WIDTH_AES_8BIT 1
+#define SEC_C_WIDTH_AES_1BIT 2
+#define SEC_C_WIDTH_DES_64BIT 0
+#define SEC_C_WIDTH_DES_8BIT 1
+#define SEC_C_WIDTH_DES_1BIT 2
+
+#define SEC_BD_W0_C_MODE_M GENMASK(9, 7)
+#define SEC_BD_W0_C_MODE_S 7
+#define SEC_C_MODE_ECB 0
+#define SEC_C_MODE_CBC 1
+#define SEC_C_MODE_CTR 4
+#define SEC_C_MODE_CCM 5
+#define SEC_C_MODE_GCM 6
+#define SEC_C_MODE_XTS 7
+
+#define SEC_BD_W0_SEQ BIT(10)
+#define SEC_BD_W0_DE BIT(11)
+#define SEC_BD_W0_DAT_SKIP_M GENMASK(13, 12)
+#define SEC_BD_W0_DAT_SKIP_S 12
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_M GENMASK(17, 14)
+#define SEC_BD_W0_C_GRAN_SIZE_19_16_S 14
+
+#define SEC_BD_W0_CIPHER_M GENMASK(19, 18)
+#define SEC_BD_W0_CIPHER_S 18
+#define SEC_CIPHER_NULL 0
+#define SEC_CIPHER_ENCRYPT 1
+#define SEC_CIPHER_DECRYPT 2
+
+#define SEC_BD_W0_AUTH_M GENMASK(21, 20)
+#define SEC_BD_W0_AUTH_S 20
+#define SEC_AUTH_NULL 0
+#define SEC_AUTH_MAC 1
+#define SEC_AUTH_VERIF 2
+
+#define SEC_BD_W0_AI_GEN BIT(22)
+#define SEC_BD_W0_CI_GEN BIT(23)
+#define SEC_BD_W0_NO_HPAD BIT(24)
+#define SEC_BD_W0_HM_M GENMASK(26, 25)
+#define SEC_BD_W0_HM_S 25
+#define SEC_BD_W0_ICV_OR_SKEY_EN_M GENMASK(28, 27)
+#define SEC_BD_W0_ICV_OR_SKEY_EN_S 27
+
+/* Multi purpose field - gran size bits for send, flag for recv */
+#define SEC_BD_W0_FLAG_M GENMASK(30, 29)
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_M GENMASK(30, 29)
+#define SEC_BD_W0_FLAG_S 29
+#define SEC_BD_W0_C_GRAN_SIZE_21_20_S 29
+
+#define SEC_BD_W0_DONE BIT(31)
+ u32 w0;
+
+#define SEC_BD_W1_AUTH_GRAN_SIZE_M GENMASK(21, 0)
+#define SEC_BD_W1_AUTH_GRAN_SIZE_S 0
+#define SEC_BD_W1_M_KEY_EN BIT(22)
+#define SEC_BD_W1_BD_INVALID BIT(23)
+#define SEC_BD_W1_ADDR_TYPE BIT(24)
+
+#define SEC_BD_W1_A_ALG_M GENMASK(28, 25)
+#define SEC_BD_W1_A_ALG_S 25
+#define SEC_A_ALG_SHA1 0
+#define SEC_A_ALG_SHA256 1
+#define SEC_A_ALG_MD5 2
+#define SEC_A_ALG_SHA224 3
+#define SEC_A_ALG_HMAC_SHA1 8
+#define SEC_A_ALG_HMAC_SHA224 10
+#define SEC_A_ALG_HMAC_SHA256 11
+#define SEC_A_ALG_HMAC_MD5 12
+#define SEC_A_ALG_AES_XCBC 13
+#define SEC_A_ALG_AES_CMAC 14
+
+#define SEC_BD_W1_C_ALG_M GENMASK(31, 29)
+#define SEC_BD_W1_C_ALG_S 29
+#define SEC_C_ALG_DES 0
+#define SEC_C_ALG_3DES 1
+#define SEC_C_ALG_AES 2
+
+ u32 w1;
+
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_M GENMASK(15, 0)
+#define SEC_BD_W2_C_GRAN_SIZE_15_0_S 0
+#define SEC_BD_W2_GRAN_NUM_M GENMASK(31, 16)
+#define SEC_BD_W2_GRAN_NUM_S 16
+ u32 w2;
+
+#define SEC_BD_W3_AUTH_LEN_OFFSET_M GENMASK(9, 0)
+#define SEC_BD_W3_AUTH_LEN_OFFSET_S 0
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_M GENMASK(19, 10)
+#define SEC_BD_W3_CIPHER_LEN_OFFSET_S 10
+#define SEC_BD_W3_MAC_LEN_M GENMASK(24, 20)
+#define SEC_BD_W3_MAC_LEN_S 20
+#define SEC_BD_W3_A_KEY_LEN_M GENMASK(29, 25)
+#define SEC_BD_W3_A_KEY_LEN_S 25
+#define SEC_BD_W3_C_KEY_LEN_M GENMASK(31, 30)
+#define SEC_BD_W3_C_KEY_LEN_S 30
+#define SEC_KEY_LEN_AES_128 0
+#define SEC_KEY_LEN_AES_192 1
+#define SEC_KEY_LEN_AES_256 2
+#define SEC_KEY_LEN_DES 1
+#define SEC_KEY_LEN_3DES_3_KEY 1
+#define SEC_KEY_LEN_3DES_2_KEY 3
+ u32 w3;
+
+ /* W4,5 */
+ union {
+ u32 authkey_addr_lo;
+ u32 authiv_addr_lo;
+ };
+ union {
+ u32 authkey_addr_hi;
+ u32 authiv_addr_hi;
+ };
+
+ /* W6,7 */
+ u32 cipher_key_addr_lo;
+ u32 cipher_key_addr_hi;
+
+ /* W8,9 */
+ u32 cipher_iv_addr_lo;
+ u32 cipher_iv_addr_hi;
+
+ /* W10,11 */
+ u32 data_addr_lo;
+ u32 data_addr_hi;
+
+ /* W12,13 */
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ /* W14,15 */
+ u32 cipher_destin_addr_lo;
+ u32 cipher_destin_addr_hi;
+};
+
+enum sec_mem_region {
+ SEC_COMMON = 0,
+ SEC_SAA,
+ SEC_NUM_ADDR_REGIONS
+};
+
+#define SEC_NAME_SIZE 64
+#define SEC_Q_NUM 16
+
+
+/**
+ * struct sec_queue_ring_cmd - store information about a SEC HW cmd ring
+ * @used: Local counter used to cheaply establish if the ring is empty.
+ * @lock: Protect against simultaneous adjusting of the read and write pointers.
+ * @vaddr: Virtual address for the ram pages used for the ring.
+ * @paddr: Physical address of the dma mapped region of ram used for the ring.
+ * @callback: Callback function called on a ring element completing.
+ */
+struct sec_queue_ring_cmd {
+ atomic_t used;
+ struct mutex lock;
+ struct sec_bd_info *vaddr;
+ dma_addr_t paddr;
+ void (*callback)(struct sec_bd_info *resp, void *ctx);
+};
+
+struct sec_debug_bd_info;
+struct sec_queue_ring_db {
+ struct sec_debug_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_out_bd_info;
+struct sec_queue_ring_cq {
+ struct sec_out_bd_info *vaddr;
+ dma_addr_t paddr;
+};
+
+struct sec_dev_info;
+
+enum sec_cipher_alg {
+ SEC_C_DES_ECB_64,
+ SEC_C_DES_CBC_64,
+
+ SEC_C_3DES_ECB_192_3KEY,
+ SEC_C_3DES_ECB_192_2KEY,
+
+ SEC_C_3DES_CBC_192_3KEY,
+ SEC_C_3DES_CBC_192_2KEY,
+
+ SEC_C_AES_ECB_128,
+ SEC_C_AES_ECB_192,
+ SEC_C_AES_ECB_256,
+
+ SEC_C_AES_CBC_128,
+ SEC_C_AES_CBC_192,
+ SEC_C_AES_CBC_256,
+
+ SEC_C_AES_CTR_128,
+ SEC_C_AES_CTR_192,
+ SEC_C_AES_CTR_256,
+
+ SEC_C_AES_XTS_128,
+ SEC_C_AES_XTS_256,
+
+ SEC_C_NULL,
+};
+
+/**
+ * struct sec_alg_tfm_ctx - hardware-specific transformation context
+ * @cipher_alg: Cipher algorithm enabled include encryption mode.
+ * @key: Key storage if required.
+ * @pkey: DMA address for the key storage.
+ * @req_template: Request template to save time on setup.
+ * @queue: The hardware queue associated with this tfm context.
+ * @lock: Protect key and pkey to ensure they are consistent
+ * @auth_buf: Current context buffer for auth operations.
+ * @backlog: The backlog queue used for cases where our buffers aren't
+ * large enough.
+ */
+struct sec_alg_tfm_ctx {
+ enum sec_cipher_alg cipher_alg;
+ u8 *key;
+ dma_addr_t pkey;
+ struct sec_bd_info req_template;
+ struct sec_queue *queue;
+ struct mutex lock;
+ u8 *auth_buf;
+ struct list_head backlog;
+};
+
+/**
+ * struct sec_request - data associated with a single crypto request
+ * @elements: List of subparts of this request (hardware size restriction)
+ * @num_elements: The number of subparts (used as an optimization)
+ * @lock: Protect elements of this structure against concurrent change.
+ * @tfm_ctx: hardware specific context.
+ * @len_in: length of in sgl from upper layers
+ * @len_out: length of out sgl from upper layers
+ * @dma_iv: initialization vector - physical address
+ * @err: store used to track errors across subelements of this request.
+ * @req_base: pointer to base element of the associated crypto context.
+ *            This is needed to allow shared handling of skcipher, ahash etc.
+ * @cb: completion callback.
+ * @backlog_head: list head to allow backlog maintenance.
+ *
+ * The hardware is limited in the maximum size of data that it can
+ * process from a single BD. Typically this is fairly large (32MB)
+ * but still requires the complexity of splitting the incoming
+ * skreq up into a number of elements complete with appropriate
+ * iv chaining.
+ */
+struct sec_request {
+ struct list_head elements;
+ int num_elements;
+ struct mutex lock;
+ struct sec_alg_tfm_ctx *tfm_ctx;
+ int len_in;
+ int len_out;
+ dma_addr_t dma_iv;
+ int err;
+ struct crypto_async_request *req_base;
+ void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
+ struct list_head backlog_head;
+};
+
+/**
+ * struct sec_request_el - A subpart of a request.
+ * @head: allow us to attach this to the list in the sec_request
+ * @req: hardware block descriptor corresponding to this request subpart
+ * @in: hardware sgl for input - virtual address
+ * @dma_in: hardware sgl for input - physical address
+ * @sgl_in: scatterlist for this request subpart
+ * @out: hardware sgl for output - virtual address
+ * @dma_out: hardware sgl for output - physical address
+ * @sgl_out: scatterlist for this request subpart
+ * @sec_req: The request which this subpart forms a part of
+ * @el_length: Number of bytes in this subpart. Needed to locate
+ * last ivsize chunk for iv chaining.
+ */
+struct sec_request_el {
+ struct list_head head;
+ struct sec_bd_info req;
+ struct sec_hw_sgl *in;
+ dma_addr_t dma_in;
+ struct scatterlist *sgl_in;
+ struct sec_hw_sgl *out;
+ dma_addr_t dma_out;
+ struct scatterlist *sgl_out;
+ struct sec_request *sec_req;
+ size_t el_length;
+};
+
+/**
+ * struct sec_queue - All the information about a HW queue
+ * @dev_info: The parent SEC device to which this queue belongs.
+ * @task_irq: Completion interrupt for the queue.
+ * @name: Human readable queue description also used as irq name.
+ * @ring_cmd: The command ring via which requests are submitted.
+ * @ring_cq: The completion (out of order) ring written back by the hardware.
+ * @ring_db: The debug/error bd ring.
+ * @regs: The iomapped device registers
+ * @queue_id: Index of the queue used for naming and resource selection.
+ * @in_use: Flag to say if the queue is in use.
+ * @expected: The next expected element to finish assuming we were in order.
+ * @unprocessed: A bitmap to track which OoO elements are done but not handled.
+ * @softqueue: A software queue used when chaining requirements prevent direct
+ * use of the hardware queues.
+ * @havesoftqueue: A flag to say we have a software queue - as we may need
+ *    one for the current mode.
+ * @queuelock: Protect the soft queue from concurrent changes to avoid
+ *    races that could lose data.
+ * @shadow: Pointers back to the shadow copy of the hardware ring element,
+ *    needed because we can't store any context reference in the bd element.
+ */
+struct sec_queue {
+ struct sec_dev_info *dev_info;
+ int task_irq;
+ char name[SEC_NAME_SIZE];
+ struct sec_queue_ring_cmd ring_cmd;
+ struct sec_queue_ring_cq ring_cq;
+ struct sec_queue_ring_db ring_db;
+ void __iomem *regs;
+ u32 queue_id;
+ bool in_use;
+ int expected;
+
+ DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
+ DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
+ bool havesoftqueue;
+ struct mutex queuelock;
+ void *shadow[SEC_QUEUE_LEN];
+};
+
+/**
+ * struct sec_hw_sge - Track each of the 64 element SEC HW SGL entries
+ * @buf: The IOV dma address for this entry.
+ * @len: Length of this IOV.
+ * @pad: Reserved space.
+ */
+struct sec_hw_sge {
+ dma_addr_t buf;
+ unsigned int len;
+ unsigned int pad;
+};
+
+/**
+ * struct sec_hw_sgl - One hardware SGL entry.
+ * @next_sgl: The next entry if we need to chain dma address. Null if last.
+ * @entry_sum_in_chain: The full count of SGEs - only matters for first SGL.
+ * @entry_sum_in_sgl: The number of SGEs in this SGL element.
+ * @flag: Unused in skciphers.
+ * @serial_num: Unused in skciphers.
+ * @cpuid: Currently unused.
+ * @data_bytes_in_sgl: Count of bytes from all SGEs in this SGL.
+ * @next: Virtual address used to stash the next sgl - useful in completion.
+ * @reserved: A reserved field not currently used.
+ * @sge_entries: The (up to) 64 Scatter Gather Entries, representing IOVs.
+ * @node: Currently unused.
+ */
+struct sec_hw_sgl {
+ dma_addr_t next_sgl;
+ u16 entry_sum_in_chain;
+ u16 entry_sum_in_sgl;
+ u32 flag;
+ u64 serial_num;
+ u32 cpuid;
+ u32 data_bytes_in_sgl;
+ struct sec_hw_sgl *next;
+ u64 reserved;
+ struct sec_hw_sge sge_entries[SEC_MAX_SGE_NUM];
+ u8 node[16];
+};
+
+struct dma_pool;
+
+/**
+ * struct sec_dev_info - The full SEC unit comprising queues and processors.
+ * @sec_id: Index used to track which SEC this is when more than one is present.
+ * @num_saas: The number of backend processors enabled.
+ * @regs: iomapped register regions shared by whole SEC unit.
+ * @dev_lock: Protects concurrent queue allocation / freeing for the SEC.
+ * @queues_in_use: Count of queues currently allocated from this SEC.
+ * @queues: The 16 queues that this SEC instance provides.
+ * @dev: Device pointer.
+ * @hw_sgl_pool: DMA pool used to minimise mapping for the scatter gather lists.
+ */
+struct sec_dev_info {
+ int sec_id;
+ int num_saas;
+ void __iomem *regs[SEC_NUM_ADDR_REGIONS];
+ struct mutex dev_lock;
+ int queues_in_use;
+ struct sec_queue queues[SEC_Q_NUM];
+ struct device *dev;
+ struct dma_pool *hw_sgl_pool;
+};
+
+int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
+bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
+int sec_queue_stop_release(struct sec_queue *queue);
+struct sec_queue *sec_queue_alloc_start_safe(void);
+bool sec_queue_empty(struct sec_queue *queue);
+
+/* Algorithm specific elements from sec_algs.c */
+void sec_alg_callback(struct sec_bd_info *resp, void *ctx);
+int sec_algs_register(void);
+void sec_algs_unregister(void);
+
+#endif /* _SEC_DRV_H_ */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4e86f864a952..7e71043457a6 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -33,7 +30,19 @@ MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
u32 val, htable_offset;
- int i;
+ int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+
+ if (priv->version == EIP197B) {
+ cs_rc_max = EIP197B_CS_RC_MAX;
+ cs_ht_wc = EIP197B_CS_HT_WC;
+ cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
+ } else {
+ cs_rc_max = EIP197D_CS_RC_MAX;
+ cs_ht_wc = EIP197D_CS_HT_WC;
+ cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
+ cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
+ }
/* Enable the record cache memory access */
val = readl(priv->base + EIP197_CS_RAM_CTRL);
@@ -54,7 +63,7 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
writel(val, priv->base + EIP197_TRC_PARAMS);
/* Clear all records */
- for (i = 0; i < EIP197_CS_RC_MAX; i++) {
+ for (i = 0; i < cs_rc_max; i++) {
u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
@@ -64,14 +73,14 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1);
if (i == 0)
val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
- else if (i == EIP197_CS_RC_MAX - 1)
+ else if (i == cs_rc_max - 1)
val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
writel(val, priv->base + offset + sizeof(u32));
}
/* Clear the hash table entries */
- htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
- for (i = 0; i < 64; i++)
+ htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+ for (i = 0; i < cs_ht_wc; i++)
writel(GENMASK(29, 0),
priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
@@ -82,23 +91,23 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
/* Write head and tail pointers of the record free chain */
val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
- EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
+ EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
writel(val, priv->base + EIP197_TRC_FREECHAIN);
/* Configure the record cache #1 */
- val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
- EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
+ val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+ EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
writel(val, priv->base + EIP197_TRC_PARAMS2);
/* Configure the record cache #2 */
- val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
+ val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
EIP197_TRC_PARAMS_HTABLE_SZ(2);
writel(val, priv->base + EIP197_TRC_PARAMS);
}
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
- const struct firmware *fw, u32 ctrl,
+ const struct firmware *fw, int pe, u32 ctrl,
u32 prog_en)
{
const u32 *data = (const u32 *)fw->data;
@@ -112,7 +121,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -120,7 +129,7 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
/* Release engine from reset */
val = readl(EIP197_PE(priv) + ctrl);
@@ -132,35 +141,62 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
const struct firmware *fw[FW_NB];
- int i, j, ret = 0;
+ char fw_path[31], *dir = NULL;
+ int i, j, ret = 0, pe;
u32 val;
+ switch (priv->version) {
+ case EIP197B:
+ dir = "eip197b";
+ break;
+ case EIP197D:
+ dir = "eip197d";
+ break;
+ default:
+ /* No firmware is required */
+ return 0;
+ }
+
for (i = 0; i < FW_NB; i++) {
- ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
+ ret = request_firmware(&fw[i], fw_path, priv->dev);
if (ret) {
- dev_err(priv->dev,
- "Failed to request firmware %s (%d)\n",
- fw_name[i], ret);
- goto release_fw;
- }
- }
-
- /* Clear the scratchpad memory */
- val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
- EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
- EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
- EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
+ if (priv->version != EIP197B)
+ goto release_fw;
- memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
- EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
-
- eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
- EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+ /* Fall back to the old firmware location for the
+ * EIP197b.
+ */
+ ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to request firmware %s (%d)\n",
+ fw_name[i], ret);
+ goto release_fw;
+ }
+ }
+ }
- eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
- EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Clear the scratchpad memory */
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+ val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+ EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+ EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+ EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+
+ memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
+ EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+
+ eip197_write_firmware(priv, fw[FW_IFPP], pe,
+ EIP197_PE_ICE_FPP_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+
+ eip197_write_firmware(priv, fw[FW_IPUE], pe,
+ EIP197_PE_ICE_PUE_CTRL(pe),
+ EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+ }
release_fw:
for (j = 0; j < i; j++)
@@ -256,7 +292,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
u32 version, val;
- int i, ret;
+ int i, ret, pe;
/* Determine endianess and configure byte swap */
version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
@@ -267,6 +303,10 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
+ /* For EIP197 set maximum number of TX commands to 2^5 = 32 */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+
writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
@@ -282,82 +322,94 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Clear any pending interrupt */
writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- /* Data Fetch Engine configuration */
-
- /* Reset all DFE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- if (priv->version == EIP197) {
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
- val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
- val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
-
- /* Leave the DFE threads reset state */
- writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
-
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
- writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
-
- if (priv->version == EIP197) {
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN |
- GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
- }
-
- /* Data Store Engine configuration */
-
- /* Reset all DSE threads */
- writel(EIP197_DxE_THR_CTRL_RESET_PE,
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
-
- /* Wait for all DSE threads to complete */
- while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
- GENMASK(15, 12)) != GENMASK(15, 12))
- ;
-
- /* DMA transfer size to use */
- val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
- val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
- val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
- /* FIXME: instability issues can occur for EIP97 but disabling it impact
- * performances.
- */
- if (priv->version == EIP197)
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
+ /* Processing Engine configuration */
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Data Fetch Engine configuration */
- /* Leave the DSE threads reset state */
- writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Reset all DFE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Configure the procesing engine thresholds */
- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* Processing Engine configuration */
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+ val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+ EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+ val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+ /* Leave the DFE threads reset state */
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(9),
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+ writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+ EIP197_PE_IN_xBUF_THRES_MAX(7),
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+ if (priv->version == EIP197B || priv->version == EIP197D) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+ }
- /* H/W capabilities selection */
- val = EIP197_FUNCTION_RSVD;
- val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
- val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
- val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
- val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
- val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
- writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
+ /* Data Store Engine configuration */
+
+ /* Reset all DSE threads */
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Wait for all DSE threads to complete */
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+ GENMASK(15, 12)) != GENMASK(15, 12))
+ ;
+
+ /* DMA transfer size to use */
+ val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+ val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+ /* FIXME: instability issues can occur for EIP97 but disabling it impacts
+ * performance.
+ */
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+ /* Leave the DSE threads reset state */
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Configure the processing engine thresholds */
+ writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+ EIP197_PE_OUT_DBUF_THRES_MAX(8),
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+ /* Processing Engine configuration */
+
+ /* H/W capabilities selection */
+ val = EIP197_FUNCTION_RSVD;
+ val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+ val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
+ val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
+ val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
+ val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+ val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
+ val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+ val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+ }
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
@@ -408,18 +460,20 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
- /* Enable command descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
+ for (pe = 0; pe < priv->config.pes; pe++) {
+ /* Enable command descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
- /* Enable result descriptor rings */
- writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
+ /* Enable result descriptor rings */
+ writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+ }
/* Clear any HIA interrupt */
writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- if (priv->version == EIP197) {
+ if (priv->version == EIP197B || priv->version == EIP197D) {
eip197_trc_cache_init(priv);
ret = eip197_load_firmwares(priv);
@@ -452,7 +506,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
- struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
/* If a request wasn't properly dequeued because of a lack of resources,
@@ -476,16 +529,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
}
handle_req:
- request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request)
- goto request_failed;
-
ctx = crypto_tfm_ctx(req->tfm);
- ret = ctx->send(req, ring, request, &commands, &results);
- if (ret) {
- kfree(request);
+ ret = ctx->send(req, ring, &commands, &results);
+ if (ret)
goto request_failed;
- }
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -494,14 +541,8 @@ handle_req:
* to the engine because the input data was cached, continue to
* dequeue other requests as this is valid and not an error.
*/
- if (!commands && !results) {
- kfree(request);
+ if (!commands && !results)
continue;
- }
-
- spin_lock_bh(&priv->ring[ring].egress_lock);
- list_add_tail(&request->list, &priv->ring[ring].list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
cdesc += commands;
rdesc += results;
@@ -519,7 +560,7 @@ finalize:
if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests += nreq;
@@ -528,7 +569,7 @@ finalize:
priv->ring[ring].busy = true;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
@@ -560,6 +601,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
return -EINVAL;
}
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req)
+{
+ int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+ priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+ int i = safexcel_ring_first_rdr_index(priv, ring);
+
+ return priv->ring[ring].rdr_req[i];
+}
+
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
struct safexcel_command_desc *cdesc;
@@ -588,21 +647,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request)
+ dma_addr_t ctxr_dma, int ring)
{
struct safexcel_command_desc *cdesc;
struct safexcel_result_desc *rdesc;
int ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Prepare command descriptor */
cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
- if (IS_ERR(cdesc)) {
- ret = PTR_ERR(cdesc);
- goto unlock;
- }
+ if (IS_ERR(cdesc))
+ return PTR_ERR(cdesc);
cdesc->control_data.type = EIP197_TYPE_EXTENDED;
cdesc->control_data.options = 0;
@@ -617,21 +671,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
goto cdesc_rollback;
}
- request->req = async;
- goto unlock;
+ safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+ return ret;
cdesc_rollback:
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-unlock:
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
int ring)
{
- struct safexcel_request *sreq;
+ struct crypto_async_request *req;
struct safexcel_context *ctx;
int ret, i, nreq, ndesc, tot_descs, handled = 0;
bool should_complete;
@@ -646,28 +699,22 @@ handle_results:
goto requests_left;
for (i = 0; i < nreq; i++) {
- spin_lock_bh(&priv->ring[ring].egress_lock);
- sreq = list_first_entry(&priv->ring[ring].list,
- struct safexcel_request, list);
- list_del(&sreq->list);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- ctx = crypto_tfm_ctx(sreq->req->tfm);
- ndesc = ctx->handle_result(priv, ring, sreq->req,
+ req = safexcel_rdr_req_get(priv, ring);
+
+ ctx = crypto_tfm_ctx(req->tfm);
+ ndesc = ctx->handle_result(priv, ring, req,
&should_complete, &ret);
if (ndesc < 0) {
- kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
goto acknowledge;
}
if (should_complete) {
local_bh_disable();
- sreq->req->complete(sreq->req, ret);
+ req->complete(req, ret);
local_bh_enable();
}
- kfree(sreq);
tot_descs += ndesc;
handled++;
}
@@ -686,7 +733,7 @@ acknowledge:
goto handle_results;
requests_left:
- spin_lock_bh(&priv->ring[ring].egress_lock);
+ spin_lock_bh(&priv->ring[ring].lock);
priv->ring[ring].requests -= handled;
safexcel_try_push_requests(priv, ring);
@@ -694,7 +741,7 @@ requests_left:
if (!priv->ring[ring].requests)
priv->ring[ring].busy = false;
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
@@ -785,17 +832,29 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
}
static struct safexcel_alg_template *safexcel_algs[] = {
+ &safexcel_alg_ecb_des,
+ &safexcel_alg_cbc_des,
+ &safexcel_alg_ecb_des3_ede,
+ &safexcel_alg_cbc_des3_ede,
&safexcel_alg_ecb_aes,
&safexcel_alg_cbc_aes,
+ &safexcel_alg_md5,
&safexcel_alg_sha1,
&safexcel_alg_sha224,
&safexcel_alg_sha256,
+ &safexcel_alg_sha384,
+ &safexcel_alg_sha512,
+ &safexcel_alg_hmac_md5,
&safexcel_alg_hmac_sha1,
&safexcel_alg_hmac_sha224,
&safexcel_alg_hmac_sha256,
+ &safexcel_alg_hmac_sha384,
+ &safexcel_alg_hmac_sha512,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha384_cbc_aes,
+ &safexcel_alg_authenc_hmac_sha512_cbc_aes,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -805,6 +864,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
safexcel_algs[i]->priv = priv;
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -820,6 +882,9 @@ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
fail:
for (j = 0; j < i; j++) {
+ if (!(safexcel_algs[j]->engines & priv->version))
+ continue;
+
if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -836,6 +901,9 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
int i;
for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+ if (!(safexcel_algs[i]->engines & priv->version))
+ continue;
+
if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
@@ -847,9 +915,21 @@ static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
- u32 val, mask;
+ u32 val, mask = 0;
val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+ /* Read number of PEs from the engine */
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
+ mask = EIP197_N_PES_MASK;
+ break;
+ default:
+ mask = EIP97_N_PES_MASK;
+ }
+ priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
@@ -867,7 +947,9 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
struct safexcel_register_offsets *offsets = &priv->offsets;
- if (priv->version == EIP197) {
+ switch (priv->version) {
+ case EIP197B:
+ case EIP197D:
offsets->hia_aic = EIP197_HIA_AIC_BASE;
offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
@@ -878,7 +960,8 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
offsets->pe = EIP197_PE_BASE;
- } else {
+ break;
+ case EIP97IES:
offsets->hia_aic = EIP97_HIA_AIC_BASE;
offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
@@ -889,6 +972,7 @@ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
offsets->pe = EIP97_PE_BASE;
+ break;
}
}
@@ -906,6 +990,9 @@ static int safexcel_probe(struct platform_device *pdev)
priv->dev = dev;
priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+ if (priv->version == EIP197B || priv->version == EIP197D)
+ priv->flags |= EIP197_TRC_CACHE;
+
safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -957,6 +1044,13 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv);
+ priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+ GFP_KERNEL);
+ if (!priv->ring) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
for (i = 0; i < priv->config.rings; i++) {
char irq_name[6] = {0}; /* "ringX\0" */
char wq_name[9] = {0}; /* "wq_ringX\0" */
@@ -969,6 +1063,14 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
+ priv->ring[i].rdr_req = devm_kzalloc(dev,
+ sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ GFP_KERNEL);
+ if (!priv->ring[i].rdr_req) {
+ ret = -ENOMEM;
+ goto err_reg_clk;
+ }
+
ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
if (!ring_irq) {
ret = -ENOMEM;
@@ -1004,9 +1106,7 @@ static int safexcel_probe(struct platform_device *pdev)
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
- INIT_LIST_HEAD(&priv->ring[i].list);
spin_lock_init(&priv->ring[i].lock);
- spin_lock_init(&priv->ring[i].egress_lock);
spin_lock_init(&priv->ring[i].queue_lock);
}
@@ -1034,6 +1134,24 @@ err_core_clk:
return ret;
}
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->config.rings; i++) {
+ /* clear any pending interrupt */
+ writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+ writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+ /* Reset the CDR base address */
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+ /* Reset the RDR base address */
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ }
+}
static int safexcel_remove(struct platform_device *pdev)
{
@@ -1041,6 +1159,8 @@ static int safexcel_remove(struct platform_device *pdev)
int i;
safexcel_unregister_algorithms(priv);
+ safexcel_hw_reset_rings(priv);
+
clk_disable_unprepare(priv->clk);
for (i = 0; i < priv->config.rings; i++)
@@ -1051,12 +1171,26 @@ static int safexcel_remove(struct platform_device *pdev)
static const struct of_device_id safexcel_of_match_table[] = {
{
+ .compatible = "inside-secure,safexcel-eip97ies",
+ .data = (void *)EIP97IES,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197b",
+ .data = (void *)EIP197B,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197d",
+ .data = (void *)EIP197D,
+ },
+ {
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip97",
- .data = (void *)EIP97,
+ .data = (void *)EIP97IES,
},
{
+ /* Deprecated. Kept for backward compatibility. */
.compatible = "inside-secure,safexcel-eip197",
- .data = (void *)EIP197,
+ .data = (void *)EIP197B,
},
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 8b3ee9b59f53..65624a81f0fd 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __SAFEXCEL_H__
@@ -95,13 +92,13 @@
#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x0000
-#define EIP197_HIA_DFE_THR_CTRL 0x0000
-#define EIP197_HIA_DFE_THR_STAT 0x0004
-#define EIP197_HIA_DSE_CFG 0x0000
-#define EIP197_HIA_DSE_THR_CTRL 0x0000
-#define EIP197_HIA_DSE_THR_STAT 0x0004
-#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_DFE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n) (0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n) (0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n) (0x0010 + (8 * (n)))
#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
@@ -114,18 +111,18 @@
#define EIP197_HIA_MST_CTRL 0xfff4
#define EIP197_HIA_OPTIONS 0xfff8
#define EIP197_HIA_VERSION 0xfffc
-#define EIP197_PE_IN_DBUF_THRES 0x0000
-#define EIP197_PE_IN_TBUF_THRES 0x0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
-#define EIP197_PE_ICE_PUE_CTRL 0x0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
-#define EIP197_PE_ICE_FPP_CTRL 0x0d80
-#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
-#define EIP197_PE_OUT_DBUF_THRES 0x1c00
-#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_PE_IN_DBUF_THRES(n) (0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
/* EIP197-specific registers, no indirection */
@@ -184,6 +181,11 @@
#define EIP197_HIA_RA_PE_CTRL_RESET BIT(31)
#define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET 4
+#define EIP197_N_PES_MASK GENMASK(4, 0)
+#define EIP97_N_PES_MASK GENMASK(2, 0)
+
/* EIP197_HIA_AIC_R_ENABLE_CTRL */
#define EIP197_CDR_IRQ(n) BIT((n) * 2)
#define EIP197_RDR_IRQ(n) BIT((n) * 2 + 1)
@@ -217,6 +219,7 @@
#define WR_CACHE_4BITS (WR_CACHE_3BITS << 1 | BIT(0))
#define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0)
#define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20)
#define EIP197_MST_CTRL_BYTE_SWAP BIT(24)
#define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25)
@@ -287,7 +290,7 @@ struct safexcel_context_record {
u32 control0;
u32 control1;
- __le32 data[24];
+ __le32 data[40];
} __packed;
/* control0 */
@@ -305,14 +308,19 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5)
#define CONTEXT_CONTROL_SIZE(n) ((n) << 8)
#define CONTEXT_CONTROL_KEY_EN BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES (0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES (0x2 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
#define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
#define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384 (0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512 (0x5 << 23)
#define CONTEXT_CONTROL_INV_FR (0x5 << 24)
#define CONTEXT_CONTROL_INV_TR (0x6 << 24)
@@ -327,6 +335,11 @@ struct safexcel_context_record {
#define CONTEXT_CONTROL_COUNTER_MODE BIT(10)
#define CONTEXT_CONTROL_HASH_STORE BIT(19)
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bits.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE 64
+
/* EIP197_CS_RAM_CTRL */
#define EIP197_TRC_ENABLE_0 BIT(4)
#define EIP197_TRC_ENABLE_1 BIT(5)
@@ -349,13 +362,19 @@ struct safexcel_context_record {
#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
/* Cache helpers */
-#define EIP197_CS_RC_MAX 52
+#define EIP197B_CS_RC_MAX 52
+#define EIP197D_CS_RC_MAX 96
#define EIP197_CS_RC_SIZE (4 * sizeof(u32))
#define EIP197_CS_RC_NEXT(x) (x)
#define EIP197_CS_RC_PREV(x) ((x) << 10)
#define EIP197_RC_NULL 0x3ff
-#define EIP197_CS_TRC_REC_WC 59
-#define EIP197_CS_TRC_LG_REC_WC 73
+#define EIP197B_CS_TRC_REC_WC 59
+#define EIP197D_CS_TRC_REC_WC 64
+#define EIP197B_CS_TRC_LG_REC_WC 73
+#define EIP197D_CS_TRC_LG_REC_WC 80
+#define EIP197B_CS_HT_WC 64
+#define EIP197D_CS_HT_WC 256
+
/* Result data */
struct result_data_desc {
@@ -450,6 +469,7 @@ struct safexcel_control_data_desc {
#define EIP197_OPTION_MAGIC_VALUE BIT(0)
#define EIP197_OPTION_64BIT_CTX BIT(1)
#define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
#define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
#define EIP197_TYPE_EXTENDED 0x3
@@ -480,7 +500,7 @@ enum eip197_fw {
FW_NB
};
-struct safexcel_ring {
+struct safexcel_desc_ring {
void *base;
void *base_end;
dma_addr_t base_dma;
@@ -489,8 +509,7 @@ struct safexcel_ring {
void *write;
void *read;
- /* number of elements used in the ring */
- unsigned nr;
+ /* descriptor element offset */
unsigned offset;
};
@@ -500,12 +519,8 @@ enum safexcel_alg_type {
SAFEXCEL_ALG_TYPE_AHASH,
};
-struct safexcel_request {
- struct list_head list;
- struct crypto_async_request *req;
-};
-
struct safexcel_config {
+ u32 pes;
u32 rings;
u32 cd_size;
@@ -521,9 +536,40 @@ struct safexcel_work_data {
int ring;
};
+struct safexcel_ring {
+ spinlock_t lock;
+
+ struct workqueue_struct *workqueue;
+ struct safexcel_work_data work_data;
+
+ /* command/result rings */
+ struct safexcel_desc_ring cdr;
+ struct safexcel_desc_ring rdr;
+
+ /* result ring crypto API request */
+ struct crypto_async_request **rdr_req;
+
+ /* queue */
+ struct crypto_queue queue;
+ spinlock_t queue_lock;
+
+ /* Number of requests in the engine. */
+ int requests;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for current requests when bailing out of the dequeueing
+ * function when not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
+};
+
enum safexcel_eip_version {
- EIP97,
- EIP197,
+ EIP97IES = BIT(0),
+ EIP197B = BIT(1),
+ EIP197D = BIT(2),
};
struct safexcel_register_offsets {
@@ -539,6 +585,10 @@ struct safexcel_register_offsets {
u32 pe;
};
+enum safexcel_flags {
+ EIP197_TRC_CACHE = BIT(0),
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
@@ -548,46 +598,19 @@ struct safexcel_crypto_priv {
enum safexcel_eip_version version;
struct safexcel_register_offsets offsets;
+ u32 flags;
/* context DMA pool */
struct dma_pool *context_pool;
atomic_t ring_used;
- struct {
- spinlock_t lock;
- spinlock_t egress_lock;
-
- struct list_head list;
- struct workqueue_struct *workqueue;
- struct safexcel_work_data work_data;
-
- /* command/result rings */
- struct safexcel_ring cdr;
- struct safexcel_ring rdr;
-
- /* queue */
- struct crypto_queue queue;
- spinlock_t queue_lock;
-
- /* Number of requests in the engine. */
- int requests;
-
- /* The ring is currently handling at least one request */
- bool busy;
-
- /* Store for current requests when bailing out of the dequeueing
- * function when no enough resources are available.
- */
- struct crypto_async_request *req;
- struct crypto_async_request *backlog;
- } ring[EIP197_MAX_RINGS];
+ struct safexcel_ring *ring;
};
struct safexcel_context {
int (*send)(struct crypto_async_request *req, int ring,
- struct safexcel_request *request, int *commands,
- int *results);
+ int *commands, int *results);
int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *req, bool *complete,
int *ret);
@@ -600,13 +623,13 @@ struct safexcel_context {
};
struct safexcel_ahash_export_state {
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
u32 digest;
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
- u8 cache[SHA256_BLOCK_SIZE];
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u8 cache[SHA512_BLOCK_SIZE];
};
/*
@@ -617,6 +640,7 @@ struct safexcel_ahash_export_state {
struct safexcel_alg_template {
struct safexcel_crypto_priv *priv;
enum safexcel_alg_type type;
+ u32 engines;
union {
struct skcipher_alg skcipher;
struct aead_alg aead;
@@ -635,16 +659,16 @@ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
- dma_addr_t ctxr_dma, int ring,
- struct safexcel_request *request);
+ dma_addr_t ctxr_dma, int ring);
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr);
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr);
int safexcel_select_ring(struct safexcel_crypto_priv *priv);
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring);
+ struct safexcel_desc_ring *ring);
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
int ring_id,
bool first, bool last,
@@ -655,21 +679,44 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
int ring_id,
bool first, bool last,
dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc,
+ struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
void safexcel_inv_complete(struct crypto_async_request *req, int error);
int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
void *istate, void *ostate);
/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_ecb_aes;
extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
extern struct safexcel_alg_template safexcel_alg_sha1;
extern struct safexcel_alg_template safexcel_alg_sha224;
extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 6bb60fda2043..3aef1d43e435 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/device.h>
@@ -15,6 +12,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -27,21 +25,28 @@ enum safexcel_cipher_direction {
SAFEXCEL_DECRYPT,
};
+enum safexcel_cipher_alg {
+ SAFEXCEL_DES,
+ SAFEXCEL_3DES,
+ SAFEXCEL_AES,
+};
+
struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
u32 mode;
+ enum safexcel_cipher_alg alg;
bool aead;
__le32 key[8];
unsigned int key_len;
/* All the below is AEAD specific */
- u32 alg;
+ u32 hash_alg;
u32 state_sz;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_cipher_req {
@@ -57,10 +62,24 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
unsigned offset = 0;
if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ switch (ctx->alg) {
+ case SAFEXCEL_DES:
+ offset = DES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
+ case SAFEXCEL_3DES:
+ offset = DES3_EDE_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+ break;
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ case SAFEXCEL_AES:
+ offset = AES_BLOCK_SIZE / sizeof(u32);
+ memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE);
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ break;
+ }
}
token = (struct safexcel_token *)(cdesc->control_data.token + offset);
@@ -145,7 +164,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
return ret;
}
- if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < len / sizeof(u32); i++) {
if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
ctx->base.needs_inv = true;
@@ -179,12 +198,12 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
goto badkey;
/* Encryption key */
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
memcmp(ctx->key, keys.enckey, keys.enckeylen))
ctx->base.needs_inv = true;
/* Auth key */
- switch (ctx->alg) {
+ switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
keys.authkeylen, &istate, &ostate))
@@ -200,6 +219,16 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
keys.authkeylen, &istate, &ostate))
goto badkey;
break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+ if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
+ case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+ if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
@@ -208,7 +237,7 @@ static int safexcel_aead_aes_setkey(struct crypto_aead *ctfm, const u8 *key,
crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
CRYPTO_TFM_RES_MASK);
- if (priv->version == EIP197 && ctx->base.ctxr_dma &&
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
(memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
memcmp(ctx->opad, ostate.state, ctx->state_sz)))
ctx->base.needs_inv = true;
@@ -258,22 +287,28 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
if (ctx->aead)
cdesc->control_data.control0 |= CONTEXT_CONTROL_DIGEST_HMAC |
- ctx->alg;
-
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
- break;
- default:
- dev_err(priv->dev, "aes keysize not supported: %u\n",
- ctx->key_len);
- return -EINVAL;
+ ctx->hash_alg;
+
+ if (ctx->alg == SAFEXCEL_DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_DES;
+ } else if (ctx->alg == SAFEXCEL_3DES) {
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+ } else if (ctx->alg == SAFEXCEL_AES) {
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+ break;
+ default:
+ dev_err(priv->dev, "aes keysize not supported: %u\n",
+ ctx->key_len);
+ return -EINVAL;
+ }
}
ctrl_size = ctx->key_len / sizeof(u32);
@@ -298,7 +333,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -315,7 +349,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (src == dst) {
dma_unmap_sg(priv->dev, src,
@@ -335,8 +368,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
return ndesc;
}
-static int safexcel_aes_send(struct crypto_async_request *base, int ring,
- struct safexcel_request *request,
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct safexcel_cipher_req *sreq,
struct scatterlist *src, struct scatterlist *dst,
unsigned int cryptlen, unsigned int assoclen,
@@ -346,7 +378,7 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_command_desc *cdesc;
- struct safexcel_result_desc *rdesc;
+ struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
struct scatterlist *sg;
unsigned int totlen = cryptlen + assoclen;
int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -386,8 +418,6 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ctx->opad, ctx->state_sz);
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* command descriptors */
for_each_sg(src, sg, nr_src, i) {
int len = sg_dma_len(sg);
@@ -434,12 +464,12 @@ static int safexcel_aes_send(struct crypto_async_request *base, int ring,
ret = PTR_ERR(rdesc);
goto rdesc_rollback;
}
+ if (first)
+ first_rdesc = rdesc;
n_rdesc++;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
- request->req = base;
+ safexcel_rdr_req_set(priv, ring, first_rdesc, base);
*commands = n_cdesc;
*results = n_rdesc;
@@ -452,8 +482,6 @@ cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
-
if (src == dst) {
dma_unmap_sg(priv->dev, src,
sg_nents_for_len(src, totlen),
@@ -481,7 +509,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
do {
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
@@ -491,17 +518,13 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
break;
}
- if (rdesc->result_data.error_code) {
- dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EIO;
- }
+ if (likely(!*ret))
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
ndesc++;
} while (!rdesc->last_seg);
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -577,15 +600,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
}
static int safexcel_cipher_send_inv(struct crypto_async_request *base,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
- request);
+ ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -596,7 +617,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
}
static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
@@ -605,21 +625,19 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
+ ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, req->iv,
commands, results);
return ret;
}
static int safexcel_aead_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+ int *commands, int *results)
{
struct aead_request *req = aead_request_cast(async);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -628,14 +646,13 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request, commands,
- results);
+ ret = safexcel_cipher_send_inv(async, ring, commands, results);
else
- ret = safexcel_aes_send(async, ring, request, sreq, req->src,
- req->dst, req->cryptlen, req->assoclen,
+ ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+ req->cryptlen, req->assoclen,
crypto_aead_authsize(tfm), req->iv,
commands, results);
return ret;
@@ -705,9 +722,10 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
-static int safexcel_aes(struct crypto_async_request *base,
+static int safexcel_queue_req(struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
- enum safexcel_cipher_direction dir, u32 mode)
+ enum safexcel_cipher_direction dir, u32 mode,
+ enum safexcel_cipher_alg alg)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
@@ -715,10 +733,11 @@ static int safexcel_aes(struct crypto_async_request *base,
sreq->needs_inv = false;
sreq->direction = dir;
+ ctx->alg = alg;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 && ctx->base.needs_inv) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
@@ -745,14 +764,16 @@ static int safexcel_aes(struct crypto_async_request *base,
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_AES);
}
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
@@ -795,7 +816,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_skcipher_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "skcipher: invalidation error %d\n",
@@ -815,7 +836,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
if (safexcel_cipher_cra_exit(tfm))
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_aead_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "aead: invalidation error %d\n",
@@ -828,6 +849,7 @@ static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_ecb_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_ecb_aes_encrypt,
@@ -838,7 +860,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
.cra_name = "ecb(aes)",
.cra_driver_name = "safexcel-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -852,18 +874,21 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
- return safexcel_aes(&req->base, skcipher_request_ctx(req),
- SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_AES);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.skcipher = {
.setkey = safexcel_skcipher_aes_setkey,
.encrypt = safexcel_cbc_aes_encrypt,
@@ -875,7 +900,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
.cra_name = "cbc(aes)",
.cra_driver_name = "safexcel-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -887,20 +912,234 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
},
};
+static int safexcel_cbc_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_cbc_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ if (len != DES_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ /* if context exists and key changed, need to invalidate it */
+ if (ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_cbc_des_encrypt,
+ .decrypt = safexcel_cbc_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "safexcel-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+static int safexcel_ecb_des_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des_setkey,
+ .encrypt = safexcel_ecb_des_encrypt,
+ .decrypt = safexcel_ecb_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "safexcel-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_cbc_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_CBC,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (len != DES3_EDE_KEY_SIZE) {
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* if context exists and key changed, need to invalidate it */
+ if (ctx->base.ctxr_dma) {
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+ }
+
+ memcpy(ctx->key, key, len);
+
+ ctx->key_len = len;
+
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_cbc_des3_ede_encrypt,
+ .decrypt = safexcel_cbc_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "safexcel-cbc-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_ecb_des3_ede_encrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_ENCRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+static int safexcel_ecb_des3_ede_decrypt(struct skcipher_request *req)
+{
+ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+ SAFEXCEL_DECRYPT, CONTEXT_CONTROL_CRYPTO_MODE_ECB,
+ SAFEXCEL_3DES);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.skcipher = {
+ .setkey = safexcel_des3_ede_setkey,
+ .encrypt = safexcel_ecb_des3_ede_encrypt,
+ .decrypt = safexcel_ecb_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "safexcel-ecb-des3_ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_encrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_decrypt(struct aead_request *req)
{
struct safexcel_cipher_req *creq = aead_request_ctx(req);
- return safexcel_aes(&req->base, creq, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT,
+ CONTEXT_CONTROL_CRYPTO_MODE_CBC, SAFEXCEL_AES);
}
static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
@@ -926,13 +1165,14 @@ static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
ctx->state_sz = SHA1_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -943,7 +1183,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -960,13 +1200,14 @@ static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -977,7 +1218,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -994,13 +1235,14 @@ static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_aead_cra_init(tfm);
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
ctx->state_sz = SHA256_DIGEST_SIZE;
return 0;
}
struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.aead = {
.setkey = safexcel_aead_aes_setkey,
.encrypt = safexcel_aead_encrypt,
@@ -1011,7 +1253,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1022,3 +1264,73 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
},
},
};
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha512_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ ctx->state_sz = SHA512_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.aead = {
+ .setkey = safexcel_aead_aes_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_sha384_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
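
The DES and 3DES setkey paths above follow the same pattern as the existing AES path: when a context record is already live in the engine (ctx->base.ctxr_dma is set) and the caller supplies a different key, the driver only flags the record for invalidation instead of rewriting it in place. A minimal userspace sketch of that decision, using hypothetical names (cipher_ctx, cipher_setkey) and none of the kernel plumbing, might look like this:

#include <stdbool.h>
#include <string.h>

/* Hypothetical, userspace-only model of the setkey pattern above. */
struct cipher_ctx {
	unsigned char key[32];
	unsigned int key_len;
	bool ctxr_live;		/* stands in for ctx->base.ctxr_dma */
	bool needs_inv;		/* stands in for ctx->base.needs_inv */
};

static int cipher_setkey(struct cipher_ctx *ctx,
			 const unsigned char *key, unsigned int len)
{
	if (len > sizeof(ctx->key))
		return -1;

	/* Only mark the hardware record for invalidation if it exists and
	 * the key actually changed; an identical key keeps the cached one.
	 */
	if (ctx->ctxr_live &&
	    (ctx->key_len != len || memcmp(ctx->key, key, len)))
		ctx->needs_inv = true;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;
	return 0;
}

The extra memcmp() avoids a needless invalidation when a caller re-sets the same key, which would otherwise force a round trip through the engine's record cache.
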
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index c77b0e1655a8..ac9282c1a5ec 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -1,14 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <crypto/hmac.h>
+#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -22,8 +20,8 @@ struct safexcel_ahash_ctx {
u32 alg;
- u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
- u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+ u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
@@ -38,18 +36,26 @@ struct safexcel_ahash_req {
u32 digest;
u8 state_sz; /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
- u64 len;
- u64 processed;
+ u64 len[2];
+ u64 processed[2];
- u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+ if (req->len[1] > req->processed[1])
+ return 0xffffffff - (req->len[0] - req->processed[0]);
+
+ return req->len[0] - req->processed[0];
+}
+
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
u32 input_length, u32 result_length)
{
@@ -72,9 +78,9 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_ahash_req *req,
struct safexcel_command_desc *cdesc,
- unsigned int digestsize,
- unsigned int blocksize)
+ unsigned int digestsize)
{
+ struct safexcel_crypto_priv *priv = ctx->priv;
int i;
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
@@ -82,12 +88,17 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
cdesc->control_data.control0 |= req->digest;
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
- if (req->processed) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (req->processed[0] || req->processed[1]) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
+ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
} else {
@@ -102,12 +113,28 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
* fields. Do this now as we need it to setup the first command
* descriptor.
*/
- if (req->processed) {
+ if (req->processed[0] || req->processed[1]) {
for (i = 0; i < digestsize / sizeof(u32); i++)
ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
- if (req->finish)
- ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+ if (req->finish) {
+ u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+ req->processed[1]);
+
+ /* This is a hardware limitation, as the
+ * counter must fit into a u32. This represents
+ * a fairly big amount of input data, so we
+ * shouldn't see this.
+ */
+ if (unlikely(count & 0xffff0000)) {
+ dev_warn(priv->dev,
+ "Input data is too big\n");
+ return;
+ }
+
+ ctx->base.ctxr->data[i] = cpu_to_le32(count);
+ }
}
} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
@@ -126,11 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len;
+ u64 cache_len;
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
@@ -141,7 +167,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->nents) {
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
@@ -164,7 +189,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
- cache_len = sreq->len - sreq->processed;
+ cache_len = safexcel_queued_len(sreq);
if (cache_len)
memcpy(sreq->cache, sreq->cache_next, cache_len);
@@ -174,7 +199,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
}
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
- struct safexcel_request *request,
int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
@@ -185,9 +209,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, extra, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len;
- queued = len = req->len - req->processed;
+ queued = len = safexcel_queued_len(req);
if (queued <= crypto_ahash_blocksize(ahash))
cache_len = queued;
else
@@ -220,16 +245,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
}
- spin_lock_bh(&priv->ring[ring].egress_lock);
-
/* Add a command descriptor for the cached data, if any */
if (cache_len) {
req->cache_dma = dma_map_single(priv->dev, req->cache,
cache_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->dev, req->cache_dma)) {
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ if (dma_mapping_error(priv->dev, req->cache_dma))
return -EINVAL;
- }
req->cache_sz = cache_len;
first_cdesc = safexcel_add_cdesc(priv, ring, 1,
@@ -260,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
- if (queued - sglen < 0)
+ if (queued < sglen)
sglen = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
@@ -282,8 +303,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
send_command:
/* Setup the context options */
- safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
- crypto_ahash_blocksize(ahash));
+ safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
@@ -303,10 +323,11 @@ send_command:
goto unmap_result;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
+ safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
- req->processed += len;
- request->req = &areq->base;
+ req->processed[0] += len;
+ if (req->processed[0] < len)
+ req->processed[1]++;
*commands = n_cdesc;
*results = 1;
@@ -327,7 +348,6 @@ unmap_cache:
req->cache_sz = 0;
}
- spin_unlock_bh(&priv->ring[ring].egress_lock);
return ret;
}
@@ -335,16 +355,18 @@ static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
unsigned int state_w_sz = req->state_sz / sizeof(u32);
+ u64 processed;
int i;
+ processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+ processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
for (i = 0; i < state_w_sz; i++)
if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
return true;
- if (ctx->base.ctxr->data[state_w_sz] !=
- cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
+ if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
return true;
return false;
@@ -363,21 +385,16 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
*ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
if (IS_ERR(rdesc)) {
dev_err(priv->dev,
"hash: invalidate: could not retrieve the result descriptor\n");
*ret = PTR_ERR(rdesc);
- } else if (rdesc->result_data.error_code) {
- dev_err(priv->dev,
- "hash: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EINVAL;
+ } else {
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
}
safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
if (ctx->base.exit_inv) {
dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -413,7 +430,7 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int err;
- BUG_ON(priv->version == EIP97 && req->needs_inv);
+ BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
if (req->needs_inv) {
req->needs_inv = false;
@@ -428,15 +445,14 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
ret = safexcel_invalidate_cache(async, ctx->priv,
- ctx->base.ctxr_dma, ring, request);
+ ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -447,19 +463,17 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
}
static int safexcel_ahash_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
+ int ring, int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int ret;
if (req->needs_inv)
- ret = safexcel_ahash_send_inv(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_inv(async, ring, commands, results);
else
- ret = safexcel_ahash_send_req(async, ring, request,
- commands, results);
+ ret = safexcel_ahash_send_req(async, ring, commands, results);
+
return ret;
}
@@ -509,17 +523,17 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
- int queued, cache_len;
+ u64 queued, cache_len;
- /* cache_len: everyting accepted by the driver but not sent yet,
- * tot sz handled by update() - last req sz - tot sz handled by send()
- */
- cache_len = req->len - areq->nbytes - req->processed;
/* queued: everything accepted by the driver which will be handled by
* the next send() calls.
* tot sz handled by update() - tot sz handled by send()
*/
- queued = req->len - req->processed;
+ queued = safexcel_queued_len(req);
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
+ cache_len = queued - areq->nbytes;
/*
* In case there isn't enough bytes to proceed (less than a
@@ -546,8 +560,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
req->needs_inv = false;
if (ctx->base.ctxr) {
- if (priv->version == EIP197 &&
- !ctx->base.needs_inv && req->processed &&
+ if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+ (req->processed[0] || req->processed[1]) &&
req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
/* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
@@ -590,7 +604,9 @@ static int safexcel_ahash_update(struct ahash_request *areq)
if (!areq->nbytes)
return 0;
- req->len += areq->nbytes;
+ req->len[0] += areq->nbytes;
+ if (req->len[0] < areq->nbytes)
+ req->len[1]++;
safexcel_ahash_cache(areq);
@@ -605,7 +621,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- req->len - req->processed > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
return safexcel_ahash_enqueue(areq);
return 0;
@@ -620,8 +636,11 @@ static int safexcel_ahash_final(struct ahash_request *areq)
req->finish = true;
/* If we have an overall 0 length request */
- if (!(req->len + areq->nbytes)) {
- if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+ if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+ memcpy(areq->result, md5_zero_message_hash,
+ MD5_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
memcpy(areq->result, sha1_zero_message_hash,
SHA1_DIGEST_SIZE);
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
@@ -630,6 +649,12 @@ static int safexcel_ahash_final(struct ahash_request *areq)
else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
memcpy(areq->result, sha256_zero_message_hash,
SHA256_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+ memcpy(areq->result, sha384_zero_message_hash,
+ SHA384_DIGEST_SIZE);
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ memcpy(areq->result, sha512_zero_message_hash,
+ SHA512_DIGEST_SIZE);
return 0;
}
@@ -654,8 +679,10 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
- export->len = req->len;
- export->processed = req->processed;
+ export->len[0] = req->len[0];
+ export->len[1] = req->len[1];
+ export->processed[0] = req->processed[0];
+ export->processed[1] = req->processed[1];
export->digest = req->digest;
@@ -676,8 +703,10 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
if (ret)
return ret;
- req->len = export->len;
- req->processed = export->processed;
+ req->len[0] = export->len[0];
+ req->len[1] = export->len[1];
+ req->processed[0] = export->processed[0];
+ req->processed[1] = export->processed[1];
req->digest = export->digest;
@@ -743,7 +772,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- if (priv->version == EIP197) {
+ if (priv->flags & EIP197_TRC_CACHE) {
ret = safexcel_ahash_exit_inv(tfm);
if (ret)
dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
@@ -755,6 +784,7 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
struct safexcel_alg_template safexcel_alg_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha1_init,
.update = safexcel_ahash_update,
@@ -908,8 +938,7 @@ int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
u8 *ipad, *opad;
int ret;
- tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(alg, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
@@ -963,7 +992,7 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- if (priv->version == EIP197 && ctx->base.ctxr) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
for (i = 0; i < state_sz / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
@@ -988,6 +1017,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha1_init,
.update = safexcel_ahash_update,
@@ -1051,6 +1081,7 @@ static int safexcel_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha256_init,
.update = safexcel_ahash_update,
@@ -1113,6 +1144,7 @@ static int safexcel_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_sha224_init,
.update = safexcel_ahash_update,
@@ -1168,6 +1200,7 @@ static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha224_init,
.update = safexcel_ahash_update,
@@ -1224,6 +1257,7 @@ static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
.type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
.alg.ahash = {
.init = safexcel_hmac_sha256_init,
.update = safexcel_ahash_update,
@@ -1251,3 +1285,375 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
},
},
};
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA512_H0);
+ req->state[1] = upper_32_bits(SHA512_H0);
+ req->state[2] = lower_32_bits(SHA512_H1);
+ req->state[3] = upper_32_bits(SHA512_H1);
+ req->state[4] = lower_32_bits(SHA512_H2);
+ req->state[5] = upper_32_bits(SHA512_H2);
+ req->state[6] = lower_32_bits(SHA512_H3);
+ req->state[7] = upper_32_bits(SHA512_H3);
+ req->state[8] = lower_32_bits(SHA512_H4);
+ req->state[9] = upper_32_bits(SHA512_H4);
+ req->state[10] = lower_32_bits(SHA512_H5);
+ req->state[11] = upper_32_bits(SHA512_H5);
+ req->state[12] = lower_32_bits(SHA512_H6);
+ req->state[13] = upper_32_bits(SHA512_H6);
+ req->state[14] = lower_32_bits(SHA512_H7);
+ req->state[15] = upper_32_bits(SHA512_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha512_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "safexcel-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = lower_32_bits(SHA384_H0);
+ req->state[1] = upper_32_bits(SHA384_H0);
+ req->state[2] = lower_32_bits(SHA384_H1);
+ req->state[3] = upper_32_bits(SHA384_H1);
+ req->state[4] = lower_32_bits(SHA384_H2);
+ req->state[5] = upper_32_bits(SHA384_H2);
+ req->state[6] = lower_32_bits(SHA384_H3);
+ req->state[7] = upper_32_bits(SHA384_H3);
+ req->state[8] = lower_32_bits(SHA384_H4);
+ req->state[9] = upper_32_bits(SHA384_H4);
+ req->state[10] = lower_32_bits(SHA384_H5);
+ req->state[11] = upper_32_bits(SHA384_H5);
+ req->state[12] = lower_32_bits(SHA384_H6);
+ req->state[13] = upper_32_bits(SHA384_H6);
+ req->state[14] = lower_32_bits(SHA384_H7);
+ req->state[15] = upper_32_bits(SHA384_H7);
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_sha384_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "safexcel-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha512_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha512_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha512_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha512_digest,
+ .setkey = safexcel_hmac_sha512_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "safexcel-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+ SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_sha384_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_sha384_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_sha384_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_sha384_digest,
+ .setkey = safexcel_hmac_sha384_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "safexcel-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ memset(req, 0, sizeof(*req));
+
+ req->state[0] = MD5_H0;
+ req->state[1] = MD5_H1;
+ req->state[2] = MD5_H2;
+ req->state[3] = MD5_H3;
+
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = MD5_DIGEST_SIZE;
+
+ return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_md5_digest,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "safexcel-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+ safexcel_md5_init(areq);
+ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+ MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+ int ret = safexcel_hmac_md5_init(areq);
+
+ if (ret)
+ return ret;
+
+ return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+ .type = SAFEXCEL_ALG_TYPE_AHASH,
+ .engines = EIP97IES | EIP197B | EIP197D,
+ .alg.ahash = {
+ .init = safexcel_hmac_md5_init,
+ .update = safexcel_ahash_update,
+ .final = safexcel_ahash_final,
+ .finup = safexcel_ahash_finup,
+ .digest = safexcel_hmac_md5_digest,
+ .setkey = safexcel_hmac_md5_setkey,
+ .export = safexcel_ahash_export,
+ .import = safexcel_ahash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct safexcel_ahash_export_state),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "safexcel-hmac-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+ .cra_init = safexcel_ahash_cra_init,
+ .cra_exit = safexcel_ahash_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+ },
+};
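
The hash rework above widens the byte accounting to pairs of 64-bit words (req->len[0]/len[1] and req->processed[0]/processed[1]) so that very long streams do not wrap the counters, and safexcel_queued_len() collapses the pair back into the amount still queued. The carry handling is the interesting bit; a rough standalone model of it (hypothetical names, not the driver function itself) is sketched below:

#include <stdint.h>
#include <stdio.h>

/* Userspace model (hypothetical names) of the two-word byte counters kept
 * in req->len[] and req->processed[].
 */
struct byte_count {
	uint64_t lo;
	uint64_t hi;
};

/* Mirrors "req->len[0] += n; if (req->len[0] < n) req->len[1]++;" */
static void count_add(struct byte_count *c, uint64_t n)
{
	c->lo += n;
	if (c->lo < n)		/* low word wrapped: carry into the high word */
		c->hi++;
}

/* Bytes accepted but not yet processed, assuming the difference itself
 * fits in 64 bits (modular subtraction of the low words is then exact).
 */
static uint64_t queued_len(const struct byte_count *len,
			   const struct byte_count *processed)
{
	return len->lo - processed->lo;
}

int main(void)
{
	struct byte_count len = { 0, 0 }, processed = { 0, 0 };

	count_add(&len, UINT64_MAX);		/* force the carry path */
	count_add(&len, 10);
	count_add(&processed, UINT64_MAX);
	count_add(&processed, 4);

	printf("queued = %llu\n",
	       (unsigned long long)queued_len(&len, &processed));	/* 6 */
	return 0;
}

As long as the outstanding difference fits in 64 bits, the plain low-word subtraction already yields the right value, which is why the model never reads the high words back.
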
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index c9d2a8716b5b..eb75fa684876 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Marvell
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/dma-mapping.h>
@@ -14,8 +11,8 @@
#include "safexcel.h"
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *cdr,
- struct safexcel_ring *rdr)
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr)
{
cdr->offset = sizeof(u32) * priv->config.cd_offset;
cdr->base = dmam_alloc_coherent(priv->dev,
@@ -24,7 +21,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!cdr->base)
return -ENOMEM;
cdr->write = cdr->base;
- cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
+ cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
cdr->read = cdr->base;
rdr->offset = sizeof(u32) * priv->config.rd_offset;
@@ -34,7 +31,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
if (!rdr->base)
return -ENOMEM;
rdr->write = rdr->base;
- rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+ rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
rdr->read = rdr->base;
return 0;
@@ -46,49 +43,73 @@ inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
}
static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->write;
- if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
return ERR_PTR(-ENOMEM);
- ring->write += ring->offset;
if (ring->write == ring->base_end)
ring->write = ring->base;
+ else
+ ring->write += ring->offset;
- ring->nr++;
return ptr;
}
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
void *ptr = ring->read;
- if (!ring->nr)
+ if (ring->write == ring->read)
return ERR_PTR(-ENOENT);
- ring->read += ring->offset;
if (ring->read == ring->base_end)
ring->read = ring->base;
+ else
+ ring->read += ring->offset;
- ring->nr--;
return ptr;
}
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+ int ring)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct safexcel_result_desc *rdesc)
+{
+ struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+ return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
- struct safexcel_ring *ring)
+ struct safexcel_desc_ring *ring)
{
- if (!ring->nr)
+ if (ring->write == ring->read)
return;
if (ring->write == ring->base)
- ring->write = ring->base_end - ring->offset;
+ ring->write = ring->base_end;
else
ring->write -= ring->offset;
-
- ring->nr--;
}
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
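
safexcel_ring.c now tells a full ring from an empty one purely by comparing the read and write pointers, keeping one slot permanently unused instead of maintaining the old ring->nr element counter. The same scheme over a plain array, as a self-contained sketch (hypothetical names; the driver walks byte offsets across a DMA ring rather than array indices):

#include <stddef.h>

#define RING_SIZE 8	/* slots; one is deliberately kept unused */

struct desc_ring {
	int slots[RING_SIZE];
	size_t read;	/* next slot to consume */
	size_t write;	/* next slot to fill */
};

/* Full when advancing the write pointer would land on the read pointer;
 * that sacrificed slot is what distinguishes "full" from "empty" without
 * a separate element counter.
 */
static int ring_put(struct desc_ring *r, int v)
{
	size_t next = (r->write + 1) % RING_SIZE;

	if (next == r->read)
		return -1;			/* full */
	r->slots[r->write] = v;
	r->write = next;
	return 0;
}

/* Empty when both pointers coincide. */
static int ring_get(struct desc_ring *r, int *v)
{
	if (r->read == r->write)
		return -1;			/* empty */
	*v = r->slots[r->read];
	r->read = (r->read + 1) % RING_SIZE;
	return 0;
}

Reserving a slot costs one descriptor of capacity but removes the need for a shared element counter.
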
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index e34d80b6b7e5..99ff54cc8a15 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -1183,8 +1183,7 @@ static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
u8 *opad;
int ret;
- tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index ab6235b7ff22..55f34cfc43ff 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1487,8 +1487,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = tmpl->block_size;
base->cra_ctxsize = sizeof(struct n2_hash_ctx);
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index c2f7d4befb55..ad3358e74f5c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -386,7 +386,6 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index becb738c897b..a6764af83c6d 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -288,7 +288,6 @@ struct shash_alg nx_shash_sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index b6e183d58d73..92956bc6e45e 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -294,7 +294,6 @@ struct shash_alg nx_shash_sha512_alg = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index d1a1c74fb56a..0641185bd82f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1464,8 +1464,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1487,8 +1486,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1511,8 +1509,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1536,8 +1533,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
@@ -1564,8 +1560,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1586,8 +1581,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1609,8 +1603,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1633,8 +1626,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1659,8 +1651,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1681,8 +1672,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1704,8 +1694,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1728,8 +1717,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d32c79328876..21e5cae0a1e0 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -247,8 +247,7 @@ static struct shash_alg sha1_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -271,8 +270,7 @@ static struct shash_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -484,7 +482,6 @@ static struct shash_alg sha1_alg_nano = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -503,7 +500,6 @@ static struct shash_alg sha256_alg_nano = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index da8a2d3b5e9a..9225d060e18f 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -163,7 +163,7 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
return 0;
set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
reset_data->accel_dev = accel_dev;
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 53227d70d397..d8a5db11b7ea 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -378,8 +378,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
else
return -EINVAL;
- ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
- CRYPTO_ALG_TYPE_AHASH_MASK);
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
if (IS_ERR(ahash_tfm))
return PTR_ERR(ahash_tfm);
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
new file mode 100644
index 000000000000..e54249ccc009
--- /dev/null
+++ b/drivers/crypto/qcom-rng.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-18 Linaro Limited
+//
+// Based on msm-rng.c and downstream driver
+
+#include <crypto/internal/rng.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT 0x0000
+#define PRNG_STATUS 0x0004
+#define PRNG_LFSR_CFG 0x0100
+#define PRNG_CONFIG 0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK 0x0000ffff
+#define PRNG_LFSR_CFG_CLOCKS 0x0000dddd
+#define PRNG_CONFIG_HW_ENABLE BIT(1)
+#define PRNG_STATUS_DATA_AVAIL BIT(0)
+
+#define WORD_SZ 4
+
+struct qcom_rng {
+ struct mutex lock;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int skip_init;
+};
+
+struct qcom_rng_ctx {
+ struct qcom_rng *rng;
+};
+
+static struct qcom_rng *qcom_rng_dev;
+
+static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
+{
+ unsigned int currsize = 0;
+ u32 val;
+
+ /* read random data from hardware */
+ do {
+ val = readl_relaxed(rng->base + PRNG_STATUS);
+ if (!(val & PRNG_STATUS_DATA_AVAIL))
+ break;
+
+ val = readl_relaxed(rng->base + PRNG_DATA_OUT);
+ if (!val)
+ break;
+
+ if ((max - currsize) >= WORD_SZ) {
+ memcpy(data, &val, WORD_SZ);
+ data += WORD_SZ;
+ currsize += WORD_SZ;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - currsize);
+ break;
+ }
+ } while (currsize < max);
+
+ return currsize;
+}
+
+static int qcom_rng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct qcom_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct qcom_rng *rng = ctx->rng;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ mutex_lock(&rng->lock);
+
+ ret = qcom_rng_read(rng, dstn, dlen);
+
+ mutex_unlock(&rng->lock);
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ return 0;
+}
+
+static int qcom_rng_enable(struct qcom_rng *rng)
+{
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(rng->clk);
+ if (ret)
+ return ret;
+
+ /* Enable PRNG only if it is not already enabled */
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ if (val & PRNG_CONFIG_HW_ENABLE)
+ goto already_enabled;
+
+ val = readl_relaxed(rng->base + PRNG_LFSR_CFG);
+ val &= ~PRNG_LFSR_CFG_MASK;
+ val |= PRNG_LFSR_CFG_CLOCKS;
+ writel(val, rng->base + PRNG_LFSR_CFG);
+
+ val = readl_relaxed(rng->base + PRNG_CONFIG);
+ val |= PRNG_CONFIG_HW_ENABLE;
+ writel(val, rng->base + PRNG_CONFIG);
+
+already_enabled:
+ clk_disable_unprepare(rng->clk);
+
+ return 0;
+}
+
+static int qcom_rng_init(struct crypto_tfm *tfm)
+{
+ struct qcom_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ctx->rng = qcom_rng_dev;
+
+ if (!ctx->rng->skip_init)
+ return qcom_rng_enable(ctx->rng);
+
+ return 0;
+}
+
+static struct rng_alg qcom_rng_alg = {
+ .generate = qcom_rng_generate,
+ .seed = qcom_rng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "qcom-rng",
+ .cra_flags = CRYPTO_ALG_TYPE_RNG,
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct qcom_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = qcom_rng_init,
+ }
+};
+
+static int qcom_rng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct qcom_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+ mutex_init(&rng->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ /* ACPI systems have clk already on, so skip clk_get */
+ if (!has_acpi_companion(&pdev->dev)) {
+ rng->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
+ }
+
+
+ rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
+
+ qcom_rng_dev = rng;
+ ret = crypto_register_rng(&qcom_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Register crypto rng failed: %d\n", ret);
+ qcom_rng_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int qcom_rng_remove(struct platform_device *pdev)
+{
+ crypto_unregister_rng(&qcom_rng_alg);
+
+ qcom_rng_dev = NULL;
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qcom_rng_acpi_match[] = {
+ { .id = "QCOM8160", .driver_data = 1 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
+#endif
+
+static const struct of_device_id qcom_rng_of_match[] = {
+ { .compatible = "qcom,prng", .data = (void *)0},
+ { .compatible = "qcom,prng-ee", .data = (void *)1},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
+
+static struct platform_driver qcom_rng_driver = {
+ .probe = qcom_rng_probe,
+ .remove = qcom_rng_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(qcom_rng_of_match),
+ .acpi_match_table = ACPI_PTR(qcom_rng_acpi_match),
+ }
+};
+module_platform_driver(qcom_rng_driver);
+
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_DESCRIPTION("Qualcomm random number generator driver");
+MODULE_LICENSE("GPL v2");
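
The new qcom-rng driver registers a crypto rng_alg under the generic "stdrng" name rather than a hwrng, so in-kernel consumers reach it through the crypto RNG API. A hedged usage sketch follows; the example_get_random() wrapper is hypothetical, while the crypto_* calls are the standard kernel API:

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/types.h>

/* example_get_random() is a hypothetical consumer, not part of the patch. */
static int example_get_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* qcom_rng_seed() is a no-op, but reseeding keeps the call
	 * sequence valid for seeded stdrng providers as well.
	 */
	ret = crypto_rng_reset(rng, NULL, 0);
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}

Because crypto_alloc_rng("stdrng", 0, 0) resolves to whichever stdrng implementation has the highest priority, the same consumer code should simply fall back to another provider on systems without this hardware.
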
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index bf7163042569..faa282074e5a 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1765,8 +1765,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha1",
.cra_driver_name = "exynos-sha1",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1791,8 +1790,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "md5",
.cra_driver_name = "exynos-md5",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
@@ -1817,8 +1815,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "exynos-sha256",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0f2245e1af2b..e7540a5b8197 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1253,8 +1253,7 @@ static struct ahash_alg sha_v3_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sahara-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1280,8 +1279,7 @@ static struct ahash_alg sha_v4_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sahara-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sahara_ctx),
@@ -1351,7 +1349,7 @@ err_sha_v4_algs:
err_sha_v3_algs:
for (j = 0; j < k; j++)
- crypto_unregister_ahash(&sha_v4_algs[j]);
+ crypto_unregister_ahash(&sha_v3_algs[j]);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -1367,7 +1365,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+ for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
if (dev->version > SAHARA_VERSION_3)
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index c5d3efc54a4f..23b0b7bd64c7 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/aes.h>
@@ -105,6 +106,7 @@
#define GCM_CTR_INIT 2
#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+#define CRYP_AUTOSUSPEND_DELAY 50
struct stm32_cryp_caps {
bool swap_final;
@@ -519,6 +521,8 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
+ pm_runtime_get_sync(cryp->dev);
+
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -638,6 +642,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
free_pages((unsigned long)buf_out, pages);
}
+ pm_runtime_mark_last_busy(cryp->dev);
+ pm_runtime_put_autosuspend(cryp->dev);
+
if (is_gcm(cryp) || is_ccm(cryp)) {
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
cryp->areq = NULL;
@@ -1969,6 +1976,13 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRYP_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
rst = devm_reset_control_get(dev, NULL);
if (!IS_ERR(rst)) {
reset_control_assert(rst);
@@ -2008,6 +2022,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
err_aead_algs:
@@ -2020,6 +2036,11 @@ err_engine1:
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(cryp->clk);
return ret;
@@ -2028,10 +2049,15 @@ err_engine1:
static int stm32_cryp_remove(struct platform_device *pdev)
{
struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+ int ret;
if (!cryp)
return -ENODEV;
+ ret = pm_runtime_get_sync(cryp->dev);
+ if (ret < 0)
+ return ret;
+
crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
@@ -2041,16 +2067,52 @@ static int stm32_cryp_remove(struct platform_device *pdev)
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
+ pm_runtime_disable(cryp->dev);
+ pm_runtime_put_noidle(cryp->dev);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_cryp_runtime_suspend(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+
clk_disable_unprepare(cryp->clk);
return 0;
}
+static int stm32_cryp_runtime_resume(struct device *dev)
+{
+ struct stm32_cryp *cryp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_cryp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_cryp_runtime_suspend,
+ stm32_cryp_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
.remove = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_cryp_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
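
The stm32-hash and stm32_crc32 hunks below repeat the same runtime-PM idiom introduced here: probe primes autosuspend while the hardware is still powered, every register access is bracketed by a get/put pair, and the runtime hooks gate the peripheral clock. A minimal standalone sketch of that idiom, using hypothetical foo_* names rather than code from this patch:

#include <linux/device.h>
#include <linux/pm_runtime.h>

#define FOO_AUTOSUSPEND_DELAY	50	/* ms of idleness before runtime suspend */

/* Called once from probe, while the device is still powered up. */
static void foo_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, FOO_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);	/* hold a reference, no resume needed yet */
	pm_runtime_set_active(dev);	/* hardware is already up */
	pm_runtime_enable(dev);
	/* ...after registration succeeds, probe drops its ref: pm_runtime_put_sync(dev); */
}

/* Bracket every hardware access with a get/put pair. */
static void foo_do_request(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* runtime_resume hook turns the clock on */

	/* ... program the crypto engine ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev); /* runtime_suspend (clock off) after the delay */
}
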
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index cdc96f1bb917..590d7352837e 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/engine.h>
@@ -121,6 +122,8 @@ enum stm32_hash_data_format {
#define HASH_QUEUE_LENGTH 16
#define HASH_DMA_THRESHOLD 50
+#define HASH_AUTOSUSPEND_DELAY 50
+
struct stm32_hash_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
@@ -814,12 +817,17 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
rctx->flags |= HASH_FLAGS_ERRORS;
}
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
crypto_finalize_hash_request(hdev->engine, req, err);
}
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
+ pm_runtime_get_sync(hdev->dev);
+
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
stm32_hash_write(hdev, HASH_STR, 0);
@@ -967,6 +975,8 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
+ pm_runtime_get_sync(hdev->dev);
+
while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
cpu_relax();
@@ -982,6 +992,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
memcpy(out, rctx, sizeof(*rctx));
return 0;
@@ -1000,6 +1013,8 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
+ pm_runtime_get_sync(hdev->dev);
+
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
stm32_hash_write(hdev, HASH_CR, *preg);
@@ -1009,6 +1024,9 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+ pm_runtime_mark_last_busy(hdev->dev);
+ pm_runtime_put_autosuspend(hdev->dev);
+
kfree(rctx->hw_context);
return 0;
@@ -1132,8 +1150,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "md5",
.cra_driver_name = "stm32-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1159,8 +1176,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "stm32-hmac-md5",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1185,8 +1201,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "sha1",
.cra_driver_name = "stm32-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1212,8 +1227,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "stm32-hmac-sha1",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1241,8 +1255,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "stm32-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1268,8 +1281,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "stm32-hmac-sha224",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1294,8 +1306,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "stm32-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1321,8 +1332,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "stm32-hmac-sha256",
.cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
@@ -1482,6 +1492,13 @@ static int stm32_hash_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
if (!IS_ERR(hdev->rst)) {
reset_control_assert(hdev->rst);
@@ -1522,6 +1539,8 @@ static int stm32_hash_probe(struct platform_device *pdev)
dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+ pm_runtime_put_sync(dev);
+
return 0;
err_algs:
@@ -1535,6 +1554,9 @@ err_engine:
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
clk_disable_unprepare(hdev->clk);
return ret;
@@ -1543,11 +1565,16 @@ err_engine:
static int stm32_hash_remove(struct platform_device *pdev)
{
static struct stm32_hash_dev *hdev;
+ int ret;
hdev = platform_get_drvdata(pdev);
if (!hdev)
return -ENODEV;
+ ret = pm_runtime_get_sync(hdev->dev);
+ if (ret < 0)
+ return ret;
+
stm32_hash_unregister_algs(hdev);
crypto_engine_exit(hdev->engine);
@@ -1559,16 +1586,52 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (hdev->dma_lch)
dma_release_channel(hdev->dma_lch);
+ pm_runtime_disable(hdev->dev);
+ pm_runtime_put_noidle(hdev->dev);
+
clk_disable_unprepare(hdev->clk);
return 0;
}
+#ifdef CONFIG_PM
+static int stm32_hash_runtime_suspend(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return 0;
+}
+
+static int stm32_hash_runtime_resume(struct device *dev)
+{
+ struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(hdev->clk);
+ if (ret) {
+ dev_err(hdev->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_hash_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
+ stm32_hash_runtime_resume, NULL)
+};
+
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
.remove = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
+ .pm = &stm32_hash_pm_ops,
.of_match_table = stm32_hash_of_match,
}
};
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 8f09b8430893..5f3242a246fc 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -6,8 +6,10 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <crypto/internal/hash.h>
@@ -28,9 +30,7 @@
#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT 0xFFFFFFFF
-/* Polynomial reversed */
-#define POLY_CRC32 0xEDB88320
-#define POLY_CRC32C 0x82F63B78
+#define CRC_AUTOSUSPEND_DELAY 50
struct stm32_crc {
struct list_head list;
@@ -66,7 +66,7 @@ static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32;
+ mctx->poly = CRC32_POLY_LE;
return 0;
}
@@ -75,7 +75,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = CRC_INIT_DEFAULT;
- mctx->poly = POLY_CRC32C;
+ mctx->poly = CRC32C_POLY_LE;
return 0;
}
@@ -106,6 +106,8 @@ static int stm32_crc_init(struct shash_desc *desc)
}
spin_unlock_bh(&crc_list.lock);
+ pm_runtime_get_sync(ctx->crc->dev);
+
/* Reset, set key, poly and configure in bit reverse mode */
writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
@@ -115,6 +117,9 @@ static int stm32_crc_init(struct shash_desc *desc)
ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
+ pm_runtime_mark_last_busy(ctx->crc->dev);
+ pm_runtime_put_autosuspend(ctx->crc->dev);
+
return 0;
}
@@ -126,6 +131,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
u32 *d32;
unsigned int i;
+ pm_runtime_get_sync(crc->dev);
+
if (unlikely(crc->nb_pending_bytes)) {
while (crc->nb_pending_bytes != sizeof(u32) && length) {
/* Fill in pending data */
@@ -149,6 +156,9 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ pm_runtime_mark_last_busy(crc->dev);
+ pm_runtime_put_autosuspend(crc->dev);
+
/* Check for pending data (non 32 bits) */
length &= 3;
if (likely(!length))
@@ -174,7 +184,7 @@ static int stm32_crc_final(struct shash_desc *desc, u8 *out)
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
/* Send computed CRC */
- put_unaligned_le32(mctx->poly == POLY_CRC32C ?
+ put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
~ctx->partial : ctx->partial, out);
return 0;
@@ -272,6 +282,13 @@ static int stm32_crc_probe(struct platform_device *pdev)
return ret;
}
+ pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
platform_set_drvdata(pdev, crc);
spin_lock(&crc_list.lock);
@@ -287,12 +304,18 @@ static int stm32_crc_probe(struct platform_device *pdev)
dev_info(dev, "Initialized\n");
+ pm_runtime_put_sync(dev);
+
return 0;
}
static int stm32_crc_remove(struct platform_device *pdev)
{
struct stm32_crc *crc = platform_get_drvdata(pdev);
+ int ret = pm_runtime_get_sync(crc->dev);
+
+ if (ret < 0)
+ return ret;
spin_lock(&crc_list.lock);
list_del(&crc->list);
@@ -300,11 +323,46 @@ static int stm32_crc_remove(struct platform_device *pdev)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+ pm_runtime_disable(crc->dev);
+ pm_runtime_put_noidle(crc->dev);
+
+ clk_disable_unprepare(crc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_crc_runtime_suspend(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+
clk_disable_unprepare(crc->clk);
return 0;
}
+static int stm32_crc_runtime_resume(struct device *dev)
+{
+ struct stm32_crc *crc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(crc->clk);
+ if (ret) {
+ dev_err(crc->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_crc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+ stm32_crc_runtime_resume, NULL)
+};
+
static const struct of_device_id stm32_dt_ids[] = {
{ .compatible = "st,stm32f7-crc", },
{},
@@ -316,6 +374,7 @@ static struct platform_driver stm32_crc_driver = {
.remove = stm32_crc_remove,
.driver = {
.name = DRIVER_NAME,
+ .pm = &stm32_crc_pm_ops,
.of_match_table = stm32_dt_ids,
},
};
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index a81d89b3b7d8..89adf9e0fed2 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -45,11 +45,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "md5-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -73,11 +71,9 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "sha1-sun4i-ss",
.cra_priority = 300,
.cra_alignmask = 3,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
- .cra_type = &crypto_ahash_type,
.cra_init = sun4i_hash_crainit
}
}
@@ -96,8 +92,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -118,8 +113,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -140,8 +134,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -161,8 +154,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -183,8 +175,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -205,7 +196,6 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cf14f099ce4a..6988012deca4 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2822,8 +2822,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2838,8 +2837,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2854,8 +2852,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2870,8 +2867,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2886,8 +2882,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2902,8 +2897,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2918,8 +2912,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2934,8 +2927,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2950,8 +2942,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2966,8 +2957,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2982,8 +2972,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2998,8 +2987,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3186,7 +3174,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
alg->cra_exit = talitos_cra_exit;
- alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
t_alg->algt.alg.hash.final = ahash_final;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 2d0a677bcc76..daf4fed0df8c 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1524,8 +1524,7 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
.cra_init = hash_cra_init,
@@ -1548,11 +1547,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1574,11 +1571,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
@@ -1600,11 +1595,9 @@ static struct hash_algo_template hash_algs[] = {
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-ux500",
- .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC),
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hash_ctx),
- .cra_type = &crypto_ahash_type,
.cra_init = hash_cra_init,
.cra_module = THIS_MODULE,
}
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index af6a908dfa7a..2c573d1aaa64 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -49,12 +49,18 @@ struct virtio_crypto_sym_request {
bool encrypt;
};
+struct virtio_crypto_algo {
+ uint32_t algonum;
+ uint32_t service;
+ unsigned int active_devs;
+ struct crypto_alg algo;
+};
+
/*
* The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
*/
static DEFINE_MUTEX(algs_lock);
-static unsigned int virtio_crypto_active_devs;
static void virtio_crypto_ablkcipher_finalize_req(
struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
@@ -312,15 +318,21 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
unsigned int keylen)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ uint32_t alg;
int ret;
+ ret = virtio_crypto_alg_validate_key(keylen, &alg);
+ if (ret)
+ return ret;
+
if (!ctx->vcrypto) {
/* New key */
int node = virtio_crypto_get_current_node();
struct virtio_crypto *vcrypto =
- virtcrypto_get_dev_node(node);
+ virtcrypto_get_dev_node(node,
+ VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
if (!vcrypto) {
- pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+ pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
return -ENODEV;
}
@@ -371,12 +383,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
/* Why 3? outhdr + iv + inhdr */
sg_total = src_nents + dst_nents + 3;
- sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
+ sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!sgs)
return -ENOMEM;
- req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+ req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
dev_to_node(&vcrypto->vdev->dev));
if (!req_data) {
kfree(sgs);
@@ -571,57 +583,85 @@ static void virtio_crypto_ablkcipher_finalize_req(
virtcrypto_clear_request(&vc_sym_req->base);
}
-static struct crypto_alg virtio_crypto_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "virtio_crypto_aes_cbc",
- .cra_priority = 150,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = virtio_crypto_ablkcipher_init,
- .cra_exit = virtio_crypto_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = virtio_crypto_ablkcipher_setkey,
- .decrypt = virtio_crypto_ablkcipher_decrypt,
- .encrypt = virtio_crypto_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+ .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+ .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+ .algo = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "virtio_crypto_aes_cbc",
+ .cra_priority = 150,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = virtio_crypto_ablkcipher_init,
+ .cra_exit = virtio_crypto_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = virtio_crypto_ablkcipher_setkey,
+ .decrypt = virtio_crypto_ablkcipher_decrypt,
+ .encrypt = virtio_crypto_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
},
},
} };
-int virtio_crypto_algs_register(void)
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
int ret = 0;
+ int i = 0;
mutex_lock(&algs_lock);
- if (++virtio_crypto_active_devs != 1)
- goto unlock;
- ret = crypto_register_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
- if (ret)
- virtio_crypto_active_devs--;
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 0) {
+ ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+ if (ret)
+ goto unlock;
+ }
+
+ virtio_crypto_algs[i].active_devs++;
+ dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+ virtio_crypto_algs[i].algo.cra_name);
+ }
unlock:
mutex_unlock(&algs_lock);
return ret;
}
-void virtio_crypto_algs_unregister(void)
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
+ int i = 0;
+
mutex_lock(&algs_lock);
- if (--virtio_crypto_active_devs != 0)
- goto unlock;
- crypto_unregister_algs(virtio_crypto_algs,
- ARRAY_SIZE(virtio_crypto_algs));
+ for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+ uint32_t service = virtio_crypto_algs[i].service;
+ uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+ if (virtio_crypto_algs[i].active_devs == 0 ||
+ !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+ continue;
+
+ if (virtio_crypto_algs[i].active_devs == 1)
+ crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+
+ virtio_crypto_algs[i].active_devs--;
+ }
-unlock:
mutex_unlock(&algs_lock);
}
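
The net effect is a per-algorithm reference count: the first capable device registers the algorithm with the crypto API, later devices only bump active_devs, and the last device to leave unregisters it. A reduced sketch of that bookkeeping under the same mutex scheme, with hypothetical my_* names and error handling trimmed:

#include <linux/crypto.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_algs_lock);

struct my_algo {
	unsigned int active_devs;	/* devices currently offering this alg */
	struct crypto_alg algo;
};

static int my_algo_get(struct my_algo *a)
{
	int ret = 0;

	mutex_lock(&my_algs_lock);
	if (a->active_devs == 0)		/* first capable device */
		ret = crypto_register_alg(&a->algo);
	if (!ret)
		a->active_devs++;
	mutex_unlock(&my_algs_lock);
	return ret;
}

static void my_algo_put(struct my_algo *a)
{
	mutex_lock(&my_algs_lock);
	if (a->active_devs && --a->active_devs == 0)	/* last device gone */
		crypto_unregister_alg(&a->algo);
	mutex_unlock(&my_algs_lock);
}
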
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 66501a5a2b7b..63ef7f7924ea 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -55,6 +55,20 @@ struct virtio_crypto {
/* Number of queue currently used by the driver */
u32 curr_queue;
+ /*
+ * Specifies the services mask which the device support,
+ * see VIRTIO_CRYPTO_SERVICE_*
+ */
+ u32 crypto_services;
+
+ /* Detailed algorithms mask */
+ u32 cipher_algo_l;
+ u32 cipher_algo_h;
+ u32 hash_algo;
+ u32 mac_algo_l;
+ u32 mac_algo_h;
+ u32 aead_algo;
+
/* Maximum length of cipher key */
u32 max_cipher_key_len;
/* Maximum length of authenticated key */
@@ -102,7 +116,12 @@ int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_get_dev_node(int node);
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
+ uint32_t service,
+ uint32_t algo);
+struct virtio_crypto *virtcrypto_get_dev_node(int node,
+ uint32_t service,
+ uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
@@ -122,7 +141,7 @@ static inline int virtio_crypto_get_current_node(void)
return node;
}
-int virtio_crypto_algs_register(void);
-void virtio_crypto_algs_unregister(void);
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);
#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 7c7198553699..3c9e120287af 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -303,6 +303,13 @@ static int virtcrypto_probe(struct virtio_device *vdev)
u32 max_data_queues = 0, max_cipher_key_len = 0;
u32 max_auth_key_len = 0;
u64 max_size = 0;
+ u32 cipher_algo_l = 0;
+ u32 cipher_algo_h = 0;
+ u32 hash_algo = 0;
+ u32 mac_algo_l = 0;
+ u32 mac_algo_h = 0;
+ u32 aead_algo = 0;
+ u32 crypto_services = 0;
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
@@ -339,6 +346,20 @@ static int virtcrypto_probe(struct virtio_device *vdev)
max_auth_key_len, &max_auth_key_len);
virtio_cread(vdev, struct virtio_crypto_config,
max_size, &max_size);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ crypto_services, &crypto_services);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_l, &cipher_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ cipher_algo_h, &cipher_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ hash_algo, &hash_algo);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_l, &mac_algo_l);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ mac_algo_h, &mac_algo_h);
+ virtio_cread(vdev, struct virtio_crypto_config,
+ aead_algo, &aead_algo);
/* Add virtio crypto device to global table */
err = virtcrypto_devmgr_add_dev(vcrypto);
@@ -358,6 +379,14 @@ static int virtcrypto_probe(struct virtio_device *vdev)
vcrypto->max_cipher_key_len = max_cipher_key_len;
vcrypto->max_auth_key_len = max_auth_key_len;
vcrypto->max_size = max_size;
+ vcrypto->crypto_services = crypto_services;
+ vcrypto->cipher_algo_l = cipher_algo_l;
+ vcrypto->cipher_algo_h = cipher_algo_h;
+ vcrypto->mac_algo_l = mac_algo_l;
+ vcrypto->mac_algo_h = mac_algo_h;
+ vcrypto->hash_algo = hash_algo;
+ vcrypto->aead_algo = aead_algo;
+
dev_info(&vdev->dev,
"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index a69ff71de2c4..d70de3a4f7d7 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -181,14 +181,20 @@ int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
/*
* virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
* @node: Node id the driver works.
+ * @service: Crypto service that needs to be supported by the dev
+ * @algo: The algorithm number that needs to be supported by the dev
*
- * Function returns the virtio crypto device used fewest on the node.
+ * Function returns the least-used virtio crypto device on the node
+ * that supports the given crypto service and algorithm.
*
* To be used by virtio crypto device specific drivers.
*
* Return: pointer to vcrypto_dev or NULL if not found.
*/
-struct virtio_crypto *virtcrypto_get_dev_node(int node)
+struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
+ uint32_t algo)
{
struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
unsigned long best = ~0;
@@ -199,7 +205,8 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
dev_to_node(&tmp_dev->vdev->dev) < 0) &&
- virtcrypto_dev_started(tmp_dev)) {
+ virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
ctr = atomic_read(&tmp_dev->ref_count);
if (best > ctr) {
vcrypto_dev = tmp_dev;
@@ -214,7 +221,9 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
/* Get any started device */
list_for_each_entry(tmp_dev,
virtcrypto_devmgr_get_head(), list) {
- if (virtcrypto_dev_started(tmp_dev)) {
+ if (virtcrypto_dev_started(tmp_dev) &&
+ virtcrypto_algo_is_supported(tmp_dev,
+ service, algo)) {
vcrypto_dev = tmp_dev;
break;
}
@@ -240,7 +249,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node)
*/
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
- if (virtio_crypto_algs_register()) {
+ if (virtio_crypto_algs_register(vcrypto)) {
pr_err("virtio_crypto: Failed to register crypto algs\n");
return -EFAULT;
}
@@ -260,5 +269,65 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
*/
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
- virtio_crypto_algs_unregister();
+ virtio_crypto_algs_unregister(vcrypto);
+}
+
+/*
+ * virtcrypto_algo_is_supported()
+ * @vcrypto: Pointer to virtio crypto device.
+ * @service: The bit number of the crypto service to validate.
+ * See VIRTIO_CRYPTO_SERVICE_*
+ * @algo: The bit number of the algorithm to validate.
+ *
+ * Validate whether the virtio crypto device supports the given
+ * service and algorithm.
+ *
+ * Return: true if the device supports the service and algorithm.
+ */
+
+bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
+ uint32_t service,
+ uint32_t algo)
+{
+ uint32_t service_mask = 1u << service;
+ uint32_t algo_mask = 0;
+ bool low = true;
+
+ if (algo > 31) {
+ algo -= 32;
+ low = false;
+ }
+
+ if (!(vcrypto->crypto_services & service_mask))
+ return false;
+
+ switch (service) {
+ case VIRTIO_CRYPTO_SERVICE_CIPHER:
+ if (low)
+ algo_mask = vcrypto->cipher_algo_l;
+ else
+ algo_mask = vcrypto->cipher_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_HASH:
+ algo_mask = vcrypto->hash_algo;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_MAC:
+ if (low)
+ algo_mask = vcrypto->mac_algo_l;
+ else
+ algo_mask = vcrypto->mac_algo_h;
+ break;
+
+ case VIRTIO_CRYPTO_SERVICE_AEAD:
+ algo_mask = vcrypto->aead_algo;
+ break;
+ }
+
+ if (!(algo_mask & (1u << algo)))
+ return false;
+
+ return true;
}
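
virtcrypto_algo_is_supported() boils down to a two-level bitmask test: one bit per service in crypto_services, then one bit per algorithm in a pair of 32-bit words (_l covering algorithms 0-31, _h covering 32-63) selected by the service. A self-contained illustration of the same test, using a hypothetical caps structure rather than the driver's types and showing only the cipher service:

#include <stdbool.h>
#include <stdint.h>

struct caps {
	uint32_t services;	/* bit N set => service N offered */
	uint32_t cipher_algo_l;	/* cipher algorithms 0..31 */
	uint32_t cipher_algo_h;	/* cipher algorithms 32..63 */
};

#define SERVICE_CIPHER	0

static bool algo_is_supported(const struct caps *c, uint32_t service, uint32_t algo)
{
	uint32_t mask;

	if (!(c->services & (1u << service)))
		return false;		/* service not offered at all */

	if (service != SERVICE_CIPHER)
		return false;		/* only cipher shown in this sketch */

	mask = (algo > 31) ? c->cipher_algo_h : c->cipher_algo_l;
	return mask & (1u << (algo & 31));
}
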
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b889fba..dd8b8716467a 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -215,7 +215,7 @@ struct shash_alg p8_ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index e107e180e2c8..1e929a1e4ca7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -119,6 +119,7 @@
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -887,7 +888,6 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
- u32 poly = 0xedb88320; /* CRCPOLY_LE */
u32 crc = ~0;
u32 temp = 0;
unsigned char *data = (unsigned char *)&vid_le;
@@ -904,7 +904,7 @@ static u32 xgbe_vid_crc32_le(__le16 vid_le)
data_byte >>= 1;
if (temp)
- crc ^= poly;
+ crc ^= CRC32_POLY_LE;
}
return crc;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 5a655d289dd5..024998d6d8c6 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -37,11 +38,6 @@
#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
-/*
- * CRC polynomial - used in working out multicast filter bits.
- */
-#define ENET_CRCPOLY 0x04c11db7
-
/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST
@@ -838,7 +834,7 @@ crc416(unsigned int curval, unsigned short nxtval)
next = next >> 1;
/* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
+ if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
}
return cur;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d8dad07f826a..e6f28c7942ab 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,6 +54,7 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/crc32poly.h>
#include <net/checksum.h>
#include <net/ip.h>
@@ -9722,7 +9723,7 @@ static inline u32 calc_crc(unsigned char *buf, int len)
reg >>= 1;
if (tmp)
- reg ^= 0xedb88320;
+ reg ^= CRC32_POLY_LE;
}
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 76366c735831..2708297e7795 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2950,7 +2950,6 @@ fec_enet_close(struct net_device *ndev)
*/
#define FEC_HASH_BITS 6 /* #bits in hash */
-#define CRC32_POLY 0xEDB88320
static void set_multicast_list(struct net_device *ndev)
{
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
index 7832db71dcb9..1dbee5d898b3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fec.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -2,9 +2,6 @@
#ifndef FS_ENET_FEC_H
#define FS_ENET_FEC_H
-/* CRC polynomium used by the FEC for the multicast group filtering */
-#define FEC_CRC_POLY 0x04C11DB7
-
#define FEC_MAX_MULTICAST_ADDRS 64
/* Interrupt events/masks.
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index f3e9dd47b56f..0e9719fbc624 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -30,6 +30,7 @@
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -1078,7 +1079,7 @@ static void ks_stop_rx(struct ks_net *ks)
} /* ks_stop_rx */
-static unsigned long const ethernet_polynomial = 0x04c11db7U;
+static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
static unsigned long ether_gen_crc(int length, u8 *data)
{
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 458a7844260a..99d86e39ff54 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/dcbnl.h>
#include "dwc-xlgmac.h"
@@ -193,7 +194,6 @@ static u32 xlgmac_vid_crc32_le(__le16 vid_le)
{
unsigned char *data = (unsigned char *)&vid_le;
unsigned char data_byte = 0;
- u32 poly = 0xedb88320;
u32 crc = ~0;
u32 temp = 0;
int i, bits;
@@ -208,7 +208,7 @@ static u32 xlgmac_vid_crc32_le(__le16 vid_le)
data_byte >>= 1;
if (temp)
- crc ^= poly;
+ crc ^= CRC32_POLY_LE;
}
return crc;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 7bc74d7d8a3a..1075eacdb441 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -40,6 +40,7 @@
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
+#include <linux/crc32poly.h>
#include <linux/semaphore.h>
#include "osdep_service.h"
@@ -49,8 +50,6 @@
/* =====WEP related===== */
-#define CRC32_POLY 0x04c11db7
-
struct arc4context {
u32 x;
u32 y;
@@ -135,7 +134,7 @@ static void crc32_init(void)
for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j)
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
p1 = (u8 *)&crc32_table[i];
p1[0] = crc32_reverseBit(p[3]);
p1[1] = crc32_reverseBit(p[2]);
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 612277a555d2..6c8ac9e86c9f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -6,6 +6,7 @@
******************************************************************************/
#define _RTW_SECURITY_C_
+#include <linux/crc32poly.h>
#include <drv_types.h>
#include <rtw_debug.h>
@@ -87,8 +88,6 @@ const char *security_type_str(u8 value)
/* WEP related ===== */
-#define CRC32_POLY 0x04c11db7
-
struct arc4context {
u32 x;
u32 y;
@@ -178,7 +177,7 @@ static void crc32_init(void)
for (i = 0; i < 256; ++i) {
k = crc32_reverseBit((u8)i);
for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
- c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1);
}
p1 = (u8 *)&crc32_table[i];
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index 11f5e530a75f..c31fc6408383 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -137,7 +137,6 @@ static struct shash_alg alg256 = {
.base = {
.cra_name = "skein256",
.cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SKEIN_256_BLOCK_BYTES,
.cra_module = THIS_MODULE,
}
@@ -155,7 +154,6 @@ static struct shash_alg alg512 = {
.base = {
.cra_name = "skein512",
.cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SKEIN_512_BLOCK_BYTES,
.cra_module = THIS_MODULE,
}
@@ -173,7 +171,6 @@ static struct shash_alg alg1024 = {
.base = {
.cra_name = "skein1024",
.cra_driver_name = "skein",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SKEIN_1024_BLOCK_BYTES,
.cra_module = THIS_MODULE,
}
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 71e1bb24d79f..7e0dad94cb2b 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -29,17 +29,21 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
+ * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
+ * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
void *key;
void *p;
+ void *q;
void *g;
unsigned int key_size;
unsigned int p_size;
+ unsigned int q_size;
unsigned int g_size;
};
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8f941102af36..3fb581bf3b87 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -122,11 +122,10 @@ struct drbg_state {
struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */
struct skcipher_request *ctr_req; /* CTR mode request handle */
- __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */
- __u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
+ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 880e6be9e95e..a66c127a20ed 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -22,27 +22,14 @@
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
- struct scatterlist *sg,
- int chain, int num)
+ struct scatterlist *sg, int num)
{
- if (chain) {
- head->length += sg->length;
- sg = sg_next(sg);
- }
-
if (sg)
sg_chain(head, num, sg);
else
sg_mark_end(head);
}
-static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
- struct scatter_walk *walk_out)
-{
- return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
- (int)(walk_in->offset - walk_out->offset));
-}
-
static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 0555b571dd34..8a46202b1857 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -71,6 +71,10 @@ extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
+extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
+
+extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
+
struct sha1_state {
u32 state[SHA1_DIGEST_SIZE / 4];
u64 count;
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7b2fe1..000000000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Modified to interface to the Linux kernel
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __CRYPTO_VMAC_H
-#define __CRYPTO_VMAC_H
-
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-
-/*
- * This implementation uses u32 and u64 as names for unsigned 32-
- * and 64-bit integer types. These are defined in C99 stdint.h. The
- * following may need adaptation if you are not running a C99 or
- * Microsoft C environment.
- */
-struct vmac_ctx {
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
- u64 polytmp[2*VMAC_TAG_LEN/64];
- u64 cached_nonce[2];
- u64 cached_aes[2];
- int first_block_processed;
-};
-
-typedef u64 vmac_t;
-
-struct vmac_ctx_t {
- struct crypto_cipher *child;
- struct vmac_ctx __vmac_ctx;
- u8 partial[VMAC_NHBYTES]; /* partial block */
- int partial_size; /* size of the partial block */
-};
-
-#endif /* __CRYPTO_VMAC_H */
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 000000000000..62c4b7790a28
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRC32_POLY_LE 0xedb88320
+#define CRC32_POLY_BE 0x04c11db7
+
+/*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+ * x^8+x^6+x^0
+ */
+#define CRC32C_POLY_LE 0x82F63B78
+
+#endif /* _LINUX_CRC32_POLY_H */
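
These constants drop straight into the textbook bit-at-a-time CRC-32 loops used by the call sites above; a minimal sketch of the reflected (little-endian, CRC32_POLY_LE) form, essentially what crc32_le_generic() computes when CRC_LE_BITS == 1:

#include <linux/crc32poly.h>
#include <linux/types.h>

/* Bit-at-a-time CRC-32 over a buffer, LSB-first (reflected) form.
 * Callers conventionally seed with ~0 and invert the result, as
 * lib/crc32.c does; shown only to illustrate how CRC32_POLY_LE is used.
 */
static u32 crc32_le_bitwise(u32 crc, const u8 *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
	}
	return crc;
}
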
diff --git a/lib/crc32.c b/lib/crc32.c
index 2ef20fe84b69..a6c9afafc8c8 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -27,6 +27,7 @@
/* see: Documentation/crc32.txt for a description of algorithms */
#include <linux/crc32.h>
+#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -184,7 +185,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
+ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -194,7 +195,7 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
- (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
+ (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
@@ -268,7 +269,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
{
- return crc32_generic_shift(crc, len, CRCPOLY_LE);
+ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
}
u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
@@ -330,13 +331,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
#if CRC_LE_BITS == 1
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
- return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
+ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
}
#else
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
return crc32_be_generic(crc, p, len,
- (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
+ (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
}
#endif
EXPORT_SYMBOL(crc32_be);
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
index cb275a28a750..0c8fb5923e7e 100644
--- a/lib/crc32defs.h
+++ b/lib/crc32defs.h
@@ -1,18 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
-#define CRCPOLY_LE 0xedb88320
-#define CRCPOLY_BE 0x04c11db7
-
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
/* Try to choose an implementation variant via Kconfig */
#ifdef CONFIG_CRC32_SLICEBY8
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 0234361b24b8..7c4932eed748 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -51,6 +51,7 @@
#endif /* STATIC */
#include <linux/decompress/mm.h>
+#include <linux/crc32poly.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
@@ -654,7 +655,7 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
- c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
+ c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
bd->crc32Table[i] = c;
}
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f26660ea10a..f755b997b967 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
+#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
@@ -57,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,
static void crc32init_le(void)
{
- crc32init_le_generic(CRCPOLY_LE, crc32table_le);
+ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
}
static void crc32cinit_le(void)
@@ -76,7 +77,7 @@ static void crc32init_be(void)
crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
- crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
}
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 468fb7cd1221..a5c921e6d667 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -41,7 +41,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
- int esign, msign, bsign, rsign;
+ int msign, bsign, rsign;
mpi_size_t size;
int mod_shift_cnt;
int negative_result;
@@ -53,7 +53,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
esize = exp->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
- esign = exp->sign;
msign = mod->sign;
rp = res->d;
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 34532d14fd4c..25a5d87e2e4c 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,6 +15,7 @@
* but they are bigger and use more memory for the lookup table.
*/
+#include <linux/crc32poly.h>
#include "xz_private.h"
/*
@@ -29,7 +30,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void)
{
- const uint32_t poly = 0xEDB88320;
+ const uint32_t poly = CRC32_POLY_LE;
uint32_t i;
uint32_t j;
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index e3313c45663f..6102169239d1 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -42,7 +42,7 @@ static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
sg_set_page(sg, sg_page(src),
src->length - diff, walk->offset);
- scatterwalk_crypto_chain(sg, sg_next(src), 0, 2);
+ scatterwalk_crypto_chain(sg, sg_next(src), 2);
}
static int tls_enc_record(struct aead_request *aead_req,
diff --git a/security/keys/dh.c b/security/keys/dh.c
index b203f7758f97..711e89d8c415 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -317,7 +317,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
if (ret)
goto out3;
- tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
+ tfm = crypto_alloc_kpp("dh", 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto out3;