author:    Johannes Goetzfried <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>  2012-07-11 19:37:37 +0200
committer: Herbert Xu <herbert@gondor.apana.org.au>  2012-08-01 11:47:30 +0200
commit:    4d6d6a2c850f89bc9283d02519cb536baba72032 (patch)
tree:      8433747260d88000d79849bcd4db0e56b86aa6e4 /arch/x86/crypto/cast5-avx-x86_64-asm_64.S
parent:    crypto: testmgr - add larger cast5 testvectors (diff)
crypto: cast5 - add x86_64/avx assembler implementation
This patch adds an x86_64/AVX assembler implementation of the Cast5 block
cipher. The implementation processes sixteen blocks in parallel (four
4-block chunk AVX operations). The table lookups are done in
general-purpose registers. For small block sizes the functions from the
generic module are called. A good performance increase is provided for
block sizes greater than or equal to 128 bytes.

Patch has been tested with tcrypt and automated filesystem tests.

Tcrypt benchmark results:

Intel Core i5-2500 CPU (fam:6, model:42, step:7)

cast5-avx-x86_64 vs. cast5-generic

64bit key:
size    ecb-enc ecb-dec cbc-enc cbc-dec ctr-enc ctr-dec
16B     0.99x   0.99x   1.00x   1.00x   1.02x   1.01x
64B     1.00x   1.00x   0.98x   1.00x   1.01x   1.02x
256B    2.03x   2.01x   0.95x   2.11x   2.12x   2.13x
1024B   2.30x   2.24x   0.95x   2.29x   2.35x   2.35x
8192B   2.31x   2.27x   0.95x   2.31x   2.39x   2.39x

128bit key:
size    ecb-enc ecb-dec cbc-enc cbc-dec ctr-enc ctr-dec
16B     0.99x   0.99x   1.00x   1.00x   1.01x   1.01x
64B     1.00x   1.00x   0.98x   1.01x   1.02x   1.01x
256B    2.17x   2.13x   0.96x   2.19x   2.19x   2.19x
1024B   2.29x   2.32x   0.95x   2.34x   2.37x   2.38x
8192B   2.35x   2.32x   0.95x   2.35x   2.39x   2.39x

Signed-off-by: Johannes Goetzfried <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
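For reference, the three CAST5 round function types defined in RFC 2144
(section 2.2), which the F1/F2/F3 assembler macros in the diff below
vectorize, can be sketched in scalar C roughly as follows. This is a
minimal illustration of the RFC's formulation, not part of the patch;
the function names and the extern S-box declarations are placeholders:

#include <stdint.h>

/* the 8x32-bit S-boxes; the AVX code links against the generic module's
 * cast5_s1..cast5_s4 tables instead */
extern const uint32_t s1[256], s2[256], s3[256], s4[256];

static inline uint32_t rol32(uint32_t v, unsigned int n)
{
	n &= 31;	/* kr is a 5-bit rotation count */
	return n ? (v << n) | (v >> (32 - n)) : v;
}

/* Type 1: I = rol(km + d, kr); f = ((s1[Ia] ^ s2[Ib]) - s3[Ic]) + s4[Id] */
static uint32_t cast5_f1(uint32_t d, uint32_t km, uint8_t kr)
{
	uint32_t i = rol32(km + d, kr);

	return ((s1[i >> 24] ^ s2[(i >> 16) & 0xff]) -
		s3[(i >> 8) & 0xff]) + s4[i & 0xff];
}

/* Type 2: I = rol(km ^ d, kr); f = ((s1[Ia] - s2[Ib]) + s3[Ic]) ^ s4[Id] */
static uint32_t cast5_f2(uint32_t d, uint32_t km, uint8_t kr)
{
	uint32_t i = rol32(km ^ d, kr);

	return ((s1[i >> 24] - s2[(i >> 16) & 0xff]) +
		s3[(i >> 8) & 0xff]) ^ s4[i & 0xff];
}

/* Type 3: I = rol(km - d, kr); f = ((s1[Ia] + s2[Ib]) ^ s3[Ic]) - s4[Id] */
static uint32_t cast5_f3(uint32_t d, uint32_t km, uint8_t kr)
{
	uint32_t i = rol32(km - d, kr);

	return ((s1[i >> 24] + s2[(i >> 16) & 0xff]) ^
		s3[(i >> 8) & 0xff]) - s4[i & 0xff];
}

Each encryption round then computes l' = r and r' = l ^ f(r, km[n], kr[n]);
the assembler version below performs the add/xor/sub and the rotate on four
blocks per XMM register and the S-box lookups through the general-purpose
registers.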
Diffstat (limited to 'arch/x86/crypto/cast5-avx-x86_64-asm_64.S')
-rw-r--r--  arch/x86/crypto/cast5-avx-x86_64-asm_64.S  322
1 file changed, 322 insertions(+), 0 deletions(-)
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
new file mode 100644
index 000000000000..94693c877e3b
--- /dev/null
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -0,0 +1,322 @@
+/*
+ * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
+ *
+ * Copyright (C) 2012 Johannes Goetzfried
+ * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ *
+ */
+
+.file "cast5-avx-x86_64-asm_64.S"
+.text
+
+.extern cast5_s1
+.extern cast5_s2
+.extern cast5_s3
+.extern cast5_s4
+
+/* structure of crypto context */
+#define km 0
+#define kr (16*4)
+#define rr ((16*4)+16)
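+/* km: 16 x 32-bit masking subkeys, kr: 16 x 8-bit rotation subkeys, */
+/* rr: set when the key is <= 80 bits and the 12-round variant is used */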
+
+/* s-boxes */
+#define s1 cast5_s1
+#define s2 cast5_s2
+#define s3 cast5_s3
+#define s4 cast5_s4
+
+/**********************************************************************
+ 16-way AVX cast5
+ **********************************************************************/
+#define CTX %rdi
+
+#define RL1 %xmm0
+#define RR1 %xmm1
+#define RL2 %xmm2
+#define RR2 %xmm3
+#define RL3 %xmm4
+#define RR3 %xmm5
+#define RL4 %xmm6
+#define RR4 %xmm7
+
+#define RX %xmm8
+
+#define RKM %xmm9
+#define RKRF %xmm10
+#define RKRR %xmm11
+
+#define RTMP %xmm12
+#define RMASK %xmm13
+#define R32 %xmm14
+
+#define RID1 %rax
+#define RID1b %al
+#define RID2 %rbx
+#define RID2b %bl
+
+#define RGI1 %rdx
+#define RGI1bl %dl
+#define RGI1bh %dh
+#define RGI2 %rcx
+#define RGI2bl %cl
+#define RGI2bh %ch
+
+#define RFS1 %r8
+#define RFS1d %r8d
+#define RFS2 %r9
+#define RFS2d %r9d
+#define RFS3 %r10
+#define RFS3d %r10d
+
+
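+/*
+ * dst = ((s1[byte0] op1 s2[byte1]) op2 s3[byte2]) op3 s4[byte3], where
+ * byte0..byte3 are the low four bytes of src; clobbers src (shifted
+ * right by 16) as well as RID1 and RID2.
+ */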
+#define lookup_32bit(src, dst, op1, op2, op3) \
+ movb src ## bl, RID1b; \
+ movb src ## bh, RID2b; \
+ movl s1(, RID1, 4), dst ## d; \
+ op1 s2(, RID2, 4), dst ## d; \
+ shrq $16, src; \
+ movb src ## bl, RID1b; \
+ movb src ## bh, RID2b; \
+ op2 s3(, RID1, 4), dst ## d; \
+ op3 s4(, RID2, 4), dst ## d;
+
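+/*
+ * CAST5 round function on four blocks at once: each 32-bit lane is set
+ * to rotl32(km op0 block, kr), then every lane's four bytes are run
+ * through the s-box lookups (lookup_32bit) via general-purpose registers.
+ */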
+#define F(a, x, op0, op1, op2, op3) \
+ op0 a, RKM, x; \
+ vpslld RKRF, x, RTMP; \
+ vpsrld RKRR, x, x; \
+ vpor RTMP, x, x; \
+ \
+ vpshufb RMASK, x, x; \
+ vmovq x, RGI1; \
+ vpsrldq $8, x, x; \
+ vmovq x, RGI2; \
+ \
+ lookup_32bit(RGI1, RFS1, op1, op2, op3); \
+ shrq $16, RGI1; \
+ lookup_32bit(RGI1, RFS2, op1, op2, op3); \
+ shlq $32, RFS2; \
+ orq RFS1, RFS2; \
+ \
+ lookup_32bit(RGI2, RFS1, op1, op2, op3); \
+ shrq $16, RGI2; \
+ lookup_32bit(RGI2, RFS3, op1, op2, op3); \
+ shlq $32, RFS3; \
+ orq RFS1, RFS3; \
+ \
+ vmovq RFS2, x; \
+ vpinsrq $1, RFS3, x, x;
+
+#define F1(b, x) F(b, x, vpaddd, xorl, subl, addl)
+#define F2(b, x) F(b, x, vpxor, subl, addl, xorl)
+#define F3(b, x) F(b, x, vpsubd, addl, xorl, subl)
+
+#define subround(a, b, x, n, f) \
+ F ## f(b, x); \
+ vpxor a, x, a;
+
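+/*
+ * One cipher round over all 16 blocks: RKM = broadcast of the 32-bit
+ * masking subkey km[n], RKRF = rotation subkey kr[n], RKRR = 32 - kr[n]
+ * (the left/right shift counts emulating the rotate), applied to the
+ * four 4-block chunks with round function type f.
+ */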
+#define round(l, r, n, f) \
+ vbroadcastss (km+(4*n))(CTX), RKM; \
+ vpinsrb $0, (kr+n)(CTX), RKRF, RKRF; \
+ vpsubq RKRF, R32, RKRR; \
+ subround(l ## 1, r ## 1, RX, n, f); \
+ subround(l ## 2, r ## 2, RX, n, f); \
+ subround(l ## 3, r ## 3, RX, n, f); \
+ subround(l ## 4, r ## 4, RX, n, f);
+
+
+#define transpose_2x4(x0, x1, t0, t1) \
+ vpunpckldq x1, x0, t0; \
+ vpunpckhdq x1, x0, t1; \
+ \
+ vpunpcklqdq t1, t0, x0; \
+ vpunpckhqdq t1, t0, x1;
+
+#define inpack_blocks(in, x0, x1, t0, t1) \
+ vmovdqu (0*4*4)(in), x0; \
+ vmovdqu (1*4*4)(in), x1; \
+ vpshufb RMASK, x0, x0; \
+ vpshufb RMASK, x1, x1; \
+ \
+ transpose_2x4(x0, x1, t0, t1)
+
+#define outunpack_blocks(out, x0, x1, t0, t1) \
+ transpose_2x4(x0, x1, t0, t1) \
+ \
+ vpshufb RMASK, x0, x0; \
+ vpshufb RMASK, x1, x1; \
+ vmovdqu x0, (0*4*4)(out); \
+ vmovdqu x1, (1*4*4)(out);
+
+#define outunpack_xor_blocks(out, x0, x1, t0, t1) \
+ transpose_2x4(x0, x1, t0, t1) \
+ \
+ vpshufb RMASK, x0, x0; \
+ vpshufb RMASK, x1, x1; \
+ vpxor (0*4*4)(out), x0, x0; \
+ vmovdqu x0, (0*4*4)(out); \
+ vpxor (1*4*4)(out), x1, x1; \
+ vmovdqu x1, (1*4*4)(out);
+
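+/* vpshufb mask byte-swapping each 32-bit word, and the constant 32
+ * used to derive the right-shift count for the key-dependent rotate */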
+.align 16
+.Lbswap_mask:
+ .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+.L32_mask:
+ .byte 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+
+.align 16
+.global __cast5_enc_blk_16way
+.type __cast5_enc_blk_16way,@function;
+
+__cast5_enc_blk_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: bool, if true: xor output
+ */
+
+ pushq %rbx;
+ pushq %rcx;
+
+ vmovdqu .Lbswap_mask, RMASK;
+ vmovdqu .L32_mask, R32;
+ vpxor RKRF, RKRF, RKRF;
+
+ inpack_blocks(%rdx, RL1, RR1, RTMP, RX);
+ leaq (2*4*4)(%rdx), %rax;
+ inpack_blocks(%rax, RL2, RR2, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ inpack_blocks(%rax, RL3, RR3, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ inpack_blocks(%rax, RL4, RR4, RTMP, RX);
+
+ xorq RID1, RID1;
+ xorq RID2, RID2;
+
+ round(RL, RR, 0, 1);
+ round(RR, RL, 1, 2);
+ round(RL, RR, 2, 3);
+ round(RR, RL, 3, 1);
+ round(RL, RR, 4, 2);
+ round(RR, RL, 5, 3);
+ round(RL, RR, 6, 1);
+ round(RR, RL, 7, 2);
+ round(RL, RR, 8, 3);
+ round(RR, RL, 9, 1);
+ round(RL, RR, 10, 2);
+ round(RR, RL, 11, 3);
+
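+ /* rr != 0 means the key is <= 80 bits: the 12-round variant is used
+  * and the last four rounds are skipped */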
+ movb rr(CTX), %al;
+ testb %al, %al;
+ jnz __skip_enc;
+
+ round(RL, RR, 12, 1);
+ round(RR, RL, 13, 2);
+ round(RL, RR, 14, 3);
+ round(RR, RL, 15, 1);
+
+__skip_enc:
+ popq %rcx;
+ popq %rbx;
+
+ testb %cl, %cl;
+ jnz __enc_xor16;
+
+ outunpack_blocks(%rsi, RR1, RL1, RTMP, RX);
+ leaq (2*4*4)(%rsi), %rax;
+ outunpack_blocks(%rax, RR2, RL2, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ outunpack_blocks(%rax, RR3, RL3, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ outunpack_blocks(%rax, RR4, RL4, RTMP, RX);
+
+ ret;
+
+__enc_xor16:
+ outunpack_xor_blocks(%rsi, RR1, RL1, RTMP, RX);
+ leaq (2*4*4)(%rsi), %rax;
+ outunpack_xor_blocks(%rax, RR2, RL2, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ outunpack_xor_blocks(%rax, RR3, RL3, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX);
+
+ ret;
+
+.align 16
+.global cast5_dec_blk_16way
+.type cast5_dec_blk_16way,@function;
+
+cast5_dec_blk_16way:
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ pushq %rbx;
+
+ vmovdqu .Lbswap_mask, RMASK;
+ vmovdqu .L32_mask, R32;
+ vpxor RKRF, RKRF, RKRF;
+
+ inpack_blocks(%rdx, RL1, RR1, RTMP, RX);
+ leaq (2*4*4)(%rdx), %rax;
+ inpack_blocks(%rax, RL2, RR2, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ inpack_blocks(%rax, RL3, RR3, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ inpack_blocks(%rax, RL4, RR4, RTMP, RX);
+
+ xorq RID1, RID1;
+ xorq RID2, RID2;
+
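+ /* for the 12-round variant (key <= 80 bits) decryption skips the
+  * first four rounds (15..12) */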
+ movb rr(CTX), %al;
+ testb %al, %al;
+ jnz __skip_dec;
+
+ round(RL, RR, 15, 1);
+ round(RR, RL, 14, 3);
+ round(RL, RR, 13, 2);
+ round(RR, RL, 12, 1);
+
+__skip_dec:
+ round(RL, RR, 11, 3);
+ round(RR, RL, 10, 2);
+ round(RL, RR, 9, 1);
+ round(RR, RL, 8, 3);
+ round(RL, RR, 7, 2);
+ round(RR, RL, 6, 1);
+ round(RL, RR, 5, 3);
+ round(RR, RL, 4, 2);
+ round(RL, RR, 3, 1);
+ round(RR, RL, 2, 3);
+ round(RL, RR, 1, 2);
+ round(RR, RL, 0, 1);
+
+ popq %rbx;
+
+ outunpack_blocks(%rsi, RR1, RL1, RTMP, RX);
+ leaq (2*4*4)(%rsi), %rax;
+ outunpack_blocks(%rax, RR2, RL2, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ outunpack_blocks(%rax, RR3, RL3, RTMP, RX);
+ leaq (2*4*4)(%rax), %rax;
+ outunpack_blocks(%rax, RR4, RL4, RTMP, RX);
+
+ ret;