author		Dave Watson <davejwatson@fb.com>		2018-02-14 18:40:31 +0100
committer	Herbert Xu <herbert@gondor.apana.org.au>	2018-02-22 15:16:50 +0100
commit		933d6aefd54e4b49c347eb1a6cf8f8785814e90e (patch)
tree		d1ce8a8cd085f135cdf597d06da20fe686882ba2 /arch/x86/crypto
parent		crypto: aesni - Introduce partial block macro (diff)
crypto: aesni - Add fast path for > 16 byte update
We can fast-path any < 16 byte read if the full message is > 16 bytes:
read the last 16 bytes of the data and shift the partial block down by
the appropriate amount. Usually we are reading > 16 bytes, so this should
be faster for the average case than the READ_PARTIAL_BLOCK macro
introduced in b20209c91e2.
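
For illustration, the idea behind the fast path can be sketched in C roughly
as follows. This sketch is not part of the patch; the helper name
read_partial_block_fast and its parameters are invented for the example, and
it models only the "over-read the tail and shift" trick, not the rest of the
GCM update.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustrative sketch only, not code from this patch.  "len" is the total
 * number of message bytes processed so far including the trailing partial
 * block, and "rem" (assumed 0 < rem < 16) is the size of that partial block.
 */
static void read_partial_block_fast(const uint8_t *msg, size_t len,
				    size_t rem, uint8_t out[16])
{
	uint8_t tmp[16];

	/* Safe only because len >= 16: the 16 bytes ending at the end of
	 * the message are all inside the buffer, so a full-width load
	 * cannot run past either end. */
	memcpy(tmp, msg + len - 16, 16);

	/* The partial block occupies the top "rem" bytes of tmp; move it
	 * down to offset 0 and zero the rest.  The assembly below does
	 * this in one step with a PSHUFB through the SHIFT_MASK table. */
	memset(out, 0, 16);
	memcpy(out, tmp + (16 - rem), rem);
}

When the whole message is shorter than 16 bytes there is no full block to
over-read from, so that case still goes through the byte-by-byte
READ_PARTIAL_BLOCK path.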
Signed-off-by: Dave Watson <davejwatson@fb.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'arch/x86/crypto')
-rw-r--r--	arch/x86/crypto/aesni-intel_asm.S	25
1 file changed, 25 insertions, 0 deletions
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 760a5174e308..c00f32c39360 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -356,12 +356,37 @@ _zero_cipher_left_\@:
 	ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1        # Encrypt(K, Yn)
 	movdqu %xmm0, PBlockEncKey(%arg2)
 
+	cmp	$16, %arg5
+	jge	_large_enough_update_\@
+
 	lea (%arg4,%r11,1), %r10
 	mov %r13, %r12
 	READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+	jmp	_data_read_\@
+
+_large_enough_update_\@:
+	sub	$16, %r11
+	add	%r13, %r11
+
+	# receive the last <16 Byte block
+	movdqu	(%arg4, %r11, 1), %xmm1
+	sub	%r13, %r11
+	add	$16, %r11
+
+	lea	SHIFT_MASK+16(%rip), %r12
+	# adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+	# (r13 is the number of bytes in plaintext mod 16)
+	sub	%r13, %r12
+	# get the appropriate shuffle mask
+	movdqu	(%r12), %xmm2
+	# shift right 16-r13 bytes
+	PSHUFB_XMM  %xmm2, %xmm1
+
+_data_read_\@:
 
 	lea ALL_F+16(%rip), %r12
 	sub %r13, %r12
+
 .ifc \operation, dec
 	movdqa %xmm1, %xmm2
 .endif