author     Tony Luck <tony.luck@intel.com>    2019-12-16 22:42:54 +0100
committer  Borislav Petkov <bp@suse.de>       2020-01-08 11:29:25 +0100
commit     f444a5ff95dce07cf4353cbb85fc3e785019d430 (patch)
tree       028332dd5f3c84140bc8ec91bc705b39f0546e1d /arch/x86/lib/memmove_64.S
parent     x86/context-tracking: Remove exception_enter/exit() from KVM_PV_REASON_PAGE_N... (diff)
download   linux-f444a5ff95dce07cf4353cbb85fc3e785019d430.tar.xz
           linux-f444a5ff95dce07cf4353cbb85fc3e785019d430.zip
x86/cpufeatures: Add support for fast short REP; MOVSB
From the Intel Optimization Reference Manual:

3.7.6.1 Fast Short REP MOVSB

Beginning with processors based on Ice Lake Client microarchitecture,
REP MOVSB performance of short operations is enhanced. The enhancement
applies to string lengths between 1 and 128 bytes long. Support for
fast-short REP MOVSB is enumerated by the CPUID feature flag: CPUID
(EAX=7H, ECX=0H).EDX.FAST_SHORT_REP_MOVSB[bit 4] = 1. There is no
change in the REP STOS performance.

Add an X86_FEATURE_FSRM flag for this.

memmove() avoids REP MOVSB for short (< 32 byte) copies. Check FSRM and
use REP MOVSB for short copies on systems that support it.

[ bp: Massage and add comment. ]

Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20191216214254.26492-1-tony.luck@intel.com
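As an aside (not part of the patch itself): the enumeration quoted above can
be verified from user space with the same CPUID leaf. Below is a minimal
sketch using the <cpuid.h> helper shipped with GCC/Clang; the leaf, subleaf
and bit position come from the text above, everything else is illustrative.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Structured extended feature flags: CPUID leaf 7, subleaf 0. */
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                return 1;

        /* EDX bit 4 = FAST_SHORT_REP_MOVSB, as quoted from the manual. */
        printf("FSRM: %s\n", (edx & (1u << 4)) ? "yes" : "no");
        return 0;
}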
Diffstat (limited to 'arch/x86/lib/memmove_64.S')
 arch/x86/lib/memmove_64.S | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 337830d7a59c..7ff00ea64e4f 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -29,10 +29,7 @@
 SYM_FUNC_START_ALIAS(memmove)
 SYM_FUNC_START(__memmove)
 
-	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
-	cmp $0x20, %rdx
-	jb 1f
 
 	/* Decide forward/backward copy mode */
 	cmp %rdi, %rsi
@@ -42,7 +39,9 @@ SYM_FUNC_START(__memmove)
 	cmp %rdi, %r8
 	jg 2f
 
+	/* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
+	ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
 	ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
 
 	/*
@@ -114,6 +113,8 @@ SYM_FUNC_START(__memmove)
 	 */
 	.p2align 4
 2:
+	cmp $0x20, %rdx
+	jb 1f
 	cmp $680, %rdx
 	jb 6f
 	cmp %dil, %sil
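A note on how the patched code behaves (not from the commit itself):
ALTERNATIVE emits the first instruction string and lets the kernel patch in
the second at boot when the named CPU feature is present. So the
"cmp $0x20, %rdx; jb 1f" length check stays live only on CPUs without FSRM,
and the "rep movsb" sequence is patched in on CPUs with ERMS (which FSRM
implies). A rough C model of the resulting forward-copy decision, purely
illustrative: have_fsrm/have_erms stand in for the X86_FEATURE_* flags, and
the byte loop stands in for the short-copy tail and the unrolled 32-byte
copy loop.

#include <stddef.h>
#include <string.h>

static int have_fsrm;   /* stands in for X86_FEATURE_FSRM */
static int have_erms;   /* stands in for X86_FEATURE_ERMS */

/* Illustrative model of the forward-copy decision after this patch. */
static void *forward_copy(void *dst, const void *src, size_t len)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        /* First ALTERNATIVE: without FSRM the "cmp $0x20; jb 1f" check is
         * live, so copies shorter than 32 bytes bypass REP MOVSB. */
        int short_copy = !have_fsrm && len < 0x20;

        /* Second ALTERNATIVE: with ERMS (implied by FSRM) the whole copy
         * becomes a single REP MOVSB; memcpy stands in for it here. */
        if (!short_copy && have_erms)
                return memcpy(dst, src, len);

        /* Stand-in for the short-copy tail / unrolled 32-byte loop. */
        while (len--)
                *d++ = *s++;
        return dst;
}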