author		Tony Luck <tony.luck@intel.com>		2019-12-17 00:42:54 +0300
committer	Borislav Petkov <bp@suse.de>		2020-01-08 13:29:25 +0300
commit		f444a5ff95dce07cf4353cbb85fc3e785019d430
tree		028332dd5f3c84140bc8ec91bc705b39f0546e1d /arch/x86/lib
parent		50cc02e599ef1e049473358604dc07d32b751e2c
download	linux-f444a5ff95dce07cf4353cbb85fc3e785019d430.tar.xz
x86/cpufeatures: Add support for fast short REP MOVSB
From the Intel Optimization Reference Manual:
3.7.6.1 Fast Short REP MOVSB
Beginning with processors based on Ice Lake Client microarchitecture,
REP MOVSB performance of short operations is enhanced. The enhancement
applies to string lengths between 1 and 128 bytes long. Support for
fast-short REP MOVSB is enumerated by the CPUID feature flag: CPUID
[EAX=7H, ECX=0H].EDX.FAST_SHORT_REP_MOVSB[bit 4] = 1. There is no change
in the REP STOS performance.
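As a quick aside (not part of this patch), the enumeration can be probed
from user space with the GCC/Clang <cpuid.h> helper; a minimal sketch:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID leaf 7, sub-leaf 0: FSRM is reported in EDX bit 4 */
		if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
			return 1;	/* leaf 7 not available */

		printf("FSRM: %s\n", (edx & (1u << 4)) ? "yes" : "no");
		return 0;
	}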
Add an X86_FEATURE_FSRM flag for this.
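The flag definition lives in arch/x86/include/asm/cpufeatures.h and is
outside the arch/x86/lib diffstat shown below; it amounts to a one-liner
along these lines (word/bit position assumed from the CPUID enumeration
above, not taken from this diff):

	#define X86_FEATURE_FSRM	(18*32 + 4)	/* Fast Short REP MOVSB: leaf-7 EDX word, bit 4 */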
memmove() avoids REP MOVSB for short (< 32 byte) copies. Check FSRM and
use REP MOVSB for short copies on systems that support it.
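As a rough illustration of the resulting dispatch, here is a user-space C
sketch; the fsrm/erms parameters are stand-ins for the alternatives
patching, and the backward/overlapping-copy handling of the real routine
is omitted:

	#include <stddef.h>

	/* Sketch only: fsrm/erms would come from the CPUID bits above; the
	 * real kernel code selects the path at patch time via ALTERNATIVE,
	 * not at run time. */
	void *copy_forward(void *dst, const void *src, size_t len, int fsrm, int erms)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;

		/* Pre-FSRM behaviour: skip REP MOVSB for copies under 32 bytes. */
		if (!fsrm && len < 32) {
			while (len--)
				*d++ = *s++;
			return dst;
		}

		/* ERMS path (implied by FSRM): one REP MOVSB covers any length. */
		if (erms) {
			__asm__ volatile("rep movsb"
					 : "+D"(d), "+S"(s), "+c"(len)
					 :
					 : "memory");
			return dst;
		}

		/* Fallback: the real routine uses a wider copy loop here. */
		while (len--)
			*d++ = *s++;
		return dst;
	}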
[ bp: Massage and add comment. ]
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20191216214254.26492-1-tony.luck@intel.com
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--	arch/x86/lib/memmove_64.S	7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 337830d7a59c..7ff00ea64e4f 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -29,10 +29,7 @@ SYM_FUNC_START_ALIAS(memmove)
 SYM_FUNC_START(__memmove)
 
-	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
-	cmp $0x20, %rdx
-	jb 1f
 
 	/* Decide forward/backward copy mode */
 	cmp %rdi, %rsi
@@ -42,7 +39,9 @@ SYM_FUNC_START(__memmove)
 	cmp %rdi, %r8
 	jg 2f
 
+	/* FSRM implies ERMS => no length checks, do the copy directly */
 .Lmemmove_begin_forward:
+	ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
 	ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
 
 	/*
@@ -114,6 +113,8 @@ SYM_FUNC_START(__memmove)
 	 */
	.p2align 4
 2:
+	cmp $0x20, %rdx
+	jb 1f
 	cmp $680, %rdx
 	jb 6f
 	cmp %dil, %sil
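Note on the resulting code paths: on CPUs with FSRM the first ALTERNATIVE
replaces the "cmp $0x20, %rdx; jb 1f" guard with NOPs, so the forward path
goes straight to REP MOVSB regardless of length; on ERMS-only CPUs the
guard stays and copies under 32 bytes still branch to the short-copy code
at label 1. The same guard is added in front of the backward-copy path at
label 2, which does not use REP MOVSB, so its handling of short copies is
unchanged.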