author     Linus Torvalds <torvalds@linux-foundation.org>   2023-04-15 23:39:15 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-04-19 03:05:28 +0300
commit     3639a535587d7aac449cdce9710dfdc97a3c8c8e (patch)
tree       ec5bb3c10ed99a83143a66979a6c0af529ea6daa /arch/x86
parent     d2c95f9d6802cc518d71d9795f4d9da54fb4e24d (diff)
download   linux-3639a535587d7aac449cdce9710dfdc97a3c8c8e.tar.xz
x86: move stac/clac from user copy routines into callers
This is preparatory work for inlining the 'rep movs' case, but also a
cleanup. The __copy_user_nocache() function was mis-used by the rdma
code to do uncached kernel copies that don't actually want user copies
at all, and as a result don't want the stac/clac either.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
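For context: stac and clac set and clear EFLAGS.AC, the flag that tells SMAP (Supervisor Mode Access Prevention) to permit kernel accesses to user-space pages. Below is a minimal sketch of the two kinds of caller the patch distinguishes; the wrapper names copy_from_user_nt() and copy_kernel_nt() are illustrative stand-ins, only stac(), clac() and __copy_user_nocache() come from the patch itself:

```c
#include <linux/uaccess.h>	/* on x86, pulls in stac()/clac() via asm/smap.h */

/*
 * A genuine user copy: after this patch, the *caller* opens and closes
 * the SMAP window around the low-level copy routine.
 */
static long copy_from_user_nt(void *dst, const void __user *src, unsigned size)
{
	long ret;

	stac();		/* EFLAGS.AC = 1: user-space accesses permitted */
	ret = __copy_user_nocache(dst, src, size, 0);
	clac();		/* EFLAGS.AC = 0: user-space accesses fault again */
	return ret;	/* bytes left uncopied, 0 on success */
}

/*
 * A kernel-to-kernel copy (the rdma-style use the commit message
 * describes): no user pages are touched, so no stac()/clac() pair is
 * wanted or needed.
 */
static long copy_kernel_nt(void *dst, const void *src, unsigned size)
{
	return __copy_user_nocache(dst, (__force const void __user *)src,
				   size, 0);
}
```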
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/uaccess_64.h |  8 +++++++-
-rw-r--r--  arch/x86/lib/copy_user_64.S       | 10 ----------
-rw-r--r--  arch/x86/lib/usercopy_64.c        |  6 +++++-
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 59ea54af505e..339883729065 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -27,6 +27,7 @@ copy_user_generic(void *to, const void *from, unsigned len)
 {
 	unsigned ret;
 
+	stac();
 	/*
 	 * If CPU has FSRM feature, use 'rep movs'.
 	 * Otherwise, use copy_user_generic_unrolled.
@@ -38,6 +39,7 @@ copy_user_generic(void *to, const void *from, unsigned len)
 				     "=d" (len)),
 		      "1" (to), "2" (from), "3" (len)
 		      : "memory", "rcx", "r8", "r9", "r10", "r11");
+	clac();
 	return ret;
 }
 
@@ -64,8 +66,12 @@ static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 				  unsigned size)
 {
+	long ret;
 	kasan_check_write(dst, size);
-	return __copy_user_nocache(dst, src, size, 0);
+	stac();
+	ret = __copy_user_nocache(dst, src, size, 0);
+	clac();
+	return ret;
 }
 
 static inline int
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index d0283bc7567d..818f2f728294 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -51,7 +51,6 @@
  * eax uncopied bytes or 0 if successful.
  */
 SYM_FUNC_START(copy_user_generic_unrolled)
-	ASM_STAC
 	cmpl $8,%edx
 	jb .Lcopy_user_short_string_bytes
 	ALIGN_DESTINATION
@@ -123,15 +122,12 @@ EXPORT_SYMBOL(copy_user_generic_unrolled)
  * eax uncopied bytes or 0 if successful.
  */
 SYM_FUNC_START(copy_user_fast_string)
-	ASM_STAC
 	movl %edx,%ecx
 1:	rep movsb
 	xorl %eax,%eax
-	ASM_CLAC
 	RET
 
 12:	movl %ecx,%eax		/* ecx is zerorest also */
-	ASM_CLAC
 	RET
 
 	_ASM_EXTABLE_CPY(1b, 12b)
@@ -160,12 +156,10 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
 	movl %edx,%ecx
 1:	rep movsb
 2:	mov %ecx,%eax
-	ASM_CLAC
 	RET
 
 3:	movl %edx,%eax
-	ASM_CLAC
 	RET
 
 	_ASM_EXTABLE_CPY(1b, 2b)
 
@@ -209,7 +203,6 @@ SYM_CODE_START_LOCAL(copy_user_short_string)
 	decl %ecx
 	jnz 21b
 23:	xor %eax,%eax
-	ASM_CLAC
 	RET
 
 40:	leal (%rdx,%rcx,8),%edx
@@ -233,8 +226,6 @@ SYM_CODE_END(copy_user_short_string)
  * - Require 4-byte alignment when size is 4 bytes.
  */
 SYM_FUNC_START(__copy_user_nocache)
-	ASM_STAC
-
 	/* If size is less than 8 bytes, go to 4-byte copy */
 	cmpl $8,%edx
 	jb .L_4b_nocache_copy_entry
@@ -327,7 +318,6 @@ SYM_FUNC_START(__copy_user_nocache)
 	/* Finished copying; fence the prior stores */
 .L_finish_copy:
 	xorl %eax,%eax
-	ASM_CLAC
 	sfence
 	RET
 
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 6c1f8ac5e721..15704c605a2b 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -45,7 +45,11 @@ EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 {
 	unsigned long flushed, dest = (unsigned long) dst;
-	long rc = __copy_user_nocache(dst, src, size, 0);
+	long rc;
+
+	stac();
+	rc = __copy_user_nocache(dst, src, size, 0);
+	clac();
 
 	/*
 	 * __copy_user_nocache() uses non-temporal stores for the bulk
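Two details of the diff are worth calling out. In the copy_user_64.S hunks, ASM_CLAC had to be repeated before every RET (the success path, each fault-fixup label, the tail handler), so hoisting the pair into the C callers removes a class of easy-to-miss exit paths in addition to preparing for inlining. And in the usercopy_64.c hunk, the stac()/clac() window stays as narrow as possible: only the user access runs inside it, while the cache flushing that follows operates on kernel addresses with the window already closed.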