Diffstat (limited to 'arch/x86/lib')
-rw-r--r-- | arch/x86/lib/copy_user_64.S | 87
-rw-r--r-- | arch/x86/lib/insn-eval.c    |  5
-rw-r--r-- | arch/x86/lib/kaslr.c        |  2
-rw-r--r-- | arch/x86/lib/mmx_32.c       |  0
-rw-r--r-- | arch/x86/lib/putuser.S      |  4
-rw-r--r-- | arch/x86/lib/retpoline.S    |  2
-rw-r--r-- | arch/x86/lib/usercopy_64.c  |  2
7 files changed, 62 insertions, 40 deletions
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 8ca5ecf16dc4..9dec1b38a98f 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -53,12 +53,12 @@ SYM_FUNC_START(copy_user_generic_unrolled)
 	ASM_STAC
 	cmpl $8,%edx
-	jb 20f		/* less then 8 bytes, go to byte copy loop */
+	jb .Lcopy_user_short_string_bytes
 	ALIGN_DESTINATION
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz .L_copy_short_string
+	jz copy_user_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
 4:	movq 3*8(%rsi),%r11
@@ -79,37 +79,11 @@ SYM_FUNC_START(copy_user_generic_unrolled)
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-.L_copy_short_string:
-	movl %edx,%ecx
-	andl $7,%edx
-	shrl $3,%ecx
-	jz 20f
-18:	movq (%rsi),%r8
-19:	movq %r8,(%rdi)
-	leaq 8(%rsi),%rsi
-	leaq 8(%rdi),%rdi
-	decl %ecx
-	jnz 18b
-20:	andl %edx,%edx
-	jz 23f
-	movl %edx,%ecx
-21:	movb (%rsi),%al
-22:	movb %al,(%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz 21b
-23:	xor %eax,%eax
-	ASM_CLAC
-	RET
+	jmp copy_user_short_string
 
 30:	shll $6,%ecx
 	addl %ecx,%edx
-	jmp 60f
-40:	leal (%rdx,%rcx,8),%edx
-	jmp 60f
-50:	movl %ecx,%edx
-60:	jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
+	jmp .Lcopy_user_handle_tail
 
 	_ASM_EXTABLE_CPY(1b, 30b)
 	_ASM_EXTABLE_CPY(2b, 30b)
@@ -127,10 +101,6 @@ SYM_FUNC_START(copy_user_generic_unrolled)
 	_ASM_EXTABLE_CPY(14b, 30b)
 	_ASM_EXTABLE_CPY(15b, 30b)
 	_ASM_EXTABLE_CPY(16b, 30b)
-	_ASM_EXTABLE_CPY(18b, 40b)
-	_ASM_EXTABLE_CPY(19b, 40b)
-	_ASM_EXTABLE_CPY(21b, 50b)
-	_ASM_EXTABLE_CPY(22b, 50b)
 SYM_FUNC_END(copy_user_generic_unrolled)
 EXPORT_SYMBOL(copy_user_generic_unrolled)
 
@@ -191,7 +161,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
 SYM_FUNC_START(copy_user_enhanced_fast_string)
 	ASM_STAC
 	/* CPUs without FSRM should avoid rep movsb for short copies */
-	ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
+	ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM
 	movl %edx,%ecx
 1:	rep movsb
 	xorl %eax,%eax
@@ -244,6 +214,53 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
 SYM_CODE_END(.Lcopy_user_handle_tail)
 
 /*
+ * Finish memcpy of less than 64 bytes. #AC should already be set.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count (< 64)
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+SYM_CODE_START_LOCAL(copy_user_short_string)
+	movl %edx,%ecx
+	andl $7,%edx
+	shrl $3,%ecx
+	jz .Lcopy_user_short_string_bytes
+18:	movq (%rsi),%r8
+19:	movq %r8,(%rdi)
+	leaq 8(%rsi),%rsi
+	leaq 8(%rdi),%rdi
+	decl %ecx
+	jnz 18b
+.Lcopy_user_short_string_bytes:
+	andl %edx,%edx
+	jz 23f
+	movl %edx,%ecx
+21:	movb (%rsi),%al
+22:	movb %al,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz 21b
+23:	xor %eax,%eax
+	ASM_CLAC
+	RET
+
+40:	leal (%rdx,%rcx,8),%edx
+	jmp 60f
+50:	movl %ecx,%edx	/* ecx is zerorest also */
+60:	jmp .Lcopy_user_handle_tail
+
+	_ASM_EXTABLE_CPY(18b, 40b)
+	_ASM_EXTABLE_CPY(19b, 40b)
+	_ASM_EXTABLE_CPY(21b, 50b)
+	_ASM_EXTABLE_CPY(22b, 50b)
+SYM_CODE_END(copy_user_short_string)
+
+/*
  * copy_user_nocache - Uncached memory copy with exception handling
  * This will force destination out of cache for more performance.
  *
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index b781d324211b..21104c41cba0 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -342,9 +342,9 @@ static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff)
  */
 static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
 {
-#ifdef CONFIG_X86_64
 	unsigned short sel;
 
+#ifdef CONFIG_X86_64
 	switch (seg_reg_idx) {
 	case INAT_SEG_REG_IGNORE:
 		return 0;
@@ -402,7 +402,8 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
 	case INAT_SEG_REG_FS:
 		return (unsigned short)(regs->fs & 0xffff);
 	case INAT_SEG_REG_GS:
-		return get_user_gs(regs);
+		savesegment(gs, sel);
+		return sel;
 	case INAT_SEG_REG_IGNORE:
 	default:
 		return -EINVAL;
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 2b3eb8c948a3..a58f451a7dd3 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -11,7 +11,7 @@
 #include <asm/msr.h>
 #include <asm/archrandom.h>
 #include <asm/e820/api.h>
-#include <asm/io.h>
+#include <asm/shared/io.h>
 
 /*
  * When built for the regular kernel, several functions need to be stubbed out
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/x86/lib/mmx_32.c
+++ /dev/null
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index ecb2049c1273..b7dfd60243b7 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -48,6 +48,7 @@ SYM_FUNC_START(__put_user_1)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -62,6 +63,7 @@ SYM_FUNC_START(__put_user_2)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -76,6 +78,7 @@ SYM_FUNC_START(__put_user_4)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
 	xor %ecx,%ecx
@@ -90,6 +93,7 @@ SYM_FUNC_START(__put_user_8)
 	cmp %_ASM_BX,%_ASM_CX
 	jae .Lbad_put_user
 SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL)
+	ENDBR
 	ASM_STAC
 4:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 5f87bab4fb8d..b2b2366885a2 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -31,6 +31,7 @@
 	.align RETPOLINE_THUNK_SIZE
 SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 
 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
 		      __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
@@ -55,7 +56,6 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 
 	.align RETPOLINE_THUNK_SIZE
 SYM_CODE_START(__x86_indirect_thunk_array)
-	ANNOTATE_NOENDBR // apply_retpolines
 
 #define GEN(reg) THUNK reg
 #include <asm/GEN-for-each-reg.h>
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0402a749f3a0..0ae6cf804197 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -119,7 +119,7 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
 
 	/* cache copy and flush to align dest */
 	if (!IS_ALIGNED(dest, 8)) {
-		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
+		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);
 
 		memcpy((void *) dest, (void *) source, len);
 		clean_cache_range((void *) dest, len);
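
A note for readers following the copy_user_64.S change: the block carved out as copy_user_short_string() is easier to see in C. The sketch below is only an illustrative analogue under stated assumptions (hypothetical name, no fault handling); the real helper stays in assembly precisely because every load and store needs an _ASM_EXTABLE_CPY entry so user-space faults can be recovered, which plain C cannot express.

/*
 * Illustrative C analogue of copy_user_short_string(): copy a
 * sub-64-byte tail as 8-byte words, then as single bytes, and return
 * the number of uncopied bytes (always 0 here, since this sketch
 * cannot fault).
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t copy_short_string_sketch(void *dst, const void *src, size_t count)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	size_t words = count >> 3;	/* shrl $3,%ecx */
	size_t bytes = count & 7;	/* andl $7,%edx */

	while (words--) {		/* 18:/19: quadword loop */
		uint64_t tmp;

		memcpy(&tmp, s, sizeof(tmp));
		memcpy(d, &tmp, sizeof(tmp));
		s += 8;
		d += 8;
	}
	while (bytes--)			/* 21:/22: byte loop */
		*d++ = *s++;

	return 0;			/* eax: uncopied bytes */
}

int main(void)
{
	char src[] = "0123456789abcdefghijklmnopqrstu";	/* < 64 bytes */
	char dst[sizeof(src)] = { 0 };

	return copy_short_string_sketch(dst, src, sizeof(src)) == 0 &&
	       dst[5] == '5' ? 0 : 1;
}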
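
Similarly, the usercopy_64.c hunk carries a type subtlety worth spelling out: min_t() casts both operands to the named type before comparing. The toy program below (stand-in min_t()/ALIGN_UP() macros and made-up values, not kernel code, assuming a 64-bit size_t) shows how the old 'unsigned' form could, at least in principle, truncate a size just past a 4 GiB multiple and under-size the alignment head, while the size_t form cannot.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's min_t() and ALIGN(), for illustration only. */
#define min_t(type, a, b)	((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t dest = 0x1003;		/* misaligned: 5-byte head up to 0x1008 */
	size_t size = (size_t)0x100000002ULL;	/* just past a 4 GiB multiple */

	unsigned old_len = min_t(unsigned, size, ALIGN_UP(dest, 8) - dest);
	size_t new_len = min_t(size_t, size, ALIGN_UP(dest, 8) - dest);

	printf("old (unsigned) head length: %u\n", old_len);	/* 2: size truncated */
	printf("new (size_t) head length:   %zu\n", new_len);	/* 5: full alignment head */
	return 0;
}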