author		Linus Torvalds <torvalds@linux-foundation.org>	2019-07-20 20:45:15 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-20 20:45:15 +0300
commit		e6023adc5c6af79ac8ac5b17939f58091fa0d870 (patch)
tree		a6092129571a51fca4e06b7278ff5d6cfb009b7e /arch/x86
parent		4b01f5a4c9c4f0c502875c1fb31dcd5d0df86475 (diff)
parent		b68b9907069a8d3a65bc16a35360bf8f8603c8fa (diff)
download	linux-e6023adc5c6af79ac8ac5b17939f58091fa0d870.tar.xz
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core fixes from Thomas Gleixner:

 - A collection of objtool fixes which address recent fallout partially
   exposed by newer toolchains, clang, BPF and general code changes.

 - Force USER_DS for user stack traces

[ Note: the "objtool fixes" are not all to objtool itself, but for kernel
  code that triggers objtool warnings. Things like missing function size
  annotations, or code that confuses the unwinder etc.
                                                           - Linus ]

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  objtool: Support conditional retpolines
  objtool: Convert insn type to enum
  objtool: Fix seg fault on bad switch table entry
  objtool: Support repeated uses of the same C jump table
  objtool: Refactor jump table code
  objtool: Refactor sibling call detection logic
  objtool: Do frame pointer check before dead end check
  objtool: Change dead_end_function() to return boolean
  objtool: Warn on zero-length functions
  objtool: Refactor function alias logic
  objtool: Track original function across branches
  objtool: Add mcsafe_handle_tail() to the uaccess safe list
  bpf: Disable GCC -fgcse optimization for ___bpf_prog_run()
  x86/uaccess: Remove redundant CLACs in getuser/putuser error paths
  x86/uaccess: Don't leak AC flag into fentry from mcsafe_handle_tail()
  x86/uaccess: Remove ELF function annotation from copy_user_handle_tail()
  x86/head/64: Annotate start_cpu0() as non-callable
  x86/entry: Fix thunk function ELF sizes
  x86/kvm: Don't call kvm_spurious_fault() from .fixup
  x86/kvm: Replace vmx_vmenter()'s call to kvm_spurious_fault() with UD2
  ...
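Most of the fixes in this pull share one theme: code that leaves ELF symbol metadata incomplete, which objtool and the unwinder depend on. A minimal user-space sketch of the annotation pattern, not taken from the kernel tree (hypothetical symbol name; assumes GCC on x86-64):

/* A function defined entirely in inline asm, with full .type/.size
 * bookkeeping.  Without the .size directive, the symbol has length 0
 * in the object file — the condition objtool's new "Warn on
 * zero-length functions" check flags. */
asm(".pushsection .text, \"ax\"\n"
    ".globl demo_return_42\n"
    ".type demo_return_42, @function\n"
    "demo_return_42:\n"
    "	movl $42, %eax\n"
    "	ret\n"
    ".size demo_return_42, .-demo_return_42\n"	/* the part these fixes add */
    ".popsection");

extern int demo_return_42(void);

Dropping the .size line and inspecting the object with readelf -s should show the symbol with size 0, which also breaks address-to-function mapping for anything that lands inside the body.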
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/entry/thunk_64.S	|  5
-rw-r--r--	arch/x86/include/asm/kvm_host.h	| 34
-rw-r--r--	arch/x86/include/asm/paravirt.h	|  1
-rw-r--r--	arch/x86/kernel/head_64.S	|  4
-rw-r--r--	arch/x86/kernel/kvm.c	|  1
-rw-r--r--	arch/x86/kvm/emulate.c	| 44
-rw-r--r--	arch/x86/kvm/vmx/vmenter.S	|  6
-rw-r--r--	arch/x86/lib/copy_user_64.S	|  2
-rw-r--r--	arch/x86/lib/getuser.S	| 20
-rw-r--r--	arch/x86/lib/putuser.S	| 29
-rw-r--r--	arch/x86/lib/usercopy_64.c	|  2
11 files changed, 87 insertions, 61 deletions
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index cfdca8b42c70..cc20465b2867 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -12,9 +12,7 @@
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
- .globl \name
- .type \name, @function
-\name:
+ ENTRY(\name)
pushq %rbp
movq %rsp, %rbp
@@ -35,6 +33,7 @@
call \func
jmp .L_restore
+ ENDPROC(\name)
_ASM_NOKPROBE(\name)
.endm
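For reference, a rough hand-expansion of the macro pair as defined around this merge (a simplified sketch based on the 2019-era include/linux/linkage.h; alignment details elided), showing that the switch buys each thunk the .size directive it previously lacked:

/* Simplified sketch, assuming the 2019-era linkage.h definitions:
 *
 *   ENTRY(\name)    =>  .globl \name ; .p2align 4, 0x90 ; \name:
 *   ENDPROC(\name)  =>  .type \name, @function ; .size \name, .-\name
 */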
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0cc5b611a113..8282b8d41209 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1496,25 +1496,29 @@ enum {
#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+asmlinkage void __noreturn kvm_spurious_fault(void);
+
/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * instead the instruction is ignored.
*/
-asmlinkage void kvm_spurious_fault(void);
-
-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
- "666: " insn "\n\t" \
- "668: \n\t" \
- ".pushsection .fixup, \"ax\" \n" \
- "667: \n\t" \
- cleanup_insn "\n\t" \
- "cmpb $0, kvm_rebooting \n\t" \
- "jne 668b \n\t" \
- __ASM_SIZE(push) " $666b \n\t" \
- "jmp kvm_spurious_fault \n\t" \
- ".popsection \n\t" \
- _ASM_EXTABLE(666b, 667b)
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
+ "666: \n\t" \
+ insn "\n\t" \
+ "jmp 668f \n\t" \
+ "667: \n\t" \
+ "call kvm_spurious_fault \n\t" \
+ "668: \n\t" \
+ ".pushsection .fixup, \"ax\" \n\t" \
+ "700: \n\t" \
+ cleanup_insn "\n\t" \
+ "cmpb $0, kvm_rebooting\n\t" \
+ "je 667b \n\t" \
+ "jmp 668b \n\t" \
+ ".popsection \n\t" \
+ _ASM_EXTABLE(666b, 700b)
#define __kvm_handle_fault_on_reboot(insn) \
____kvm_handle_fault_on_reboot(insn, "")
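Reading the new macro linearly helps: the happy path executes insn and jumps over the call; only a fault routes through .fixup, which resumes at 668 when kvm_rebooting is set and otherwise branches back to call the (now __noreturn) kvm_spurious_fault() from ordinary .text rather than from .fixup. A user-space sketch of the same shape, with nop standing in for the privileged instruction, ud2 for the call, and a plain variable for kvm_rebooting (hypothetical names; builds with GCC on x86-64, the cleanup_insn and _ASM_EXTABLE() kernel machinery omitted):

char demo_rebooting;	/* stands in for kvm_rebooting */

asm(".pushsection .text, \"ax\"\n"
    ".globl demo_fault_on_reboot\n"
    ".type demo_fault_on_reboot, @function\n"
    "demo_fault_on_reboot:\n"
    "666:	nop\n"			/* insn: would be vmxoff & co.        */
    "	jmp 668f\n"			/* happy path: skip the fault call    */
    "667:	ud2\n"			/* stands in for: call kvm_spurious_fault */
    "668:	ret\n"
    ".pushsection .fixup, \"ax\"\n"	/* reached only via the (omitted)     */
    "700:\n"				/* _ASM_EXTABLE(666b, 700b) wiring    */
    "	cmpb $0, demo_rebooting(%rip)\n"
    "	je 667b\n"			/* not rebooting -> spurious fault    */
    "	jmp 668b\n"			/* rebooting -> ignore the insn       */
    ".popsection\n"
    ".size demo_fault_on_reboot, .-demo_fault_on_reboot\n"
    ".popsection");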
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c25c38a05c1c..d6f5ae2c79ab 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -746,6 +746,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
PV_RESTORE_ALL_CALLER_REGS \
FRAME_END \
"ret;" \
+ ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
".popsection")
/* Get a reference to a callee-save function */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index bcd206c8ac90..66b4a7757397 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -253,10 +253,10 @@ END(secondary_startup_64)
* start_secondary() via .Ljump_to_C_code.
*/
ENTRY(start_cpu0)
- movq initial_stack(%rip), %rsp
UNWIND_HINT_EMPTY
+ movq initial_stack(%rip), %rsp
jmp .Ljump_to_C_code
-ENDPROC(start_cpu0)
+END(start_cpu0)
#endif
/* Both SMP bootup and ACPI suspend change these variables */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 82caf01b63dd..6661bd2f08a6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -838,6 +838,7 @@ asm(
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
+".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");
#endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8e409ad448f9..718f7d9afedc 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -312,29 +312,42 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
-#define FOP_FUNC(name) \
+#define __FOP_FUNC(name) \
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t"
-#define FOP_RET "ret \n\t"
+#define FOP_FUNC(name) \
+ __FOP_FUNC(#name)
+
+#define __FOP_RET(name) \
+ "ret \n\t" \
+ ".size " name ", .-" name "\n\t"
+
+#define FOP_RET(name) \
+ __FOP_RET(#name)
#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
- FOP_FUNC("em_" #op)
+ ".align " __stringify(FASTOP_SIZE) " \n\t" \
+ "em_" #op ":\n\t"
#define FOP_END \
".popsection")
+#define __FOPNOP(name) \
+ __FOP_FUNC(name) \
+ __FOP_RET(name)
+
#define FOPNOP() \
- FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
- FOP_RET
+ __FOPNOP(__stringify(__UNIQUE_ID(nop)))
#define FOP1E(op, dst) \
- FOP_FUNC(#op "_" #dst) \
- "10: " #op " %" #dst " \n\t" FOP_RET
+ __FOP_FUNC(#op "_" #dst) \
+ "10: " #op " %" #dst " \n\t" \
+ __FOP_RET(#op "_" #dst)
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
@@ -366,8 +379,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
FOP_END
#define FOP2E(op, dst, src) \
- FOP_FUNC(#op "_" #dst "_" #src) \
- #op " %" #src ", %" #dst " \n\t" FOP_RET
+ __FOP_FUNC(#op "_" #dst "_" #src) \
+ #op " %" #src ", %" #dst " \n\t" \
+ __FOP_RET(#op "_" #dst "_" #src)
#define FASTOP2(op) \
FOP_START(op) \
@@ -405,8 +419,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
FOP_END
#define FOP3E(op, dst, src, src2) \
- FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
- #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
+ __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
+ #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
+ __FOP_RET(#op "_" #dst "_" #src "_" #src2)
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
@@ -423,7 +438,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
".type " #op ", @function \n\t" \
#op ": \n\t" \
#op " %al \n\t" \
- FOP_RET
+ __FOP_RET(#op)
asm(".pushsection .fixup, \"ax\"\n"
".global kvm_fastop_exception \n"
@@ -449,7 +464,10 @@ FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
-FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_START(salc)
+FOP_FUNC(salc)
+"pushf; sbb %al, %al; popf \n\t"
+FOP_RET(salc)
FOP_END;
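The restructuring is easier to see with the macros expanded. A user-space sketch of the new pairing (simplified: no FASTOP_SIZE alignment, no exception table; hypothetical names): each generated stub now opens with .type and closes with ret plus a matching .size, which is what gives the fastop stubs well-defined ELF sizes.

#define DEMO_FUNC(name)				\
	".type " name ", @function \n\t"	\
	name ":\n\t"

#define DEMO_RET(name)				\
	"ret \n\t"				\
	".size " name ", .-" name "\n\t"

/* Expands to a complete, properly sized one-instruction function,
 * analogous to what FOP1E(not, eax) generates in the hunk above. */
asm(".pushsection .text, \"ax\" \n\t"
    ".globl demo_not_eax \n\t"
    DEMO_FUNC("demo_not_eax")
    "not %eax \n\t"
    DEMO_RET("demo_not_eax")
    ".popsection");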
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index d4cb1945b2e3..4010d519eb8c 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -54,9 +54,9 @@ ENTRY(vmx_vmenter)
ret
3: cmpb $0, kvm_rebooting
- jne 4f
- call kvm_spurious_fault
-4: ret
+ je 4f
+ ret
+4: ud2
.pushsection .fixup, "ax"
5: jmp 3b
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 378a1f70ae7d..4fe1601dbc5d 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -239,7 +239,7 @@ copy_user_handle_tail:
ret
_ASM_EXTABLE_UA(1b, 2b)
-ENDPROC(copy_user_handle_tail)
+END(copy_user_handle_tail)
/*
* copy_user_nocache - Uncached memory copy with exception handling
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 74fdff968ea3..304f958c27b2 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -115,29 +115,29 @@ ENDPROC(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
+bad_get_user_clac:
+ ASM_CLAC
bad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
- ASM_CLAC
ret
-END(bad_get_user)
#ifdef CONFIG_X86_32
+bad_get_user_8_clac:
+ ASM_CLAC
bad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
- ASM_CLAC
ret
-END(bad_get_user_8)
#endif
- _ASM_EXTABLE_UA(1b, bad_get_user)
- _ASM_EXTABLE_UA(2b, bad_get_user)
- _ASM_EXTABLE_UA(3b, bad_get_user)
+ _ASM_EXTABLE_UA(1b, bad_get_user_clac)
+ _ASM_EXTABLE_UA(2b, bad_get_user_clac)
+ _ASM_EXTABLE_UA(3b, bad_get_user_clac)
#ifdef CONFIG_X86_64
- _ASM_EXTABLE_UA(4b, bad_get_user)
+ _ASM_EXTABLE_UA(4b, bad_get_user_clac)
#else
- _ASM_EXTABLE_UA(4b, bad_get_user_8)
- _ASM_EXTABLE_UA(5b, bad_get_user_8)
+ _ASM_EXTABLE_UA(4b, bad_get_user_8_clac)
+ _ASM_EXTABLE_UA(5b, bad_get_user_8_clac)
#endif
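The pattern here (and in putuser.S below) is the same: ASM_CLAC moves out of the shared error body into a new label that only the exception-table entries target, with execution falling through into the original label. Faults arriving from inside the STAC region still clear AC exactly once, while the direct branches from the address-limit checks earlier in the file (not shown in this hunk), where AC was never set, no longer execute a redundant CLAC. A user-space sketch of the fall-through split (hypothetical names; nop stands in for ASM_CLAC):

asm(".pushsection .text, \"ax\"\n"
    ".globl demo_bad_user_clac\n"
    ".globl demo_bad_user\n"
    "demo_bad_user_clac:\n"
    "	nop\n"			/* ASM_CLAC: clear EFLAGS.AC          */
    "demo_bad_user:\n"		/* fall-through into the shared tail  */
    "	movl $-14, %eax\n"	/* -EFAULT                            */
    "	ret\n"
    ".popsection");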
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index d2e5c9c39601..14bf78341d3c 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -32,8 +32,6 @@
*/
#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX
-#define EXIT ASM_CLAC ; \
- ret
.text
ENTRY(__put_user_1)
@@ -43,7 +41,8 @@ ENTRY(__put_user_1)
ASM_STAC
1: movb %al,(%_ASM_CX)
xor %eax,%eax
- EXIT
+ ASM_CLAC
+ ret
ENDPROC(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
@@ -56,7 +55,8 @@ ENTRY(__put_user_2)
ASM_STAC
2: movw %ax,(%_ASM_CX)
xor %eax,%eax
- EXIT
+ ASM_CLAC
+ ret
ENDPROC(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
@@ -69,7 +69,8 @@ ENTRY(__put_user_4)
ASM_STAC
3: movl %eax,(%_ASM_CX)
xor %eax,%eax
- EXIT
+ ASM_CLAC
+ ret
ENDPROC(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
@@ -85,19 +86,21 @@ ENTRY(__put_user_8)
5: movl %edx,4(%_ASM_CX)
#endif
xor %eax,%eax
- EXIT
+ ASM_CLAC
+ ret
ENDPROC(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
+bad_put_user_clac:
+ ASM_CLAC
bad_put_user:
movl $-EFAULT,%eax
- EXIT
-END(bad_put_user)
+ ret
- _ASM_EXTABLE_UA(1b, bad_put_user)
- _ASM_EXTABLE_UA(2b, bad_put_user)
- _ASM_EXTABLE_UA(3b, bad_put_user)
- _ASM_EXTABLE_UA(4b, bad_put_user)
+ _ASM_EXTABLE_UA(1b, bad_put_user_clac)
+ _ASM_EXTABLE_UA(2b, bad_put_user_clac)
+ _ASM_EXTABLE_UA(3b, bad_put_user_clac)
+ _ASM_EXTABLE_UA(4b, bad_put_user_clac)
#ifdef CONFIG_X86_32
- _ASM_EXTABLE_UA(5b, bad_put_user)
+ _ASM_EXTABLE_UA(5b, bad_put_user_clac)
#endif
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index e0e006f1624e..fff28c6f73a2 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(clear_user);
* but reuse __memcpy_mcsafe in case a new read error is encountered.
* clac() is handled in _copy_to_iter_mcsafe().
*/
-__visible unsigned long
+__visible notrace unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len)
{
for (; len; --len, to++, from++) {
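mcsafe_handle_tail() is entered from within a STAC region, so with function tracing enabled the compiler-inserted fentry/mcount call at its entry would run with EFLAGS.AC still set. The notrace annotation suppresses that instrumentation for just this function. A hedged user-space sketch of the attribute the kernel's notrace macro essentially expands to (hypothetical function; build with gcc -pg and compare the generated code with and without the attribute):

/* With -pg, GCC emits no mcount/fentry call for functions carrying
 * no_instrument_function — this is what keeps AC from leaking into
 * the tracer. */
#define notrace __attribute__((no_instrument_function))

notrace unsigned long demo_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++, from++)
		*to = *from;	/* plain byte copy, as in the hunk above */
	return len;		/* 0 remaining on success */
}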