author    Dominik Brodowski <linux@dominikbrodowski.net>  2018-03-07 00:18:08 +0300
committer Ingo Molnar <mingo@kernel.org>                  2018-03-07 09:57:31 +0300
commit    af52201d991624d2d5adce2c123805b3d42a8d4d (patch)
tree      6fcb0ec95c5ad0c7fff5dd51abb8df12c623dc53 /arch/x86/entry
parent    4ddb45db30851b2269101ee8969f079b028dd257 (diff)
x86/entry: Do not special-case clone(2) in compat entry
With the CPU renaming registers on its own, and all the overhead of the
syscall entry/exit, it is doubtful whether the compiled output of

	mov	%r8, %rax
	mov	%rcx, %r8
	mov	%rax, %rcx
	jmpq	sys_clone

is measurably slower than the hand-crafted version of

	xchg	%r8, %rcx

So get rid of this special case.

Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: luto@amacapital.net
Cc: viro@zeniv.linux.org.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
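The syscall table change below points the compat entry at compat_sys_x86_clone,
a plain C wrapper. Its definition lands outside arch/x86/entry and therefore
does not appear in this view (the diffstat is limited to that directory). A
minimal sketch of such a wrapper, assuming the kernel's COMPAT_SYSCALL_DEFINE5
machinery and the 64-bit sys_clone() prototype, would be:

	#include <linux/compat.h>
	#include <linux/syscalls.h>

	/*
	 * Sketch only: the 32-bit ABI passes tls_val before child_tidptr,
	 * while the native 64-bit sys_clone() expects them the other way
	 * around, so the wrapper just reorders the last two arguments.
	 */
	COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
			       unsigned long, newsp, int __user *, parent_tidptr,
			       unsigned long, tls_val, int __user *, child_tidptr)
	{
		return sys_clone(clone_flags, newsp, parent_tidptr,
				 child_tidptr, tls_val);
	}

The compiler is then free to emit the argument shuffle itself, which is what
the commit message argues costs nothing measurable next to syscall entry/exit
overhead.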
Diffstat (limited to 'arch/x86/entry')
-rw-r--r--  arch/x86/entry/entry_64_compat.S        12
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl   2
2 files changed, 1 insertion(+), 13 deletions(-)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e811dd9c5e99..ff61b7bb750b 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -406,15 +406,3 @@ ENTRY(entry_INT80_compat)
TRACE_IRQS_ON
jmp swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
- /*
- * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
- * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
- *
- * The native 64-bit kernel's sys_clone() implements the latter,
- * so we need to swap arguments here before calling it:
- */
- xchg %r8, %rcx
- jmp sys_clone
-ENDPROC(stub32_clone)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index e7fd0a76bf99..2a5e99cff859 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -126,7 +126,7 @@
117 i386 ipc sys_ipc compat_sys_ipc
118 i386 fsync sys_fsync
119 i386 sigreturn sys_sigreturn sys32_sigreturn
-120 i386 clone sys_clone stub32_clone
+120 i386 clone sys_clone compat_sys_x86_clone
121 i386 setdomainname sys_setdomainname
122 i386 uname sys_newuname
123 i386 modify_ldt sys_modify_ldt
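For context (not part of the diff): each row in syscall_32.tbl follows the
format

	<number> <abi> <name> <entry point> <compat entry point>

where the fourth column is the entry point used by native 32-bit kernels and
the optional fifth column is the compat entry point used by 64-bit kernels
running 32-bit tasks. This change therefore only swaps the compat entry point
for clone; the native sys_clone entry is untouched.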