author		Andy Lutomirski <luto@kernel.org>	2016-04-26 22:23:28 +0300
committer	Ingo Molnar <mingo@kernel.org>		2016-04-29 12:56:41 +0300
commit		731e33e39a5b95ad77017811b3ced32ecf9dc666 (patch)
tree		2af8619a78d1713abee9a4bcebf0fc5b453ef536 /arch/x86/kernel/process_64.c
parent		b038c842b385f1470f991078e71b7c5b084a7341 (diff)
download	linux-731e33e39a5b95ad77017811b3ced32ecf9dc666.tar.xz
x86/arch_prctl/64: Remove FSBASE/GSBASE < 4G optimization
As far as I know, the optimization doesn't work on any modern distro
because modern distros use high addresses for ASLR. Remove it.
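To see why the fast path is dead code, one can print where the C runtime's TLS ends up (an illustrative userspace check, assuming glibc on x86-64; not part of the patch). On a modern distro the address normally lands well above 0xffffffff, so the "small base" GDT branch removed below never runs:

	#include <stdio.h>

	/* A __thread variable lives in the FS-relative TLS block, so its
	 * address approximates the FSBASE the runtime asked for. */
	static __thread int tls_probe;

	int main(void)
	{
		printf("TLS variable at %p\n", (void *)&tls_probe);
		return 0;
	}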
The ptrace code was either wrong or very strange, but the behavior
with this patch should be essentially identical to the behavior
without this patch unless user code goes out of its way to mislead
ptrace.
On newer CPUs, once the FSGSBASE instructions are enabled, we won't
want to use the optimized variant anyway.
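For reference, user code can drive those instructions through compiler intrinsics; a minimal sketch, assuming a CPU with the FSGSBASE extension and a kernel that sets CR4.FSGSBASE (mainline did not yet do so when this patch was written; without it the instructions raise #UD). Build with gcc -mfsgsbase:

	#include <immintrin.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long base = _readfsbase_u64();	/* RDFSBASE */
		printf("fs base: %#lx\n", base);
		_writefsbase_u64(base);	/* WRFSBASE: set the base, no syscall */
		return 0;
	}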
This isn't much of a performance regression: it has no effect on normal
dynamically linked programs, and it's a considerable simplification. It
also removes some nasty special cases from code that is already way too
full of special cases for comfort.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/dd1599b08866961dba9d2458faa6bbd7fba471d7.1461698311.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/process_64.c')
-rw-r--r--	arch/x86/kernel/process_64.c	71
1 file changed, 11 insertions(+), 60 deletions(-)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 24d1b7fb4399..864fe2cde3c6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -136,25 +136,6 @@ void release_thread(struct task_struct *dead_task)
 	}
 }
 
-static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
-{
-	struct user_desc ud = {
-		.base_addr = addr,
-		.limit = 0xfffff,
-		.seg_32bit = 1,
-		.limit_in_pages = 1,
-		.useable = 1,
-	};
-	struct desc_struct *desc = t->thread.tls_array;
-	desc += tls;
-	fill_ldt(desc, &ud);
-}
-
-static inline u32 read_32bit_tls(struct task_struct *t, int tls)
-{
-	return get_desc_base(&t->thread.tls_array[tls]);
-}
-
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct *p, unsigned long tls)
 {
@@ -554,25 +535,12 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (addr >= TASK_SIZE_OF(task))
 			return -EPERM;
 		cpu = get_cpu();
-		/* handle small bases via the GDT because that's faster to
-		   switch. */
-		if (addr <= 0xffffffff) {
-			set_32bit_tls(task, GS_TLS, addr);
-			if (doit) {
-				load_TLS(&task->thread, cpu);
-				load_gs_index(GS_TLS_SEL);
-			}
-			task->thread.gsindex = GS_TLS_SEL;
-			task->thread.gs = 0;
-		} else {
-			task->thread.gsindex = 0;
-			task->thread.gs = addr;
-			if (doit) {
-				load_gs_index(0);
-				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
-			}
+		task->thread.gsindex = 0;
+		task->thread.gs = addr;
+		if (doit) {
+			load_gs_index(0);
+			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
 		}
-		put_cpu();
 		break;
 	case ARCH_SET_FS:
 		/* Not strictly needed for fs, but do it for symmetry
@@ -580,25 +548,12 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (addr >= TASK_SIZE_OF(task))
 			return -EPERM;
 		cpu = get_cpu();
-		/* handle small bases via the GDT because that's faster to
-		   switch. */
-		if (addr <= 0xffffffff) {
-			set_32bit_tls(task, FS_TLS, addr);
-			if (doit) {
-				load_TLS(&task->thread, cpu);
-				loadsegment(fs, FS_TLS_SEL);
-			}
-			task->thread.fsindex = FS_TLS_SEL;
-			task->thread.fs = 0;
-		} else {
-			task->thread.fsindex = 0;
-			task->thread.fs = addr;
-			if (doit) {
-				/* set the selector to 0 to not confuse
-				   __switch_to */
-				loadsegment(fs, 0);
-				ret = wrmsrl_safe(MSR_FS_BASE, addr);
-			}
+		task->thread.fsindex = 0;
+		task->thread.fs = addr;
+		if (doit) {
+			/* set the selector to 0 to not confuse __switch_to */
+			loadsegment(fs, 0);
+			ret = wrmsrl_safe(MSR_FS_BASE, addr);
 		}
 		put_cpu();
 		break;
@@ -606,8 +561,6 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		unsigned long base;
 		if (doit)
 			rdmsrl(MSR_FS_BASE, base);
-		else if (task->thread.fsindex == FS_TLS_SEL)
-			base = read_32bit_tls(task, FS_TLS);
 		else
 			base = task->thread.fs;
 		ret = put_user(base, (unsigned long __user *)addr);
@@ -617,8 +570,6 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		unsigned long base;
 		if (doit)
 			rdmsrl(MSR_KERNEL_GS_BASE, base);
-		else if (task->thread.gsindex == GS_TLS_SEL)
-			base = read_32bit_tls(task, GS_TLS);
 		else
 			base = task->thread.gs;
 		ret = put_user(base, (unsigned long __user *)addr);
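For completeness, a minimal sketch (assuming an x86-64 Linux host; not part of the patch) that round-trips the modified path: with this change, ARCH_SET_GS always goes through MSR_KERNEL_GS_BASE, whether or not the base fits in 32 bits:

	#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		static unsigned long slot;	/* any valid user address */
		unsigned long base = 0;

		/* do_arch_prctl() now unconditionally takes the MSR route. */
		if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)&slot))
			perror("ARCH_SET_GS");
		if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base))
			perror("ARCH_GET_GS");
		printf("gs base: %#lx (expected %#lx)\n",
		       base, (unsigned long)&slot);
		return 0;
	}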