author		Ingo Molnar <mingo@kernel.org>	2015-04-27 06:52:40 +0300
committer	Ingo Molnar <mingo@kernel.org>	2015-05-19 16:47:50 +0300
commit		c4d6ee6e2e52ec604cc1d76877791f8e8f5c79b5
tree		b0287cb4c3ef60a3c2e84503eabe5edca88820e2 /arch/x86
parent		7366ed771f6ed95e4c4525c335722888a83b4b6c
x86/fpu: Remove failure paths from fpstate-alloc low level functions
Now that we always allocate the FPU context as part of task_struct, there's
no need for separate allocations - remove them and their primary failure
handling code.
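( Roughly, the layout change that makes this possible, as a simplified
sketch rather than the exact kernel definitions:

	/* Simplified sketch; the real fields live in the fpu headers
	 * and differ in detail: */
	union thread_xstate { unsigned char storage[512]; };

	/* Before: the xstate was allocated separately from the
	 * task_xstate slab cache, so FPU activation had a failure path: */
	struct fpu_with_alloc {
		union thread_xstate *state;	/* kmem_cache_alloc(), can fail */
	};

	/* After: the xstate is embedded and lives and dies with
	 * task_struct, so there is nothing to allocate or free: */
	struct fpu_embedded {
		union thread_xstate state;	/* always present, 16-byte aligned */
	};
)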
( Note that there are still secondary error codes that have become superfluous;
those will be removed in separate patches. )
Move the somewhat misplaced setup_xstate_comp() call to the core.
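( Condensed from the init.c hunk below: setup_xstate_comp(), which sets up
the compacted-format xstate offsets, only needs to run once, so boot-time
FPU init is its natural home:

	void fpu__init_system(struct cpuinfo_x86 *c)
	{
		/* ... earlier init steps elided ... */
		fpu__init_system_xstate();
		setup_xstate_comp();	/* moved here from the removed fpstate_cache_init() */

		fpu__init_system_ctx_switch();
	}
)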
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/fpu/internal.h	 4
-rw-r--r--	arch/x86/kernel/fpu/core.c		51
-rw-r--r--	arch/x86/kernel/fpu/init.c		 1
-rw-r--r--	arch/x86/kernel/process.c		10
-rw-r--r--	arch/x86/kvm/x86.c			11
5 files changed, 3 insertions(+), 74 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 4ce830fb3f31..9454f21f0edf 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -558,10 +558,6 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 	}
 }
 
-extern void fpstate_cache_init(void);
-
-extern int fpstate_alloc(struct fpu *fpu);
-extern void fpstate_free(struct fpu *fpu);
 extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
 
 static inline unsigned long
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 1697a9a34ff0..6b8d3e1b6ef8 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -226,34 +226,6 @@ void fpstate_init(struct fpu *fpu)
 EXPORT_SYMBOL_GPL(fpstate_init);
 
 /*
- * FPU state allocation:
- */
-static struct kmem_cache *task_xstate_cachep;
-
-void fpstate_cache_init(void)
-{
-	task_xstate_cachep =
-		kmem_cache_create("task_xstate", xstate_size,
-				  __alignof__(union thread_xstate),
-				  SLAB_PANIC | SLAB_NOTRACK, NULL);
-	setup_xstate_comp();
-}
-
-int fpstate_alloc(struct fpu *fpu)
-{
-	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
-	WARN_ON((unsigned long)&fpu->state & 15);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fpstate_alloc);
-
-void fpstate_free(struct fpu *fpu)
-{
-}
-EXPORT_SYMBOL_GPL(fpstate_free);
-
-/*
  * Copy the current task's FPU state to a new task's FPU context.
  *
  * In the 'eager' case we just save to the destination context.
@@ -280,13 +252,9 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (src_fpu->fpstate_active) {
-		int err = fpstate_alloc(dst_fpu);
-
-		if (err)
-			return err;
+	if (src_fpu->fpstate_active)
 		fpu_copy(dst_fpu, src_fpu);
-	}
+
 	return 0;
 }
 
@@ -305,13 +273,6 @@ int fpstate_alloc_init(struct fpu *fpu)
 	if (WARN_ON_ONCE(fpu->fpstate_active))
 		return -EINVAL;
 
-	/*
-	 * Memory allocation at the first usage of the FPU and other state.
-	 */
-	ret = fpstate_alloc(fpu);
-	if (ret)
-		return ret;
-
 	fpstate_init(fpu);
 
 	/* Safe to do for the current task: */
@@ -356,13 +317,6 @@ static int fpu__unlazy_stopped(struct fpu *child_fpu)
 		return 0;
 	}
 
-	/*
-	 * Memory allocation at the first usage of the FPU and other state.
-	 */
-	ret = fpstate_alloc(child_fpu);
-	if (ret)
-		return ret;
-
 	fpstate_init(child_fpu);
 
 	/* Safe to do for stopped child tasks: */
@@ -423,7 +377,6 @@ void fpu__clear(struct task_struct *tsk)
 	if (!use_eager_fpu()) {
 		/* FPU state will be reallocated lazily at the first use. */
 		drop_fpu(fpu);
-		fpstate_free(fpu);
 	} else {
 		if (!fpu->fpstate_active) {
 			/* kthread execs. TODO: cleanup this horror. */
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 7ae5a62918c7..460e7e2c6186 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -265,6 +265,7 @@ void fpu__init_system(struct cpuinfo_x86 *c)
 	fpu__init_system_generic();
 	fpu__init_system_xstate_size_legacy();
 	fpu__init_system_xstate();
+	setup_xstate_comp();
 
 	fpu__init_system_ctx_switch();
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 71f4b4d2f1fd..5d37c26fa89f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -86,16 +86,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 }
 
-void arch_release_task_struct(struct task_struct *tsk)
-{
-	fpstate_free(&tsk->thread.fpu);
-}
-
-void arch_task_cache_init(void)
-{
-	fpstate_cache_init();
-}
-
 /*
  * Free current thread data structures etc..
  */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8bb0de5bf9c0..68529251e897 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7008,10 +7008,6 @@ int fx_init(struct kvm_vcpu *vcpu)
 {
 	int err;
 
-	err = fpstate_alloc(&vcpu->arch.guest_fpu);
-	if (err)
-		return err;
-
 	fpstate_init(&vcpu->arch.guest_fpu);
 	if (cpu_has_xsaves)
 		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
@@ -7028,11 +7024,6 @@ int fx_init(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(fx_init);
 
-static void fx_free(struct kvm_vcpu *vcpu)
-{
-	fpstate_free(&vcpu->arch.guest_fpu);
-}
-
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_fpu_loaded)
@@ -7070,7 +7061,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvmclock_reset(vcpu);
 
 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
-	fx_free(vcpu);
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
@@ -7126,7 +7116,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 
-	fx_free(vcpu);
 	kvm_x86_ops->vcpu_free(vcpu);
 }