author    Suresh Siddha <suresh.b.siddha@intel.com>   2012-09-07 01:58:52 +0400
committer H. Peter Anvin <hpa@linux.intel.com>        2012-09-19 02:52:22 +0400
commit    5d2bd7009f306c82afddd1ca4d9763ad8473c216 (patch)
tree      772bc888c48766b892e216c19e938c82657e2b0e /arch/x86/kernel
parent    304bceda6a18ae0b0240b8aac9a6bdf8ce2d2469 (diff)
download  linux-5d2bd7009f306c82afddd1ca4d9763ad8473c216.tar.xz
x86, fpu: decouple non-lazy/eager fpu restore from xsave
Decouple the non-lazy/eager fpu restore policy from the existence of the xsave feature. Introduce a synthetic CPUID flag to represent the eagerfpu policy. The "eagerfpu=on" boot parameter will enable the policy.

Requested-by: H. Peter Anvin <hpa@zytor.com>
Requested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1347300665-6209-2-git-send-email-suresh.b.siddha@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
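Note: the use_eager_fpu()/cpu_has_eager_fpu helpers used throughout the hunks below live under arch/x86/include/asm/ and are therefore outside this arch/x86/kernel diffstat. A minimal sketch of what they presumably look like, built on the new synthetic X86_FEATURE_EAGER_FPU bit (the exact definitions and the feature-bit value are assumptions here, not part of the shown diff):

    /* arch/x86/include/asm/cpufeature.h (assumed) */
    #define X86_FEATURE_EAGER_FPU   (3*32+29) /* "eagerfpu" non-lazy FPU restore */
    #define cpu_has_eager_fpu       boot_cpu_has(X86_FEATURE_EAGER_FPU)

    /* arch/x86/include/asm/fpu-internal.h (assumed) */
    static __always_inline __pure bool use_eager_fpu(void)
    {
            /* Resolved via alternatives at boot; true once the synthetic cap is set. */
            return static_cpu_has(X86_FEATURE_EAGER_FPU);
    }

Booting with "eagerfpu=on" then forces the cap via setup_force_cpu_cap() in the xsave.c hunk below, so every later use_eager_fpu() check takes the eager path.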
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/common.c |  2
-rw-r--r--  arch/x86/kernel/i387.c       | 25
-rw-r--r--  arch/x86/kernel/process.c    |  2
-rw-r--r--  arch/x86/kernel/traps.c      |  2
-rw-r--r--  arch/x86/kernel/xsave.c      | 87
5 files changed, 67 insertions(+), 51 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a5fbc3c5fccc..b0fe078614d8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1297,7 +1297,6 @@ void __cpuinit cpu_init(void)
dbg_restore_debug_regs();
fpu_init();
- xsave_init();
raw_local_save_flags(kernel_eflags);
@@ -1352,6 +1351,5 @@ void __cpuinit cpu_init(void)
dbg_restore_debug_regs();
fpu_init();
- xsave_init();
}
#endif
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 528557470ddb..6782e3983865 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -22,9 +22,8 @@
/*
* Were we in an interrupt that interrupted kernel mode?
*
- * For now, on xsave platforms we will return interrupted
- * kernel FPU as not-idle. TBD: As we use non-lazy FPU restore
- * for xsave platforms, ideally we can change the return value
+ * For now, with eagerfpu we will return interrupted kernel FPU
+ * state as not-idle. TBD: Ideally we can change the return value
* to something like __thread_has_fpu(current). But we need to
* be careful of doing __thread_clear_has_fpu() before saving
* the FPU etc for supporting nested uses etc. For now, take
@@ -38,7 +37,7 @@
*/
static inline bool interrupted_kernel_fpu_idle(void)
{
- if (use_xsave())
+ if (use_eager_fpu())
return 0;
return !__thread_has_fpu(current) &&
@@ -84,7 +83,7 @@ void kernel_fpu_begin(void)
__save_init_fpu(me);
__thread_clear_has_fpu(me);
/* We do 'stts()' in kernel_fpu_end() */
- } else if (!use_xsave()) {
+ } else if (!use_eager_fpu()) {
this_cpu_write(fpu_owner_task, NULL);
clts();
}
@@ -93,7 +92,7 @@ EXPORT_SYMBOL(kernel_fpu_begin);
void kernel_fpu_end(void)
{
- if (use_xsave())
+ if (use_eager_fpu())
math_state_restore();
else
stts();
@@ -122,7 +121,6 @@ static void __cpuinit mxcsr_feature_mask_init(void)
{
unsigned long mask = 0;
- clts();
if (cpu_has_fxsr) {
memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
asm volatile("fxsave %0" : : "m" (fx_scratch));
@@ -131,7 +129,6 @@ static void __cpuinit mxcsr_feature_mask_init(void)
mask = 0x0000ffbf;
}
mxcsr_feature_mask &= mask;
- stts();
}
static void __cpuinit init_thread_xstate(void)
@@ -185,9 +182,8 @@ void __cpuinit fpu_init(void)
init_thread_xstate();
mxcsr_feature_mask_init();
- /* clean state in init */
- current_thread_info()->status = 0;
- clear_used_math();
+ xsave_init();
+ eager_fpu_init();
}
void fpu_finit(struct fpu *fpu)
@@ -198,12 +194,7 @@ void fpu_finit(struct fpu *fpu)
}
if (cpu_has_fxsr) {
- struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
- memset(fx, 0, xstate_size);
- fx->cwd = 0x37f;
- if (cpu_has_xmm)
- fx->mxcsr = MXCSR_DEFAULT;
+ fx_finit(&fpu->state->fxsave);
} else {
struct i387_fsave_struct *fp = &fpu->state->fsave;
memset(fp, 0, xstate_size);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c21e30f8923b..dc3567e083f9 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -156,7 +156,7 @@ void flush_thread(void)
* Free the FPU state for non xsave platforms. They get reallocated
* lazily at the first use.
*/
- if (!use_xsave())
+ if (!use_eager_fpu())
free_thread_xstate(tsk);
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ac7d5275f6e8..4f4aba0551b0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -630,7 +630,7 @@ EXPORT_SYMBOL_GPL(math_state_restore);
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
- BUG_ON(use_xsave());
+ BUG_ON(use_eager_fpu());
#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index e7752bd7cac8..c0afd2c43761 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -400,7 +400,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
set_used_math();
}
- if (use_xsave())
+ if (use_eager_fpu())
math_state_restore();
return err;
@@ -450,29 +450,11 @@ static void prepare_fx_sw_frame(void)
*/
static inline void xstate_enable(void)
{
- clts();
set_in_cr4(X86_CR4_OSXSAVE);
xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
/*
- * This is same as math_state_restore(). But use_xsave() is not yet
- * patched to use math_state_restore().
- */
-static inline void init_restore_xstate(void)
-{
- init_fpu(current);
- __thread_fpu_begin(current);
- xrstor_state(init_xstate_buf, -1);
-}
-
-static inline void xstate_enable_ap(void)
-{
- xstate_enable();
- init_restore_xstate();
-}
-
-/*
* Record the offsets and sizes of different state managed by the xsave
* memory layout.
*/
@@ -500,17 +482,20 @@ static void __init setup_xstate_features(void)
/*
* setup the xstate image representing the init state
*/
-static void __init setup_xstate_init(void)
+static void __init setup_init_fpu_buf(void)
{
- setup_xstate_features();
-
/*
* Setup init_xstate_buf to represent the init state of
* all the features managed by the xsave
*/
init_xstate_buf = alloc_bootmem_align(xstate_size,
__alignof__(struct xsave_struct));
- init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
+ fx_finit(&init_xstate_buf->i387);
+
+ if (!cpu_has_xsave)
+ return;
+
+ setup_xstate_features();
/*
* Init all the features state with header_bv being 0x0
@@ -523,6 +508,17 @@ static void __init setup_xstate_init(void)
xsave_state(init_xstate_buf, -1);
}
+static int disable_eagerfpu;
+static int __init eager_fpu_setup(char *s)
+{
+ if (!strcmp(s, "on"))
+ setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+ else if (!strcmp(s, "off"))
+ disable_eagerfpu = 1;
+ return 1;
+}
+__setup("eagerfpu=", eager_fpu_setup);
+
/*
* Enable and initialize the xsave feature.
*/
@@ -559,15 +555,10 @@ static void __init xstate_enable_boot_cpu(void)
update_regset_xstate_info(xstate_size, pcntxt_mask);
prepare_fx_sw_frame();
-
- setup_xstate_init();
+ setup_init_fpu_buf();
pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
pcntxt_mask, xstate_size);
-
- current->thread.fpu.state =
- alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct));
- init_restore_xstate();
}
/*
@@ -586,6 +577,42 @@ void __cpuinit xsave_init(void)
return;
this_func = next_func;
- next_func = xstate_enable_ap;
+ next_func = xstate_enable;
this_func();
}
+
+static inline void __init eager_fpu_init_bp(void)
+{
+ current->thread.fpu.state =
+ alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct));
+ if (!init_xstate_buf)
+ setup_init_fpu_buf();
+}
+
+void __cpuinit eager_fpu_init(void)
+{
+ static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
+
+ clear_used_math();
+ current_thread_info()->status = 0;
+ if (!cpu_has_eager_fpu) {
+ stts();
+ return;
+ }
+
+ if (boot_func) {
+ boot_func();
+ boot_func = NULL;
+ }
+
+ /*
+ * This is same as math_state_restore(). But use_xsave() is
+ * not yet patched to use math_state_restore().
+ */
+ init_fpu(current);
+ __thread_fpu_begin(current);
+ if (cpu_has_xsave)
+ xrstor_state(init_xstate_buf, -1);
+ else
+ fxrstor_checking(&init_xstate_buf->i387);
+}
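
For completeness: fx_finit(), which fpu_finit() and setup_init_fpu_buf() now call in the hunks above, is also defined outside this diffstat. Judging from the open-coded sequence the patch removes from fpu_finit(), it presumably factors out exactly that initialization (a sketch under that assumption, not the verbatim header change):

    /* arch/x86/include/asm/fpu-internal.h (assumed): shared fxsave-area init */
    static inline void fx_finit(struct i387_fxsave_struct *fx)
    {
            memset(fx, 0, xstate_size);        /* clear the whole state area */
            fx->cwd = 0x37f;                   /* x87 default control word */
            if (cpu_has_xmm)
                    fx->mxcsr = MXCSR_DEFAULT; /* default SSE control/status */
    }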