Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/i387.h   |  2 +-
-rw-r--r--  arch/x86/kernel/fpu/core.c    |  6 +++---
-rw-r--r--  arch/x86/kernel/fpu/xsave.c   |  2 +-
-rw-r--r--  arch/x86/kernel/process_32.c  |  2 +-
-rw-r--r--  arch/x86/kernel/process_64.c  |  2 +-
-rw-r--r--  arch/x86/kernel/traps.c       |  2 +-
6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index d6fc84440b73..c8ee395dd6c6 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -23,7 +23,7 @@ extern void fpstate_init(struct fpu *fpu);
extern void fpu__flush_thread(struct task_struct *tsk);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
-extern void math_state_restore(void);
+extern void fpu__restore(void);
extern bool irq_fpu_usable(void);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 7add2fb7369e..15c3cf7bd160 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -228,7 +228,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
}
/*
- * 'math_state_restore()' saves the current math information in the
+ * 'fpu__restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
*
* Careful.. There are problems with IBM-designed IRQ13 behaviour.
@@ -237,7 +237,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
* Must be called with kernel preemption disabled (eg with local
* local interrupts as in the case of do_device_not_available).
*/
-void math_state_restore(void)
+void fpu__restore(void)
{
struct task_struct *tsk = current;
@@ -267,7 +267,7 @@ void math_state_restore(void)
}
kernel_fpu_enable();
}
-EXPORT_SYMBOL_GPL(math_state_restore);
+EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__flush_thread(struct task_struct *tsk)
{
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 163b5cc582ef..d913d5024901 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -404,7 +404,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
set_used_math();
if (use_eager_fpu()) {
preempt_disable();
- math_state_restore();
+ fpu__restore();
preempt_enable();
}
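Taken together, the fpu/core.c comment and the __restore_xstate_sig() hunk above spell out the calling convention of the renamed function: fpu__restore() must run with kernel preemption disabled. A minimal sketch of a caller honouring that convention follows; it is illustrative only, and the helper name restore_current_fpu_state() is hypothetical, not part of this patch.

/*
 * Illustrative only, not part of this patch: a caller honouring the
 * "kernel preemption disabled" requirement documented in fpu/core.c.
 * The helper name is hypothetical.
 */
#include <linux/preempt.h>
#include <asm/i387.h>

static void restore_current_fpu_state(void)
{
	preempt_disable();		/* fpu__restore() must not be preempted */
	fpu__restore();			/* reload current's FPU state onto the CPU */
	preempt_enable();
}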
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 84d647d4b14d..1a0edce626b2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -295,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
* the GDT and LDT are properly updated, and must be
- * done before math_state_restore, so the TS bit is up
+ * done before fpu__restore(), so the TS bit is up
* to date.
*/
arch_end_context_switch(next_p);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ae6efeccb46e..99cc4b8589ad 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* Leave lazy mode, flushing any hypercalls made here. This
* must be done after loading TLS entries in the GDT but before
* loading segments that might reference them, and and it must
- * be done before math_state_restore, so the TS bit is up to
+ * be done before fpu__restore(), so the TS bit is up to
* date.
*/
arch_end_context_switch(next_p);
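Both __switch_to() comments encode the same ordering rule: paravirt lazy mode has to be flushed before anything touches the FPU, so that a deferred CR0.TS update has reached the hardware by the time fpu__restore() runs. A condensed, hypothetical sketch of that ordering (not the real __switch_to(), which does far more) might look like this; the use_eager_fpu() gate is borrowed from the xsave.c hunk above purely for illustration.

/*
 * Hypothetical, condensed ordering sketch -- not the in-tree __switch_to().
 * The point is only the sequence: flush lazy paravirt state first, so the
 * TS bit is up to date, and only then touch the FPU.
 */
static void context_switch_fpu_ordering(struct task_struct *next_p)
{
	arch_end_context_switch(next_p);	/* flush lazy hypercalls; TS now current */

	/* ... TLS/segment reloads elided ... */

	if (use_eager_fpu())			/* illustrative gate, as in xsave.c above */
		fpu__restore();			/* safe only after the flush above */
}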
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 63c7fc3677b4..22ad90a40dbf 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -846,7 +846,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
return;
}
#endif
- math_state_restore(); /* interrupts still off */
+ fpu__restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
conditional_sti(regs);
#endif
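The traps.c hunk is the third caller: do_device_not_available() runs with interrupts still disabled, so the context is already non-preemptible and fpu__restore() needs no explicit preempt_disable()/preempt_enable() pair; interrupts are only re-enabled afterwards, and only conditionally on 32-bit. A stripped-down, hypothetical illustration of that shape:

/*
 * Hypothetical, stripped-down shape of the #NM path above -- not the
 * in-tree handler.  Interrupts are still off on entry, which already
 * rules out preemption, so fpu__restore() needs no extra protection.
 */
static void device_not_available_sketch(struct pt_regs *regs)
{
	fpu__restore();			/* interrupts (and thus preemption) still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);		/* only now allow interrupts again */
#endif
}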