author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 02:20:36 +0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-x86_64/i387.h
download  linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.xz

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Diffstat (limited to 'include/asm-x86_64/i387.h')
-rw-r--r--  include/asm-x86_64/i387.h  150
1 file changed, 150 insertions, 0 deletions
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
new file mode 100644
index 000000000000..aa39cfd0e001
--- /dev/null
+++ b/include/asm-x86_64/i387.h
@@ -0,0 +1,150 @@
+/*
+ * include/asm-x86_64/i387.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef __ASM_X86_64_I387_H
+#define __ASM_X86_64_I387_H
+
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/user.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess.h>
+
+extern void fpu_init(void);
+extern unsigned int mxcsr_feature_mask;
+extern void mxcsr_feature_mask_init(void);
+extern void init_fpu(struct task_struct *child);
+extern int save_i387(struct _fpstate __user *buf);
+
+/*
+ * FPU lazy state save handling...
+ */
+
+#define unlazy_fpu(tsk) do { \
+ if ((tsk)->thread_info->status & TS_USEDFPU) \
+ save_init_fpu(tsk); \
+} while (0)
+
+/* Ignore delayed exceptions from user space */
+static inline void tolerant_fwait(void)
+{
+ asm volatile("1: fwait\n"
+ "2:\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 1b,2b\n"
+ " .previous\n");
+}
+
+#define clear_fpu(tsk) do { \
+ if ((tsk)->thread_info->status & TS_USEDFPU) { \
+ tolerant_fwait(); \
+ (tsk)->thread_info->status &= ~TS_USEDFPU; \
+ stts(); \
+ } \
+} while (0)
+
+/*
+ * ptrace request handlers...
+ */
+extern int get_fpregs(struct user_i387_struct __user *buf,
+ struct task_struct *tsk);
+extern int set_fpregs(struct task_struct *tsk,
+ struct user_i387_struct __user *buf);
+
+/*
+ * i387 state interaction
+ */
+#define get_fpu_mxcsr(t) ((t)->thread.i387.fxsave.mxcsr)
+#define get_fpu_cwd(t) ((t)->thread.i387.fxsave.cwd)
+#define get_fpu_fxsr_twd(t) ((t)->thread.i387.fxsave.twd)
+#define get_fpu_swd(t) ((t)->thread.i387.fxsave.swd)
+#define set_fpu_cwd(t,val) ((t)->thread.i387.fxsave.cwd = (val))
+#define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
+#define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))
+
+static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
+{
+ int err;
+ asm volatile("1: rex64 ; fxrstor (%[fx])\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 1b,3b\n"
+ ".previous"
+ : [err] "=r" (err)
+ : [fx] "r" (fx), "0" (0));
+ if (unlikely(err))
+ init_fpu(current);
+ return err;
+}
+
+static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
+{
+ int err;
+ asm volatile("1: rex64 ; fxsave (%[fx])\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 1b,3b\n"
+ ".previous"
+ : [err] "=r" (err)
+ : [fx] "r" (fx), "0" (0));
+ if (unlikely(err))
+ __clear_user(fx, sizeof(struct i387_fxsave_struct));
+ return err;
+}
+
+static inline void kernel_fpu_begin(void)
+{
+ struct thread_info *me = current_thread_info();
+ preempt_disable();
+ if (me->status & TS_USEDFPU) {
+ asm volatile("rex64 ; fxsave %0 ; fnclex"
+ : "=m" (me->task->thread.i387.fxsave));
+ me->status &= ~TS_USEDFPU;
+ return;
+ }
+ clts();
+}
+
+static inline void kernel_fpu_end(void)
+{
+ stts();
+ preempt_enable();
+}
+
+static inline void save_init_fpu(struct task_struct *tsk)
+{
+ asm volatile("rex64 ; fxsave %0 ; fnclex"
+ : "=m" (tsk->thread.i387.fxsave));
+ tsk->thread_info->status &= ~TS_USEDFPU;
+ stts();
+}
+
+/*
+ * This restores directly out of user space. Exceptions are handled.
+ */
+static inline int restore_i387(struct _fpstate __user *buf)
+{
+ return restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
+}
+
+#endif /* __ASM_X86_64_I387_H */
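
For context, here is a minimal usage sketch (not part of this commit) of how kernel code of this era would bracket in-kernel FPU/SSE work with the kernel_fpu_begin()/kernel_fpu_end() pair declared above, so that a user task's lazily-saved FPU state is preserved and preemption stays disabled while the kernel owns the FPU. The function name example_xor_blocks and its plain-C body are illustrative assumptions only, not anything defined by this patch.

#include <asm/i387.h>

/* Hypothetical caller: the FPU-owned region is kept short and must not sleep. */
static void example_xor_blocks(unsigned long *dst, const unsigned long *src,
			       unsigned long words)
{
	unsigned long i;

	kernel_fpu_begin();	/* save user FPU state if live; FPU usable (CR0.TS clear) */

	/*
	 * SSE/MMX instructions could be issued here; plain C is used for
	 * brevity since no vector routine is defined in this patch.
	 */
	for (i = 0; i < words; i++)
		dst[i] ^= src[i];

	kernel_fpu_end();	/* set CR0.TS again and re-enable preemption */
}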