author     Glauber de Oliveira Costa <gcosta@redhat.com>  2008-01-30 15:31:40 +0300
committer  Ingo Molnar <mingo@elte.hu>                    2008-01-30 15:31:40 +0300
commit     ae2e15eb3b6c2a011bee615470bf52d2beb99a4b
tree       b2e323eef35a6cab5f16284378c0c96d4bc159b7
parent     1a53905adddf6cc6d795bd7e988c60a19773f72e
x86: unify prefetch operations
This patch moves the prefetch[w]? functions to processor.h

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
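The unified prefetch() below uses alternative_input() so that, at boot, the kernel patches in the best prefetch instruction for the running CPU instead of branching on every call. As a rough illustration only, here is a hypothetical userspace C sketch of the same idea (x86, GCC/Clang), where a function pointer chosen once at startup stands in for the kernel's in-place instruction patching; the names pick_prefetch, prefetch_best, prefetch_nop and prefetch_nta are invented for this sketch and do not appear in the patch:

/* Sketch: choose the best prefetch once, based on CPUID, instead of
 * branching per call -- roughly what alternative_input() achieves by
 * rewriting the instruction bytes at boot. */
#include <cpuid.h>
#include <stdio.h>

static void prefetch_nop(const void *x) { (void)x; }  /* BASE_PREFETCH fallback */

static void prefetch_nta(const void *x)
{
	asm volatile("prefetchnta (%0)" : : "r" (x));  /* SSE (X86_FEATURE_XMM) */
}

static void (*prefetch_best)(const void *) = prefetch_nop;

static void pick_prefetch(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1, EDX bit 25 = SSE */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (edx & (1u << 25)))
		prefetch_best = prefetch_nta;
}

int main(void)
{
	int data[64];

	pick_prefetch();
	prefetch_best(data);  /* hint: pull data toward the cache */
	printf("using %s\n",
	       prefetch_best == prefetch_nta ? "prefetchnta" : "nop");
	return 0;
}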
Diffstat (limited to 'include')

 include/asm-x86/processor.h    | 30 ++++++++++++++++++++++++++++++
 include/asm-x86/processor_32.h | 25 -------------------------
 include/asm-x86/processor_64.h |  8 --------
 3 files changed, 30 insertions(+), 33 deletions(-)
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index c6b749a018a7..bfac9739f57e 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -596,6 +596,36 @@ extern char ignore_fpu_irq;
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
+#ifdef CONFIG_X86_32
+#define BASE_PREFETCH ASM_NOP4
+#define ARCH_HAS_PREFETCH
+#else
+#define BASE_PREFETCH "prefetcht0 (%1)"
+#endif
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth to care about 3dnow! prefetches for the K6
+ because they are microcoded there and very slow.
+ However we don't do prefetches for pre XP Athlons currently
+ That should be fixed. */
+static inline void prefetch(const void *x)
+{
+ alternative_input(BASE_PREFETCH,
+ "prefetchnta (%1)",
+ X86_FEATURE_XMM,
+ "r" (x));
+}
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for
+ spinlocks to avoid one state transition in the cache coherency protocol. */
+static inline void prefetchw(const void *x)
+{
+ alternative_input(BASE_PREFETCH,
+ "prefetchw (%1)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+}
+
#define spin_lock_prefetch(x) prefetchw(x)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
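As the comment in the hunk above notes, prefetchw pulls the cache line in exclusive state, so a lock about to be written skips one transition (e.g. Shared to Modified) in the coherency protocol; spin_lock_prefetch() is exactly that pattern. A minimal portable sketch of the same idea, using GCC's __builtin_prefetch() write hint (which compilers lower to prefetchw when the target supports it, e.g. with -mprfchw); the mylock struct and function names are illustrative, not from the patch:

/* Sketch of the spin_lock_prefetch() idea in C11. */
#include <stdatomic.h>

struct mylock {
	atomic_int locked;
};

static void mylock_acquire(struct mylock *l)
{
	/* Prefetch for write: grab the line exclusively before the
	 * read-modify-write below, saving a coherency transition. */
	__builtin_prefetch(l, 1 /* write */, 3 /* high locality */);

	while (atomic_exchange_explicit(&l->locked, 1, memory_order_acquire))
		;  /* spin */
}

static void mylock_release(struct mylock *l)
{
	atomic_store_explicit(&l->locked, 0, memory_order_release);
}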
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 84a4c5e47d57..61a9cae2364b 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -228,29 +228,4 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define ASM_NOP_MAX 8
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
- because they are microcoded there and very slow.
- However we don't do prefetches for pre XP Athlons currently
- That should be fixed. */
-static inline void prefetch(const void *x)
-{
- alternative_input(ASM_NOP4,
- "prefetchnta (%1)",
- X86_FEATURE_XMM,
- "r" (x));
-}
-
-#define ARCH_HAS_PREFETCH
-
-/* 3dnow! prefetch to get an exclusive cache line. Useful for
- spinlocks to avoid one state transition in the cache coherency protocol. */
-static inline void prefetchw(const void *x)
-{
- alternative_input(ASM_NOP4,
- "prefetchw (%1)",
- X86_FEATURE_3DNOW,
- "r" (x));
-}
-
#endif /* __ASM_I386_PROCESSOR_H */
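The alternative_input() sites removed above (and re-added in processor.h) key on X86_FEATURE_XMM and X86_FEATURE_3DNOW, which the kernel derives from CPUID at boot. As a hedged userspace sketch of the corresponding probes: 3DNow! is reported in CPUID leaf 0x80000001 EDX bit 31, and later CPUs expose a standalone PREFETCHW bit in ECX bit 8 of the same leaf:

/* Sketch: probe the CPUID bits behind X86_FEATURE_3DNOW / PREFETCHW. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
		puts("no extended CPUID leaf");
		return 0;
	}
	printf("3DNow!   : %s\n", (edx & (1u << 31)) ? "yes" : "no");
	printf("PREFETCHW: %s\n", (ecx & (1u << 8))  ? "yes" : "no");
	return 0;
}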
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index 45e382989b33..08b965124b97 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -124,12 +124,4 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
#define ASM_NOP_MAX 8
-static inline void prefetchw(void *x)
-{
- alternative_input("prefetcht0 (%1)",
- "prefetchw (%1)",
- X86_FEATURE_3DNOW,
- "r" (x));
-}
-
#endif /* __ASM_X86_64_PROCESSOR_H */