author		Avi Kivity <avi@qumranet.com>		2007-07-19 15:30:14 +0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-20 01:37:05 +0400
commit		2d9ce177e68645945e3366cfe2d66ee3c28cd4f2 (patch)
tree		a98a3a8b0f1e92f0b8f9ecb44b67bb46c3b4451a /include
parent		3e1f900bff40460d7bbab0ccd1a9efc3c70aee49 (diff)
download	linux-2d9ce177e68645945e3366cfe2d66ee3c28cd4f2.tar.xz
i386: Allow KVM on i386 nonpae
Currently, CONFIG_X86_CMPXCHG64 both enables boot-time checking of the cmpxchg8b feature and enables compilation of the set_64bit() family. Since the option is dependent on PAE, and since KVM depends on set_64bit(), this effectively disables KVM on i386 nopae.

Simplify by removing the config option altogether: the boot check is made dependent on CONFIG_X86_PAE directly, and the set_64bit() family is exposed without constraints. It is up to users to check for the feature flag (KVM does not, as virtualization extensions imply its existence).

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
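The caller-side check the message refers to can be sketched as follows (editor's illustration, not part of the patch: the helper name update_counter_if_possible is made up, and the includes assume the asm-i386 headers of this kernel era). With the config option gone, a nopae kernel no longer guarantees CMPXCHG8B at boot, so code using the now always-available primitives tests boot_cpu_data itself; KVM skips this because VMX/SVM-capable CPUs always have the feature, as noted above.

/*
 * Sketch only: guard the 64-bit primitives on i386 nopae, where the
 * boot-time CX8 check no longer applies.
 */
#include <asm/processor.h>	/* boot_cpu_data */
#include <asm/cpufeature.h>	/* cpu_has(), X86_FEATURE_CX8 */
#include <asm/cmpxchg.h>	/* __cmpxchg64(), set_64bit() */

static int update_counter_if_possible(unsigned long long *counter,
				      unsigned long long old,
				      unsigned long long new)
{
	/* CMPXCHG8B is not architectural on a nopae kernel: test for it. */
	if (!cpu_has(&boot_cpu_data, X86_FEATURE_CX8))
		return 0;	/* caller must fall back, e.g. to a lock */

	/* __cmpxchg64() returns the value previously stored at *counter. */
	return __cmpxchg64(counter, old, new) == old;
}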
Diffstat (limited to 'include')
-rw-r--r--	include/asm-i386/cmpxchg.h		14
-rw-r--r--	include/asm-i386/required-features.h	2
2 files changed, 6 insertions(+), 10 deletions(-)
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
index 7adcef0cd53b..64dcdf46117b 100644
--- a/include/asm-i386/cmpxchg.h
+++ b/include/asm-i386/cmpxchg.h
@@ -3,14 +3,16 @@
#include <linux/bitops.h> /* for LOCK_PREFIX */
+/*
+ * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
+ * need to test for the feature in boot_cpu_data.
+ */
+
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
-
-#ifdef CONFIG_X86_CMPXCHG64
-
/*
* The semantics of XCHGCMP8B are a bit strange, this is why
* there is a loop and the loading of %%eax and %%edx has to
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
__set_64bit(ptr, ll_low(value), ll_high(value)) )
-#endif
-
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
})
#endif
-#ifdef CONFIG_X86_CMPXCHG64
-
static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
unsigned long long new)
{
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
(unsigned long long)(n)))
#endif
-
-#endif
diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h
index 65848a007050..618feb98f9f5 100644
--- a/include/asm-i386/required-features.h
+++ b/include/asm-i386/required-features.h
@@ -29,7 +29,7 @@
# define NEED_CMOV 0
#endif
-#ifdef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_PAE
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
#else
# define NEED_CX8 0