author:    Gerd Hoffmann <kraxel@suse.de>    2006-03-23 13:59:32 +0300
committer: Linus Torvalds <torvalds@g5.osdl.org>    2006-03-23 18:38:04 +0300
commit:    9a0b5817ad97bb718ab85322759d19a238712b47
tree:      39bd21eb69c4001b99096d96a76a2e5d37904108 /include/asm-i386/atomic.h
parent:    4d7d8c82c181711d28c8336108330a9121f5ef07
download:  linux-9a0b5817ad97bb718ab85322759d19a238712b47.tar.xz
[PATCH] x86: SMP alternatives
Implement SMP alternatives, i.e. switching at runtime between different
code versions for UP and SMP. The code can patch both SMP->UP and UP->SMP.
The UP->SMP case is useful for CPU hotplug.
With CONFIG_HOTPLUG_CPU enabled, the code switches to the UP variant at
boot time and whenever the number of online CPUs drops to 1, and switches
back to the SMP variant when the number of CPUs goes up to 2.
Without CONFIG_HOTPLUG_CPU, or on non-SMP-capable systems, the code is
patched once at boot time (if needed) and the tables are released
afterwards.
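
For illustration, here is a minimal sketch of the hotplug-driven switch.
Only alternatives_smp_switch() follows the patch's naming; the trigger
function and its wiring are hypothetical stand-ins for the patch's actual
hotplug hooks:

    /* Hedged sketch: flip between the UP and SMP code variants as
     * the number of online CPUs crosses the 1 <-> 2 boundary.
     * cpu_count_changed() is hypothetical; only
     * alternatives_smp_switch() comes from the patch itself.
     */
    extern void alternatives_smp_switch(int smp);

    static void cpu_count_changed(unsigned int online_cpus)
    {
            if (online_cpus == 1)
                    alternatives_smp_switch(0);     /* patch to UP code */
            else if (online_cpus == 2)
                    alternatives_smp_switch(1);     /* patch back to SMP */
    }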
The changes in detail:
* The existing alternatives code is moved to a separate file, and
  the SMP alternatives code is added there.
* The patch adds some new ELF sections to the kernel:

      .smp_altinstructions
              like .altinstructions, also contains a list
              of alt_instr structs (see the entry-layout
              sketch after this list).
      .smp_altinstr_replacement
              like .altinstr_replacement, but also reserves
              space to save the original instruction before
              replacing it.
      .smp_locks
              list of pointers to lock prefixes which can be
              nop'ed out on UP (see the LOCK_PREFIX sketch
              after this list).

  The first two are used to replace more complex instruction
  sequences such as spinlocks and semaphores. It would be possible
  to handle the lock prefixes the same way, but treating them as a
  special case keeps the tables much smaller.
* The sections are page-aligned and padded up to page size, so they
  can be freed if they are not needed.
* Split the code that releases init pages into a separate function and
  use it to release the ELF sections if they are unused (see the
  freeing sketch after this list).
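
To make the table format concrete, here is a sketch of one entry as
stored in .altinstructions and .smp_altinstructions. The field layout
follows the i386 code of this era, but the exact types and padding here
are an assumption, not a quote of the patch:

    /* Sketch of an alt_instr entry; layout is an assumption based
     * on the i386 alternatives code of this era.
     */
    struct alt_instr {
            u8 *instr;              /* original instruction */
            u8 *replacement;        /* replacement instruction */
            u8  cpuid;              /* cpuid feature bit for replacement */
            u8  instrlen;           /* length of original instruction */
            u8  replacementlen;     /* must be <= instrlen */
            u8  pad;
    };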
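The lock-prefix special case works by having LOCK_PREFIX emit the prefix
and record its address in .smp_locks, so the patching code can later walk
that pointer list and overwrite each prefix with a NOP on UP. A minimal
sketch, with illustrative label numbering and alignment:

    /* Sketch: emit "lock; " and record the address of the prefix
     * in .smp_locks so it can be nop'ed out on UP.
     */
    #define LOCK_PREFIX \
            ".section .smp_locks,\"a\"\n" \
            "  .align 4\n" \
            "  .long 661f\n" /* pointer to the lock prefix */ \
            ".previous\n" \
            "661:\n\tlock; "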
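For the last point, a hedged sketch of releasing the page-aligned
sections through the split-out helper; free_init_pages() and the
__smp_alt_begin/__smp_alt_end symbols follow the patch's naming, but the
exact signature here is an assumption:

    /* Sketch: free the .smp_alt* sections once they are no longer
     * needed, reusing the helper split out of the init-memory
     * freeing code.
     */
    extern void free_init_pages(char *what,
                                unsigned long begin, unsigned long end);
    extern char __smp_alt_begin[], __smp_alt_end[];

    static void free_smp_alternatives(void)
    {
            free_init_pages("SMP alternatives",
                            (unsigned long)__smp_alt_begin,
                            (unsigned long)__smp_alt_end);
    }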
Signed-off-by: Gerd Hoffmann <kraxel@suse.de>
Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-i386/atomic.h')
-rw-r--r--  include/asm-i386/atomic.h  28
1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..78b0032d1f29 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -10,12 +10,6 @@
  * resource counting etc..
  */
 
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
@@ -52,7 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "addl %1,%0"
+		LOCK_PREFIX "addl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -67,7 +61,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "subl %1,%0"
+		LOCK_PREFIX "subl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -86,7 +80,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "subl %2,%0; sete %1"
+		LOCK_PREFIX "subl %2,%0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -101,7 +95,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "incl %0"
+		LOCK_PREFIX "incl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -115,7 +109,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 static __inline__ void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK "decl %0"
+		LOCK_PREFIX "decl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -133,7 +127,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "decl %0; sete %1"
+		LOCK_PREFIX "decl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -152,7 +146,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "incl %0; sete %1"
+		LOCK_PREFIX "incl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -172,7 +166,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK "addl %2,%0; sets %1"
+		LOCK_PREFIX "addl %2,%0; sets %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -195,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 	/* Modern 486+ processor */
 	__i = i;
 	__asm__ __volatile__(
-		LOCK "xaddl %0, %1;"
+		LOCK_PREFIX "xaddl %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
@@ -242,11 +236,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
 : : "r" (~(mask)),"m" (*addr) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
 : : "r" (mask),"m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */