| field | value | date |
|---|---|---|
| author | Joe Perches <joe@perches.com> | 2008-03-23 11:02:39 +0300 |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 19:41:24 +0400 |
| commit | 69cde6512c3a0227878869f9ba8a02cdc72fc253 (patch) | |
| tree | 6a87c90b4182497b0296c6bdd00f3a9b36570918 /include/asm-x86/local.h | |
| parent | fb444c7b25420d57ce5e31cab486f734705bd278 (diff) | |
| download | linux-69cde6512c3a0227878869f9ba8a02cdc72fc253.tar.xz | |
include/asm-x86/local.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
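The sweep is purely mechanical: every `__asm__ __volatile__(...)` becomes the shorter `asm volatile(...)` spelling, constraint-list colons gain a space, and continuation lines are aligned under the opening parenthesis. A minimal standalone sketch of the before/after shape (hypothetical `counter` operand; `incq` is written out instead of the kernel's width-agnostic `_ASM_INC`, so this sketch is 64-bit only):

```c
/* Old style, as flagged by checkpatch: __asm__ __volatile__ spelling,
 * no space after the constraint-list colon. */
static inline void old_style_inc(long *counter)
{
	__asm__ __volatile__(
		"incq %0"
		:"+m" (*counter));
}

/* New style after the cleanup: asm volatile spelling, operands
 * aligned under the opening parenthesis, spaces after the colons. */
static inline void new_style_inc(long *counter)
{
	asm volatile("incq %0"
		     : "+m" (*counter));
}
```

Both forms generate identical code; `asm`/`__asm__` and `volatile`/`__volatile__` are equivalent in GNU C, and checkpatch objects only to the spelling and layout of the first.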
Diffstat (limited to 'include/asm-x86/local.h')

| mode | file | lines |
|---|---|---|
| -rw-r--r-- | include/asm-x86/local.h | 105 |

1 file changed, 50 insertions, 55 deletions
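Most of the hunks below reformat the same idiom: an arithmetic instruction updates the counter in memory and sets the CPU flags, then `sete` (or `sets`) copies the zero (or sign) flag into a byte-sized output. A standalone sketch of the pattern, mirroring `local_sub_and_test()`; it hard-codes `subq` where the kernel uses the width-agnostic `_ASM_SUB`, so again 64-bit only:

```c
/* Subtract i from *counter and report whether the result hit zero.
 * "+m" makes the counter an in/out memory operand, "=qm" lets sete
 * target a byte register or memory, and the "memory" clobber stops
 * the compiler from caching values across the asm statement. */
static inline int sub_and_test(long i, long *counter)
{
	unsigned char c;

	asm volatile("subq %2,%0; sete %1"
		     : "+m" (*counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
```

The bare `return c;` relies on `sete` producing exactly 0 or 1 in `c`.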
```diff
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index f852c62b3319..330a72496abd 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -18,32 +18,28 @@ typedef struct {
 
 static inline void local_inc(local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_INC "%0"
-		:"+m" (l->a.counter));
+	asm volatile(_ASM_INC "%0"
+		     : "+m" (l->a.counter));
 }
 
 static inline void local_dec(local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_DEC "%0"
-		:"+m" (l->a.counter));
+	asm volatile(_ASM_DEC "%0"
+		     : "+m" (l->a.counter));
 }
 
 static inline void local_add(long i, local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_ADD "%1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
+	asm volatile(_ASM_ADD "%1,%0"
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
 }
 
 static inline void local_sub(long i, local_t *l)
 {
-	__asm__ __volatile__(
-		_ASM_SUB "%1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
+	asm volatile(_ASM_SUB "%1,%0"
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
 }
 
 /**
@@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_SUB "%2,%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(_ASM_SUB "%2,%0; sete %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
@@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_DEC "%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
+	asm volatile(_ASM_DEC "%0; sete %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_INC "%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
+	asm volatile(_ASM_INC "%0; sete %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		_ASM_ADD "%2,%0; sets %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(_ASM_ADD "%2,%0; sets %1"
+		     : "+m" (l->a.counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
@@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l)
 #endif
 	/* Modern 486+ processor */
 	__i = i;
-	__asm__ __volatile__(
-		_ASM_XADD "%0, %1;"
-		:"+r" (i), "+m" (l->a.counter)
-		: : "memory");
+	asm volatile(_ASM_XADD "%0, %1;"
+		     : "+r" (i), "+m" (l->a.counter)
+		     : : "memory");
 	return i + __i;
 
 #ifdef CONFIG_M386
@@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l)
 #define local_add_unless(l, a, u)				\
 ({								\
 	long c, old;						\
-	c = local_read(l);					\
+	c = local_read((l));					\
 	for (;;) {						\
 		if (unlikely(c == (u)))				\
 			break;					\
-		old = local_cmpxchg((l), c, c + (a));	\
+		old = local_cmpxchg((l), c, c + (a));		\
 		if (likely(old == c))				\
 			break;					\
 		c = old;					\
@@ -214,26 +205,30 @@ static inline long local_sub_return(long i, local_t *l)
 /* Need to disable preemption for the cpu local counters otherwise we could
    still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
+#define cpu_local_wrap_v(l)					\
+({								\
+	local_t res__;						\
+	preempt_disable();					\
+	res__ = (l);						\
+	preempt_enable();					\
+	res__;							\
+})
 #define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
+({								\
+	preempt_disable();					\
+	(l);							\
+	preempt_enable();					\
+})								\
+
+#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
+#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
+#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
+#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
+#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
+#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
+
+#define __cpu_local_inc(l)	cpu_local_inc((l))
+#define __cpu_local_dec(l)	cpu_local_dec((l))
 #define __cpu_local_add(i, l)	cpu_local_add((i), (l))
 #define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
```
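The comment kept intact in the last hunk explains why the `cpu_local_*` wrappers bracket every access with `preempt_disable()`/`preempt_enable()`: without pinning the task, it could migrate between computing the per-CPU address and touching it, and end up updating another CPU's counter. A rough sketch of what `cpu_local_inc(foo)` expands to after this patch, assuming the 2.6.25-era per-CPU API (`DEFINE_PER_CPU`, `__get_cpu_var`); `foo` is a hypothetical counter name:

```c
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/local.h>

DEFINE_PER_CPU(local_t, foo);		/* one local_t per CPU */

static void foo_inc(void)
{
	preempt_disable();		/* pin the task to this CPU */
	local_inc(&__get_cpu_var(foo));	/* single-instruction _ASM_INC */
	preempt_enable();		/* migration is safe again */
}
```

On non-preemptible kernels the `preempt_*` calls essentially compile away, so the wrappers cost nothing there.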