Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/sync_bitops.h | 56
1 file changed, 28 insertions, 28 deletions
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index bc249f40e0ee..f1078a5e4ed7 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -13,7 +13,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)
 
 /**
  * sync_set_bit - Atomically set a bit in memory
@@ -26,12 +26,12 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btsl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btsl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btrl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btrl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btcl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btcl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
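For context, a minimal usage sketch of the helpers this diff touches; it is not part of the commit. The shared_flags word and the RING_READY/RING_BUSY bit numbers are hypothetical, and the include path assumes <asm/sync_bitops.h> resolves to this header; only the sync_*_bit() interfaces themselves come from the file above.

/*
 * Illustrative sketch, not from the commit: a caller treating two
 * hypothetical bits in a shared word as a ready flag and a try-lock.
 */
#include <asm/sync_bitops.h>

#define RING_READY	0	/* hypothetical flag bit */
#define RING_BUSY	1	/* hypothetical flag bit */

static unsigned long shared_flags;	/* word shared with another agent */

static void ring_kick(void)
{
	/* lock btsl: atomically set RING_READY in shared_flags. */
	sync_set_bit(RING_READY, &shared_flags);

	/*
	 * sync_test_and_set_bit() returns the bit's previous value, so a
	 * zero result means we won this simple try-lock on RING_BUSY.
	 */
	if (!sync_test_and_set_bit(RING_BUSY, &shared_flags)) {
		/* ... operate on the shared data ... */
		sync_clear_bit(RING_BUSY, &shared_flags);
	}
}

The return-value idiom in the test_and_* variants works because btsl/btrl/btcl copy the bit's old value into the carry flag, and the following sbbl %0,%0 computes oldbit - oldbit - CF, leaving 0 or -1 in oldbit; the result is therefore nonzero exactly when the bit was previously set.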