author    Thomas Gleixner <tglx@linutronix.de>    2009-12-03 22:08:46 +0300
committer Thomas Gleixner <tglx@linutronix.de>   2009-12-15 01:55:32 +0300
commit    e5931943d02bf751b1ec849c0d2ade23d76a8d41 (patch)
tree      119fe3bc583d0d043d97cb9edd98bad52692a546 /arch/powerpc
parent    fb3a6bbc912b12347614e5742c7c61416cdb0ca0 (diff)
download  linux-e5931943d02bf751b1ec849c0d2ade23d76a8d41.tar.xz
locking: Convert raw_rwlock functions to arch_rwlock
Name space cleanup for rwlock functions. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/spinlock.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
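
The rename only moves the powerpc rwlock primitives into the arch_* name space that the core locking code expects: the internal trylock helpers gain a double-underscore prefix, freeing the plain arch_* names for the cross-arch entry points. Below is a condensed sketch of the layering the patch leaves behind; the lock word layout and the trylock bodies are simplified, non-atomic stand-ins for illustration, the real lwarx/stwcx. versions are in the diff that follows.

/*
 * Simplified model of the naming layering on powerpc after this patch.
 * The trylock bodies are non-atomic stand-ins; the real versions use
 * lwarx/stwcx. sequences and, on ppc64, a per-cpu write token.
 */
typedef struct {
	volatile signed int lock;	/* > 0: readers, < 0: writer, 0: free */
} arch_rwlock_t;

/* Availability checks: same semantics as before, only the prefix changes. */
#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)		(!(rw)->lock)

/*
 * Double-underscore helpers stay powerpc-internal and keep the raw
 * return convention: old value + 1 for read (> 0 means success),
 * old value for write (0 means success).
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long old = rw->lock;

	if (old >= 0)			/* no writer present */
		rw->lock = old + 1;	/* take a reader reference */
	return old + 1;
}

static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long old = rw->lock;

	if (old == 0)			/* lock completely free */
		rw->lock = -1;		/* stand-in for the real write token */
	return old;
}

/*
 * Single-underscore functions are the cross-arch entry points the
 * generic locking code calls; they turn the raw values into booleans.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}
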
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 2fad2c07c593..764094cff681 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -166,8 +166,8 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
* read-locks.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_write_can_lock(rw) (!(rw)->lock)
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND "extsw %0,%0\n"
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(arch_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(arch_rwlock_t *rw)
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(arch_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(arch_rwlock_t *rw)
return tmp;
}
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_read_trylock(rw) > 0))
+ if (likely(__arch_read_trylock(rw) > 0))
break;
do {
HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
}
}
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_write_trylock(rw) == 0))
+ if (likely(__arch_write_trylock(rw) == 0))
break;
do {
HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
}
}
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return arch_read_trylock(rw) > 0;
+ return __arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return arch_write_trylock(rw) == 0;
+ return __arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,15 +280,15 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
rw->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) __spin_yield(lock)
#define arch_read_relax(lock) __rw_yield(lock)
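
For context on where these names are consumed: the parent commit listed above makes the generic rwlock_t embed the per-arch lock as its raw_lock member, and the core locking code hands that member to the arch_* functions. A rough sketch under those assumptions; example_read_lock()/example_read_unlock() are hypothetical stand-ins for the generic helpers, not real kernel functions, and lockdep/preemption handling is omitted.

/*
 * Rough sketch: rwlock_t carries the per-arch lock as raw_lock, and
 * the generic side ultimately calls the powerpc arch_* entry points
 * on that member.
 */
typedef struct {
	volatile signed int lock;
} arch_rwlock_t;

extern void arch_read_lock(arch_rwlock_t *rw);		/* powerpc version above */
extern void arch_read_unlock(arch_rwlock_t *rw);

typedef struct {
	arch_rwlock_t raw_lock;		/* per-arch part */
	/* debug / lockdep fields omitted */
} rwlock_t;

static inline void example_read_lock(rwlock_t *lock)
{
	arch_read_lock(&lock->raw_lock);
}

static inline void example_read_unlock(rwlock_t *lock)
{
	arch_read_unlock(&lock->raw_lock);
}
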