author | Ingo Molnar <mingo@kernel.org> | 2018-04-12 10:42:34 +0300 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2018-04-12 10:42:34 +0300 |
commit | ef389b734691cdc8beb009dd402135dcdcb86a56 (patch) | |
tree | 9523a37db93cb7c7874a5f18b4d9a7014898b814 /arch/m32r/include/asm/spinlock.h | |
parent | a774635db5c430cbf21fa5d2f2df3d23aaa8e782 (diff) | |
parent | c76fc98260751e71c884dc1a18a07e427ef033b5 (diff) | |
download | linux-ef389b734691cdc8beb009dd402135dcdcb86a56.tar.xz | |
Merge branch 'WIP.x86/asm' into x86/urgent, because the topic is ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/m32r/include/asm/spinlock.h')
-rw-r--r-- | arch/m32r/include/asm/spinlock.h | 308 |
1 file changed, 0 insertions, 308 deletions
```diff
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
deleted file mode 100644
index 0189f410f8f5..000000000000
--- a/arch/m32r/include/asm/spinlock.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M32R_SPINLOCK_H
-#define _ASM_M32R_SPINLOCK_H
-
-/*
- *  linux/include/asm-m32r/spinlock.h
- *
- *  M32R version:
- *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
- *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
- */
-
-#include <linux/compiler.h>
-#include <linux/atomic.h>
-#include <asm/dcache_clear.h>
-#include <asm/page.h>
-#include <asm/barrier.h>
-#include <asm/processor.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- *
- * (the type definitions are in asm/spinlock_types.h)
- *
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- */
-
-#define arch_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
-
-/**
- * arch_spin_trylock - Try spin lock and return a result
- * @lock: Pointer to the lock variable
- *
- * arch_spin_trylock() tries to get the lock and returns a result.
- * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
- */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	int oldval;
-	unsigned long tmp1, tmp2;
-
-	/*
-	 * lock->slock :  =1 : unlock
-	 *             : <=0 : lock
-	 * {
-	 *   oldval = lock->slock; <--+ need atomic operation
-	 *   lock->slock = 0;      <--+
-	 * }
-	 */
-	__asm__ __volatile__ (
-		"# arch_spin_trylock	\n\t"
-		"ldi	%1, #0;		\n\t"
-		"mvfc	%2, psw;	\n\t"
-		"clrpsw	#0x40 -> nop;	\n\t"
-		DCACHE_CLEAR("%0", "r6", "%3")
-		"lock	%0, @%3;	\n\t"
-		"unlock	%1, @%3;	\n\t"
-		"mvtc	%2, psw;	\n\t"
-		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
-		: "r" (&lock->slock)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r6"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-
-	return (oldval > 0);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned long tmp0, tmp1;
-
-	/*
-	 * lock->slock :  =1 : unlock
-	 *             : <=0 : lock
-	 *
-	 * for ( ; ; ) {
-	 *   lock->slock -= 1;  <-- need atomic operation
-	 *   if (lock->slock == 0) break;
-	 *   for ( ; lock->slock <= 0 ; );
-	 * }
-	 */
-	__asm__ __volatile__ (
-		"# arch_spin_lock	\n\t"
-		".fillinsn		\n"
-		"1:			\n\t"
-		"mvfc	%1, psw;	\n\t"
-		"clrpsw	#0x40 -> nop;	\n\t"
-		DCACHE_CLEAR("%0", "r6", "%2")
-		"lock	%0, @%2;	\n\t"
-		"addi	%0, #-1;	\n\t"
-		"unlock	%0, @%2;	\n\t"
-		"mvtc	%1, psw;	\n\t"
-		"bltz	%0, 2f;		\n\t"
-		LOCK_SECTION_START(".balign 4 \n\t")
-		".fillinsn		\n"
-		"2:			\n\t"
-		"ld	%0, @%2;	\n\t"
-		"bgtz	%0, 1b;		\n\t"
-		"bra	2b;		\n\t"
-		LOCK_SECTION_END
-		: "=&r" (tmp0), "=&r" (tmp1)
-		: "r" (&lock->slock)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r6"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	mb();
-	lock->slock = 1;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- *
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores.  See
- * semaphore.h for details.  -ben
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp0, tmp1;
-
-	/*
-	 * rw->lock :  >0 : unlock
-	 *          : <=0 : lock
-	 *
-	 * for ( ; ; ) {
-	 *   rw->lock -= 1;  <-- need atomic operation
-	 *   if (rw->lock >= 0) break;
-	 *   rw->lock += 1;  <-- need atomic operation
-	 *   for ( ; rw->lock <= 0 ; );
-	 * }
-	 */
-	__asm__ __volatile__ (
-		"# read_lock		\n\t"
-		".fillinsn		\n"
-		"1:			\n\t"
-		"mvfc	%1, psw;	\n\t"
-		"clrpsw	#0x40 -> nop;	\n\t"
-		DCACHE_CLEAR("%0", "r6", "%2")
-		"lock	%0, @%2;	\n\t"
-		"addi	%0, #-1;	\n\t"
-		"unlock	%0, @%2;	\n\t"
-		"mvtc	%1, psw;	\n\t"
-		"bltz	%0, 2f;		\n\t"
-		LOCK_SECTION_START(".balign 4 \n\t")
-		".fillinsn		\n"
-		"2:			\n\t"
-		"clrpsw	#0x40 -> nop;	\n\t"
-		DCACHE_CLEAR("%0", "r6", "%2")
-		"lock	%0, @%2;	\n\t"
-		"addi	%0, #1;		\n\t"
-		"unlock	%0, @%2;	\n\t"
-		"mvtc	%1, psw;	\n\t"
-		".fillinsn		\n"
-		"3:			\n\t"
-		"ld	%0, @%2;	\n\t"
-		"bgtz	%0, 1b;		\n\t"
-		"bra	3b;		\n\t"
-		LOCK_SECTION_END
-		: "=&r" (tmp0), "=&r" (tmp1)
-		: "r" (&rw->lock)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r6"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp0, tmp1, tmp2;
-
-	/*
-	 * rw->lock :  =RW_LOCK_BIAS_STR : unlock
-	 *          : !=RW_LOCK_BIAS_STR : lock
-	 *
-	 * for ( ; ; ) {
-	 *   rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
-	 *   if (rw->lock == 0) break;
-	 *   rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
-	 *   for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
-	 * }
-	 */
-	__asm__ __volatile__ (
-		"# write_lock					\n\t"
-		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
-		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
-		".fillinsn					\n"
-		"1:						\n\t"
-		"mvfc	%2, psw;				\n\t"
-		"clrpsw	#0x40 -> nop;				\n\t"
-		DCACHE_CLEAR("%0", "r7", "%3")
-		"lock	%0, @%3;				\n\t"
-		"sub	%0, %1;					\n\t"
-		"unlock	%0, @%3;				\n\t"
-		"mvtc	%2, psw;				\n\t"
-		"bnez	%0, 2f;					\n\t"
-		LOCK_SECTION_START(".balign 4 \n\t")
-		".fillinsn					\n"
-		"2:						\n\t"
-		"clrpsw	#0x40 -> nop;				\n\t"
-		DCACHE_CLEAR("%0", "r7", "%3")
-		"lock	%0, @%3;				\n\t"
-		"add	%0, %1;					\n\t"
-		"unlock	%0, @%3;				\n\t"
-		"mvtc	%2, psw;				\n\t"
-		".fillinsn					\n"
-		"3:						\n\t"
-		"ld	%0, @%3;				\n\t"
-		"beq	%0, %1, 1b;				\n\t"
-		"bra	3b;					\n\t"
-		LOCK_SECTION_END
-		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
-		: "r" (&rw->lock)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r7"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long tmp0, tmp1;
-
-	__asm__ __volatile__ (
-		"# read_unlock		\n\t"
-		"mvfc	%1, psw;	\n\t"
-		"clrpsw	#0x40 -> nop;	\n\t"
-		DCACHE_CLEAR("%0", "r6", "%2")
-		"lock	%0, @%2;	\n\t"
-		"addi	%0, #1;		\n\t"
-		"unlock	%0, @%2;	\n\t"
-		"mvtc	%1, psw;	\n\t"
-		: "=&r" (tmp0), "=&r" (tmp1)
-		: "r" (&rw->lock)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r6"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned long tmp0, tmp1, tmp2;
-
-	__asm__ __volatile__ (
-		"# write_unlock					\n\t"
-		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
-		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
-		"mvfc	%2, psw;				\n\t"
-		"clrpsw	#0x40 -> nop;				\n\t"
-		DCACHE_CLEAR("%0", "r7", "%3")
-		"lock	%0, @%3;				\n\t"
-		"add	%0, %1;					\n\t"
-		"unlock	%0, @%3;				\n\t"
-		"mvtc	%2, psw;				\n\t"
-		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
-		: "r" (&rw->lock)
-		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r7"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
-	);
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t*)lock;
-	if (atomic_dec_return(count) >= 0)
-		return 1;
-	atomic_inc(count);
-	return 0;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t *)lock;
-	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
-		return 1;
-	atomic_add(RW_LOCK_BIAS, count);
-	return 0;
-}
-
-#endif	/* _ASM_M32R_SPINLOCK_H */
```
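The slock protocol that the deleted assembly implements is easy to lose in the m32r specifics (PSW save/restore via `mvfc`/`mvtc`, interrupt masking via `clrpsw #0x40`, and the `DCACHE_CLEAR` workaround for the M32700 TS1 erratum). As an editorial aid, here is a minimal C11 sketch of just the locking logic, under the assumption that `atomic_exchange`/`atomic_fetch_sub` stand in for the m32r `lock`/`unlock` instruction pair; the `sketch_*` names are hypothetical, not kernel API:

```c
#include <stdatomic.h>

/* slock protocol: 1 = unlocked, <= 0 = locked (possibly contended). */
typedef struct {
	atomic_int slock;	/* initialize to 1 */
} sketch_spinlock_t;

/* Mirrors arch_spin_trylock(): atomically swap in 0 ("locked") and
 * succeed only if the old value was positive, i.e. the lock was free. */
static inline int sketch_spin_trylock(sketch_spinlock_t *lock)
{
	return atomic_exchange(&lock->slock, 0) > 0;
}

/* Mirrors arch_spin_lock(): the CPU that moves slock from 1 to 0 owns
 * the lock; everyone else parks in a read-only spin loop (the "2:"
 * slow path in the assembly) until unlock stores 1, then retries. */
static inline void sketch_spin_lock(sketch_spinlock_t *lock)
{
	for (;;) {
		if (atomic_fetch_sub(&lock->slock, 1) == 1)
			return;
		while (atomic_load(&lock->slock) <= 0)
			;	/* wait for the unlock store before retrying */
	}
}

/* Mirrors arch_spin_unlock(): the store of 1 both releases the lock and
 * resets the negative count left behind by contending CPUs. */
static inline void sketch_spin_unlock(sketch_spinlock_t *lock)
{
	atomic_store(&lock->slock, 1);
}
```

C11's default sequentially consistent ordering is stronger than the original's `mb()` plus plain store, but the control flow is the same.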
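The read-write half relies on the biased-counter technique that the header comment credits to the rw-semaphore code: the counter starts at RW_LOCK_BIAS (0x01000000 in m32r's spinlock_types.h), each reader subtracts 1 and a writer subtracts the entire bias, so the counter drops to exactly 0 only when a writer acquires an otherwise-free lock. The two trylock routines at the end of the file already express this with kernel atomics; a self-contained C11 restatement, again with hypothetical `sketch_*` names, makes the accounting explicit:

```c
#include <stdatomic.h>

#define SKETCH_RW_LOCK_BIAS	0x01000000	/* bias value used by m32r */

typedef struct {
	atomic_int lock;	/* initialize to SKETCH_RW_LOCK_BIAS */
} sketch_rwlock_t;

/* Mirrors arch_read_trylock(): claim one reader unit; a non-positive
 * result means a writer holds or is contending for the lock, so back out. */
static inline int sketch_read_trylock(sketch_rwlock_t *rw)
{
	if (atomic_fetch_sub(&rw->lock, 1) > 0)
		return 1;
	atomic_fetch_add(&rw->lock, 1);
	return 0;
}

/* Mirrors arch_write_trylock(): claim the whole bias; success means the
 * counter held exactly the bias, i.e. no readers and no writer. */
static inline int sketch_write_trylock(sketch_rwlock_t *rw)
{
	if (atomic_fetch_sub(&rw->lock, SKETCH_RW_LOCK_BIAS) ==
	    SKETCH_RW_LOCK_BIAS)
		return 1;
	atomic_fetch_add(&rw->lock, SKETCH_RW_LOCK_BIAS);
	return 0;
}
```

The compensating add in each failure path is what the "2:" slow paths of arch_read_lock() and arch_write_lock() do in assembly before spinning.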