author		Linus Torvalds <torvalds@linux-foundation.org>	2013-08-17 03:52:29 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-08-17 03:52:29 +0400
commit		2620bf06f168527e8d5159d6c21ea80e60b663fd (patch)
tree		0ce69c6150ac8e5fd929cfc6cd34d2b4ad1cabc1 /arch/arm/include
parent		359d16ca1bd6adcd9f031da35d807f9c37ec6a6e (diff)
parent		2a2822475d0e734adffab72644329d9c042ce2e1 (diff)
download	linux-2620bf06f168527e8d5159d6c21ea80e60b663fd.tar.xz
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
"The usual collection of random fixes. Also some further fixes to the
last set of security fixes, and some more from Will (which you may
already have in a slightly different form)"
* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7807/1: kexec: validate CPU hotplug support
ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
ARM: 7811/1: locks: use early clobber in arch_spin_trylock
ARM: 7810/1: perf: Fix array out of bounds access in armpmu_map_hw_event()
ARM: 7809/1: perf: fix event validation for software group leaders
ARM: Fix FIQ code on VIVT CPUs
ARM: Fix !kuser helpers case
ARM: Fix the world famous typo with is_gate_vma()
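The kexec fix (7807/1) shows up in this diffstat only as the new platform_can_cpu_hotplug() declaration in smp_plat.h. A minimal sketch of how a kexec path might consume such a hook follows; the caller shown is an assumption made for illustration and is not part of this diff:

/*
 * Sketch only: the check below is assumed, not shown in this diff.
 * kexec must be able to quiesce secondary CPUs, so if more than one
 * CPU may be running and the platform cannot hot-unplug CPUs, fail
 * the prepare step instead of attempting an unreliable kexec.
 */
int machine_kexec_prepare_sketch(void)
{
	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
		return -EINVAL;

	return 0;
}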
Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/smp_plat.h |  3
-rw-r--r--	arch/arm/include/asm/spinlock.h | 51
2 files changed, 34 insertions(+), 20 deletions(-)
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 		"	subs	%1, %0, %0, ror #16\n"
 		"	addeq	%0, %0, %4\n"
 		"	strexeq	%2, %0, [%3]"
-		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "=&r" (slock), "=&r" (contended), "=&r" (res)
 		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 		: "cc");
 	} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
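The one-character change in arch_spin_trylock() (7811/1) turns "=r" into "=&r", marking res as early-clobbered: strexeq writes res before the asm has finished with its input operands, so without the '&' the compiler is free to allocate res the same register as the lock address, corrupting the pointer for a subsequent retry. A minimal, hypothetical sketch of the same hazard (the function and its asm are invented for illustration, not taken from this patch):

/*
 * Hypothetical example: the output is written before the last use of
 * the input, so without early clobber ('&') GCC may assign 'val' and
 * 'addr' the same register, and the first ldr would then corrupt the
 * address before the second ldr reads through it.
 */
static inline unsigned int load_twice(unsigned int *addr)
{
	unsigned int val;

	__asm__ __volatile__(
	"	ldr	%0, [%1]\n"	/* writes %0 early ...     */
	"	ldr	%0, [%1]"	/* ... but %1 still needed */
	: "=&r" (val)			/* early clobber required  */
	: "r" (addr));

	return val;
}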
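The rwlock rework (7812/1) applies to arch_write_trylock() and arch_read_trylock() the same pattern arch_spin_trylock() already uses: strex can fail spuriously (for example when the exclusive monitor is cleared by an interrupt between ldrex and strex), so a trylock must loop until the store-exclusive gives a definitive answer, and report failure only when the lock was genuinely observed as held. Below is a user-space sketch of the idiom using GCC's weak compare-exchange builtin, which compiles to ldrex/strex on ARM and shares the spurious-failure property; the function name and values are illustrative only:

#include <stdbool.h>

/*
 * Sketch of the retry idiom under the assumptions above.  A weak
 * compare-exchange, like strex, may fail even when the lock is free;
 * in that case 'expected' reloads as 0 and the loop retries, which
 * mirrors the new "} while (res)" in the patch.
 */
static bool write_trylock_sketch(unsigned int *lock)
{
	unsigned int expected;

	do {
		expected = 0;	/* only take the lock if it is free */
		if (__atomic_compare_exchange_n(lock, &expected, 0x80000000,
						true /* weak */,
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return true;	/* store-exclusive succeeded */
	} while (expected == 0);	/* spurious failure: retry   */

	return false;			/* genuinely contended       */
}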