Diffstat (limited to 'arch/arc/include/asm/cmpxchg.h')
-rw-r--r--  arch/arc/include/asm/cmpxchg.h | 76
1 file changed, 68 insertions(+), 8 deletions(-)
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index a444be67cd53..d819de1c5d10 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 	return prev;
 }
 
-#else
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
 
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
@@ -64,23 +64,48 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 	return prev;
 }
 
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+	/*
+	 * Explicit full memory barrier needed before/after
+	 */
+	smp_mb();
+
+	write_aux_reg(CTOP_AUX_GPA1, expected);
+
+	__asm__ __volatile__(
+	"	mov r2, %0\n"
+	"	mov r3, %1\n"
+	"	.word %2\n"
+	"	mov %0, r2"
+	: "+r"(new)
+	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
+	: "r2", "r3", "memory");
+
+	smp_mb();
+
+	return new;
+}
+
 #endif /* CONFIG_ARC_HAS_LLSC */
 
 #define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
 				(unsigned long)(o), (unsigned long)(n)))
 
 /*
- * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
- * just to gaurantee semantics.
- * atomic_cmpxchg() needs to use the same locks as it's other atomic siblings
- * which also happens to be atomic_ops_lock.
- *
- * Thus despite semantically being different, implementation of atomic_cmpxchg()
- * is same as cmpxchg().
+ * atomic_cmpxchg is same as cmpxchg
+ *   LLSC: only different in data-type, semantics are exactly same
+ *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
+ *         semantics, and this lock also happens to be used by atomic_*()
  */
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
 /*
  * xchg (reg with memory) based on "Native atomic" EX insn
  */
@@ -143,6 +168,41 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 
 #endif
 
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+				   int size)
+{
+	extern unsigned long __xchg_bad_pointer(void);
+
+	switch (size) {
+	case 4:
+		/*
+		 * Explicit full memory barrier needed before/after
+		 */
+		smp_mb();
+
+		__asm__ __volatile__(
+		"	mov r2, %0\n"
+		"	mov r3, %1\n"
+		"	.word %2\n"
+		"	mov %0, r2\n"
+		: "+r"(val)
+		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
+		: "r2", "r3", "memory");
+
+		smp_mb();
+
+		return val;
+	}
+	return __xchg_bad_pointer();
+}
+
+#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+						sizeof(*(ptr))))
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
 /*
  * "atomic" variant of xchg()
  * REQ: It needs to follow the same serialization rules as other atomic_xxx()
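
For readers unfamiliar with the primitive being wired up here: cmpxchg(ptr, old, new) atomically stores new at ptr only if the value currently there equals old, and always returns the value that was found, which is why callers normally wrap it in a retry loop. The sketch below is not part of the patch; it is a minimal userspace analogue that uses the GCC/Clang __sync_val_compare_and_swap() builtin to stand in for the kernel macro, purely to illustrate the calling convention this header provides. The function and variable names (cas_add, counter) are made up for the example.

#include <stdio.h>

/*
 * Illustrative sketch (not kernel code): add 'delta' to *p with a
 * compare-and-exchange retry loop.  __sync_val_compare_and_swap(p, old, new)
 * returns the value previously at *p and performs the store only when that
 * value still equals 'old' -- the same contract as the kernel's cmpxchg().
 */
static unsigned long cas_add(unsigned long *p, unsigned long delta)
{
	unsigned long old, prev;

	do {
		old = *p;			/* snapshot the current value    */
		prev = __sync_val_compare_and_swap(p, old, old + delta);
	} while (prev != old);			/* lost a race: reload and retry */

	return old + delta;
}

int main(void)
{
	unsigned long counter = 40;

	printf("counter = %lu\n", cas_add(&counter, 2));	/* prints 42 */
	return 0;
}

The retry-loop pattern is how cmpxchg() is normally consumed; the per-configuration blocks in the diff above only change how the single atomic step in the middle is carried out (LLSC, a spinlock-protected fallback, or the EZNPS CTOP exchange instruction with explicit smp_mb() on either side).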