author    Will Deacon <will.deacon@arm.com>    2010-01-20 21:05:07 +0300
committer Russell King <rmk+kernel@arm.linux.org.uk>    2010-02-16 00:39:50 +0300
commit    24b44a66fa240f6fc63343623ca730d39754047e (patch)
tree      c271eea37b9559a06b11eb87743856be9a254663 /arch/arm/include
parent    a9221de66d2d94e6e34c3f56bbdd744935020737 (diff)
download  linux-24b44a66fa240f6fc63343623ca730d39754047e.tar.xz
ARM: 5889/1: Add atomic64 routines for ARMv6k and above.
In preparation for perf-events support, ARM needs to support atomic64_t operations. v6k and above support the ldrexd and strexd instructions to do just that.

This patch adds atomic64 support to the ARM architecture. v6k and above make use of new instructions whilst older cores fall back on the generic solution using spinlocks. If and when v7-M cores are supported by Linux, they will need to fall back on the spinlock implementation too.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
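
(For context, a hedged illustration rather than part of the commit: the snippet below sketches how kernel code might use the atomic64_t interface this patch introduces. The counter and function names are hypothetical.)

#include <asm/atomic.h>	/* the header patched below */

static atomic64_t sample_count = ATOMIC64_INIT(0);

static u64 record_sample(void)
{
	/* On v6k and above this compiles to an ldrexd/strexd retry loop. */
	atomic64_add(1LL, &sample_count);

	/* A single ldrexd yields an atomic 64-bit snapshot of the counter. */
	return atomic64_read(&sample_count);
}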
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/atomic.h | 228
1 file changed, 228 insertions, 0 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234e..e8ddec2cb158 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+ u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter)
+ );
+
+ return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ atomic64_set\n"
+"1: ldrexd %0, %H0, [%1]\n"
+" strexd %0, %2, %H2, [%1]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_add\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" adds %0, %0, %3\n"
+" adc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" adds %0, %0, %3\n"
+" adc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_sub\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" subs %0, %0, %3\n"
+" sbc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_sub_return\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" subs %0, %0, %3\n"
+" sbc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+ u64 oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg\n"
+ "ldrexd %1, %H1, [%2]\n"
+ "mov %0, #0\n"
+ "teq %1, %3\n"
+ "teqeq %H1, %H3\n"
+ "strexdeq %0, %4, %H4, [%2]"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_xchg\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %1, %3, %H3, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&ptr->counter), "r" (new)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" subs %0, %0, #1\n"
+" sbc %H0, %H0, #0\n"
+" teq %H0, #0\n"
+" bmi 2f\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+ u64 val;
+ unsigned long tmp;
+ int ret = 1;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_unless\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" teq %0, %4\n"
+" teqeq %H0, %H4\n"
+" moveq %1, #0\n"
+" beq 2f\n"
+" adds %0, %0, %5\n"
+" adc %H0, %H0, %H5\n"
+" strexd %2, %0, %H0, [%3]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (val), "=&r" (ret), "=&r" (tmp)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+
+ if (ret)
+ smp_mb();
+
+ return ret;
+}
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
#include <asm-generic/atomic-long.h>
#endif
#endif
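
(Usage note, again a hedged sketch and not part of the commit: the add_unless-based helpers added above support the usual "take a reference only while the object is still live" idiom. The structure and function names here are hypothetical.)

#include <linux/slab.h>		/* kfree() */
#include <asm/atomic.h>

struct sample_obj {
	atomic64_t refcount;
	/* ... payload ... */
};

static int sample_obj_get(struct sample_obj *obj)
{
	/* atomic64_add_unless(v, 1, 0) returns 0 once the count has hit zero. */
	return atomic64_inc_not_zero(&obj->refcount);
}

static void sample_obj_put(struct sample_obj *obj)
{
	/* atomic64_sub_return(1, v) == 0 means the last reference was dropped. */
	if (atomic64_dec_and_test(&obj->refcount))
		kfree(obj);
}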