path: root/include/asm-sh
author	Paul Mundt <lethal@linux-sh.org>	2006-12-07 14:33:38 +0300
committer	Paul Mundt <lethal@linux-sh.org>	2006-12-12 02:42:08 +0300
commit	ec723fbe7e19f5a66cea183bca7ca20675631a7a (patch)
tree	2a716c86a4ba9924459c9e6436a31b1acb62d449 /include/asm-sh
parent	a45e724ba07c02bcf3da96ddc4efefbfe10957f5 (diff)
sh: Split out atomic ops logically.

We have a few different ways to do the atomic operations, so split them
out into different headers rather than bloating atomic.h. Kernelspace
gUSA will take this up to a third implementation.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
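The split is resolved at compile time. The rewritten atomic.h (shown in full in the diff below) reduces to this dispatch, with CONFIG_CPU_SH4A selecting the LL/SC variant:

#ifdef CONFIG_CPU_SH4A
#include <asm/atomic-llsc.h>	/* SH-4A: movli.l/movco.l retry loops */
#else
#include <asm/atomic-irq.h>	/* all other parts: IRQ masking */
#endif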
Diffstat (limited to 'include/asm-sh')
-rw-r--r--	include/asm-sh/atomic-irq.h	71
-rw-r--r--	include/asm-sh/atomic-llsc.h	107
-rw-r--r--	include/asm-sh/atomic.h	153
3 files changed, 180 insertions(+), 151 deletions(-)
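Both new headers implement the same interface; only the mechanism differs. For context, a hypothetical caller might look like this (the refcount example is illustrative, not part of the patch; ATOMIC_INIT and the operation names are the kernel's standard atomic API):

static atomic_t refcount = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_add(1, &refcount);
}

static int put_ref(void)
{
	/* atomic_sub_return() hands back the new value, so the
	 * caller can detect the transition to zero. */
	return atomic_sub_return(1, &refcount) == 0;
}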
diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h
new file mode 100644
index 000000000000..74f7943cff6f
--- /dev/null
+++ b/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH_ATOMIC_IRQ_H
+#define __ASM_SH_ATOMIC_IRQ_H
+
+/*
+ * Atomicity here comes from masking local interrupts around the
+ * read-modify-write sequence, which is sufficient on the
+ * uniprocessor parts this header serves.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v += i;
+ local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v -= i;
+ local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp += i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp -= i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v &= ~mask;
+ local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v |= mask;
+ local_irq_restore(flags);
+}
+
+#endif /* __ASM_SH_ATOMIC_IRQ_H */
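The pattern above generalizes: on a uniprocessor, any read-modify-write sequence is atomic as long as no interrupt can fire between the load and the store. A minimal sketch of the shape shared by all six functions (atomic_fetch_add_irq is a hypothetical name, not part of the patch):

static inline int atomic_fetch_add_irq(int i, atomic_t *v)
{
	unsigned long flags;
	int old;

	local_irq_save(flags);		/* nothing can preempt us now */
	old = v->counter;		/* load */
	v->counter = old + i;		/* modify + store */
	local_irq_restore(flags);	/* interrupts back on */

	return old;			/* fetch-and-add semantics */
}

This is only correct because the parts using this header are uniprocessor; on SMP, another CPU could still race in between the load and the store.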
diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h
new file mode 100644
index 000000000000..4b00b78e3f4f
--- /dev/null
+++ b/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add_return \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub_return \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_clear_mask \n"
+" and %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (~mask), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_set_mask \n"
+" or %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (mask), "r" (&v->counter)
+ : "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
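movli.l/movco.l form a load-locked/store-conditional pair: movco.l performs the store (and sets the T bit) only if the reservation taken by movli.l is still intact, and "bf 1b" retries the whole sequence otherwise. In portable C the same retry loop is usually expressed with a compare-and-swap; a rough analogue using GCC's __atomic builtins (illustrative only, not how the kernel builds these):

static inline int atomic_add_return_cas(int i, atomic_t *v)
{
	int old, new;

	do {
		old = v->counter;	/* like movli.l: take a snapshot */
		new = old + i;
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	/* like "bf 1b": loop until our store wins the race */
	return new;
}

The synco in the *_return variants is an SH-4A memory barrier; it gives those operations the full-barrier semantics the kernel expects of value-returning atomics.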
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 28305c3cbddf..e12570b9339d 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
#include <linux/compiler.h>
#include <asm/system.h>
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
+#include <asm/atomic-llsc.h>
#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v += i;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v -= i;
- local_irq_restore(flags);
+#include <asm/atomic-irq.h>
#endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add_return \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp += i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub_return \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp -= i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
-
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_clear_mask \n"
-" and %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (~mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v &= ~mask;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_set_mask \n"
-" or %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v |= mask;
- local_irq_restore(flags);
-#endif
-}
-
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
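Because the SH atomics serialize on their own, these hooks can be plain compiler barriers rather than hardware ones. A typical call-site pairing (illustrative; obj and refcnt are hypothetical names):

	/* order earlier stores before the decrement is observed */
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->refcnt);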