Diffstat (limited to 'arch/m68k/include')
-rw-r--r--  arch/m68k/include/asm/atomic.h      |  13
-rw-r--r--  arch/m68k/include/asm/bitops.h      | 531
-rw-r--r--  arch/m68k/include/asm/bitops_mm.h   | 501
-rw-r--r--  arch/m68k/include/asm/bitops_no.h   | 333
-rw-r--r--  arch/m68k/include/asm/delay.h       |  97
-rw-r--r--  arch/m68k/include/asm/delay_mm.h    |  57
-rw-r--r--  arch/m68k/include/asm/delay_no.h    |  76
-rw-r--r--  arch/m68k/include/asm/entry_no.h    |  12
-rw-r--r--  arch/m68k/include/asm/hardirq.h     |  35
-rw-r--r--  arch/m68k/include/asm/hardirq_mm.h  |  16
-rw-r--r--  arch/m68k/include/asm/hardirq_no.h  |  19
-rw-r--r--  arch/m68k/include/asm/irq.h         |  11
-rw-r--r--  arch/m68k/include/asm/machdep.h     |   1
-rw-r--r--  arch/m68k/include/asm/module.h      |  31
-rw-r--r--  arch/m68k/include/asm/posix_types.h |   2
-rw-r--r--  arch/m68k/include/asm/processor.h   |   4
-rw-r--r--  arch/m68k/include/asm/ptrace.h      |   1
-rw-r--r--  arch/m68k/include/asm/signal.h      |  15
-rw-r--r--  arch/m68k/include/asm/system.h      | 194
-rw-r--r--  arch/m68k/include/asm/system_mm.h   | 193
-rw-r--r--  arch/m68k/include/asm/system_no.h   | 153
-rw-r--r--  arch/m68k/include/asm/traps.h       |   1
22 files changed, 879 insertions, 1417 deletions
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 03ae3d14cd4a..65c6be6c8180 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -169,21 +169,21 @@ static inline int atomic_add_negative(int i, atomic_t *v)
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
: "=d" (c), "+m" (*v)
- : "id" (i));
+ : ASM_DI (i));
return c != 0;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
- __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+ __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
- __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+ __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -195,10 +195,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
@@ -206,6 +205,4 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
-#include <asm-generic/atomic64.h>
#endif /* __ARCH_M68K_ATOMIC __ */
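A note on the __atomic_add_unless() hunk above: the helper now returns the value it observed in the counter rather than a success flag, so boolean forms such as atomic_add_unless() can be built on top of it generically. The following user-space sketch only models that return-value change with plain ints; it ignores atomicity entirely, and the names are illustrative rather than taken from the generic headers:

/* Sketch: old-value semantics vs. boolean semantics, non-atomic model. */
#include <stdio.h>

static int sketch_add_unless_old(int *v, int a, int u)
{
    int c = *v;                 /* value observed in the counter */
    if (c != u)
        *v = c + a;             /* add only if it was not 'u' */
    return c;                   /* return what was seen, like __atomic_add_unless() */
}

static int sketch_add_unless_bool(int *v, int a, int u)
{
    return sketch_add_unless_old(v, a, u) != u;  /* boolean wrapper on top */
}

int main(void)
{
    int v = 0;
    printf("%d %d\n", sketch_add_unless_bool(&v, 1, 0), v);  /* 0 0: refused */
    v = 3;
    printf("%d %d\n", sketch_add_unless_bool(&v, 1, 0), v);  /* 1 4: added */
    return 0;
}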
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index ce163abddaba..c6baa913592a 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -1,5 +1,530 @@
-#ifdef __uClinux__
-#include "bitops_no.h"
+#ifndef _M68K_BITOPS_H
+#define _M68K_BITOPS_H
+/*
+ * Copyright 1992, Linus Torvalds.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+
+/*
+ * Bit access functions vary across the ColdFire and 68k families.
+ * So we will break them out here, and then macro in the ones we want.
+ *
+ * ColdFire - supports standard bset/bclr/bchg with register operand only
+ * 68000 - supports standard bset/bclr/bchg with memory operand
+ * >= 68020 - also supports the bfset/bfclr/bfchg instructions
+ *
+ * Although it is possible to use only the bset/bclr/bchg with register
+ * operands on all platforms you end up with larger generated code.
+ * So we use the best form possible on a given platform.
+ */
+
+static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bset %1,(%0)"
+ :
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+}
+
+static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bset %1,%0"
+ : "+m" (*p)
+ : "di" (nr & 7));
+}
+
+static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+ __asm__ __volatile__ ("bfset %1{%0:#1}"
+ :
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
+#else
+#define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bset_mem_set_bit(nr, vaddr) : \
+ bfset_mem_set_bit(nr, vaddr))
+#endif
+
+#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
+
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bclr %1,(%0)"
+ :
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+}
+
+static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bclr %1,%0"
+ : "+m" (*p)
+ : "di" (nr & 7));
+}
+
+static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+ __asm__ __volatile__ ("bfclr %1{%0:#1}"
+ :
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
+#else
+#define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bclr_mem_clear_bit(nr, vaddr) : \
+ bfclr_mem_clear_bit(nr, vaddr))
+#endif
+
+#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
+
+
+static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bchg %1,(%0)"
+ :
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+}
+
+static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bchg %1,%0"
+ : "+m" (*p)
+ : "di" (nr & 7));
+}
+
+static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+ __asm__ __volatile__ ("bfchg %1{%0:#1}"
+ :
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
+#else
+#define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bchg_mem_change_bit(nr, vaddr) : \
+ bfchg_mem_change_bit(nr, vaddr))
+#endif
+
+#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
+
+
+static inline int test_bit(int nr, const unsigned long *vaddr)
+{
+ return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
+
+
+static inline int bset_reg_test_and_set_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bset %2,(%1); sne %0"
+ : "=d" (retval)
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+ return retval;
+}
+
+static inline int bset_mem_test_and_set_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bset %2,%1; sne %0"
+ : "=d" (retval), "+m" (*p)
+ : "di" (nr & 7));
+ return retval;
+}
+
+static inline int bfset_mem_test_and_set_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
+ : "=d" (retval)
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+ return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
+#else
+#define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bset_mem_test_and_set_bit(nr, vaddr) : \
+ bfset_mem_test_and_set_bit(nr, vaddr))
+#endif
+
+#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
+
+
+static inline int bclr_reg_test_and_clear_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bclr %2,(%1); sne %0"
+ : "=d" (retval)
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+ return retval;
+}
+
+static inline int bclr_mem_test_and_clear_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bclr %2,%1; sne %0"
+ : "=d" (retval), "+m" (*p)
+ : "di" (nr & 7));
+ return retval;
+}
+
+static inline int bfclr_mem_test_and_clear_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
+ : "=d" (retval)
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+ return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
+#else
+#define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bclr_mem_test_and_clear_bit(nr, vaddr) : \
+ bfclr_mem_test_and_clear_bit(nr, vaddr))
+#endif
+
+#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
+
+
+static inline int bchg_reg_test_and_change_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bchg %2,(%1); sne %0"
+ : "=d" (retval)
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+ return retval;
+}
+
+static inline int bchg_mem_test_and_change_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bchg %2,%1; sne %0"
+ : "=d" (retval), "+m" (*p)
+ : "di" (nr & 7));
+ return retval;
+}
+
+static inline int bfchg_mem_test_and_change_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
+ : "=d" (retval)
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+ return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
+#else
+#define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bchg_mem_test_and_change_bit(nr, vaddr) : \
+ bfchg_mem_test_and_change_bit(nr, vaddr))
+#endif
+
+#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+
+
+/*
+ * The true 68020 and more advanced processors support the "bfffo"
+ * instruction for finding bits. ColdFire and simple 68000 parts
+ * (including CPU32) do not support this. They simply use the generic
+ * functions.
+ */
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffz.h>
+#else
+
+static inline int find_first_zero_bit(const unsigned long *vaddr,
+ unsigned size)
+{
+ const unsigned long *p = vaddr;
+ int res = 32;
+ unsigned int words;
+ unsigned long num;
+
+ if (!size)
+ return 0;
+
+ words = (size + 31) >> 5;
+ while (!(num = ~*p++)) {
+ if (!--words)
+ goto out;
+ }
+
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ res ^= 31;
+out:
+ res += ((long)p - (long)vaddr - 4) * 8;
+ return res < size ? res : size;
+}
+#define find_first_zero_bit find_first_zero_bit
+
+static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
+ int offset)
+{
+ const unsigned long *p = vaddr + (offset >> 5);
+ int bit = offset & 31UL, res;
+
+ if (offset >= size)
+ return size;
+
+ if (bit) {
+ unsigned long num = ~*p++ & (~0UL << bit);
+ offset -= bit;
+
+ /* Look for zero in first longword */
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ if (res < 32) {
+ offset += res ^ 31;
+ return offset < size ? offset : size;
+ }
+ offset += 32;
+
+ if (offset >= size)
+ return size;
+ }
+ /* No zero yet, search remaining full bytes for a zero */
+ return offset + find_first_zero_bit(p, size - offset);
+}
+#define find_next_zero_bit find_next_zero_bit
+
+static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
+{
+ const unsigned long *p = vaddr;
+ int res = 32;
+ unsigned int words;
+ unsigned long num;
+
+ if (!size)
+ return 0;
+
+ words = (size + 31) >> 5;
+ while (!(num = *p++)) {
+ if (!--words)
+ goto out;
+ }
+
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ res ^= 31;
+out:
+ res += ((long)p - (long)vaddr - 4) * 8;
+ return res < size ? res : size;
+}
+#define find_first_bit find_first_bit
+
+static inline int find_next_bit(const unsigned long *vaddr, int size,
+ int offset)
+{
+ const unsigned long *p = vaddr + (offset >> 5);
+ int bit = offset & 31UL, res;
+
+ if (offset >= size)
+ return size;
+
+ if (bit) {
+ unsigned long num = *p++ & (~0UL << bit);
+ offset -= bit;
+
+ /* Look for one in first longword */
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ if (res < 32) {
+ offset += res ^ 31;
+ return offset < size ? offset : size;
+ }
+ offset += 32;
+
+ if (offset >= size)
+ return size;
+ }
+ /* No one yet, search remaining full bytes for a one */
+ return offset + find_first_bit(p, size - offset);
+}
+#define find_next_bit find_next_bit
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ int res;
+
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (~word & -~word));
+ return res ^ 31;
+}
+
+#endif
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+
+/*
+ * The newer ColdFire family members support a "bitrev" instruction
+ * and we can use that to implement a fast ffs. Older Coldfire parts,
+ * and normal 68000 parts don't have anything special, so we use the
+ * generic functions for those.
+ */
+#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
+ !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
+static inline int __ffs(int x)
+{
+ __asm__ __volatile__ ("bitrev %0; ff1 %0"
+ : "=d" (x)
+ : "0" (x));
+ return x;
+}
+
+static inline int ffs(int x)
+{
+ if (!x)
+ return 0;
+ return __ffs(x) + 1;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+
#else
-#include "bitops_mm.h"
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+ int cnt;
+
+ __asm__ ("bfffo %1{#0:#0},%0"
+ : "=d" (cnt)
+ : "dm" (x & -x));
+ return 32 - cnt;
+}
+#define __ffs(x) (ffs(x) - 1)
+
+/*
+ * fls: find last bit set.
+ */
+static inline int fls(int x)
+{
+ int cnt;
+
+ __asm__ ("bfffo %1{#0,#0},%0"
+ : "=d" (cnt)
+ : "dm" (x));
+ return 32 - cnt;
+}
+
+static inline int __fls(int x)
+{
+ return fls(x) - 1;
+}
+
#endif
+
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_BITOPS_H */
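A note on the byte addressing used by the bset/bclr/bchg helpers above: test_bit() numbers bits within each 32-bit word (vaddr[nr >> 5], mask 1UL << (nr & 31)), while the byte-wide instructions take a byte address plus a bit number 0..7, and on the big-endian m68k byte 0 of a word carries bits 31..24. The stand-alone program below only prints the (nr ^ 31) mapping to show that the two schemes select the same bit; it is an illustration of the arithmetic, not kernel code:

/* Sketch of the (nr ^ 31) addressing: byte offset (nr ^ 31) / 8 with bit
 * (nr & 7) picks the same bit as word mask 1UL << (nr & 31) when the word
 * is stored big-endian (byte 0 holds word bits 31..24). */
#include <stdio.h>

int main(void)
{
    for (int nr = 0; nr < 32; nr += 5) {
        int byte = (nr ^ 31) / 8;           /* byte within the big-endian word */
        int bit  = nr & 7;                  /* bit within that byte */
        int back = (3 - byte) * 8 + bit;    /* word bit reconstructed from (byte, bit) */
        printf("nr=%2d -> byte %d, bit %d (word bit %2d)\n", nr, byte, bit, back);
    }
    return 0;
}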
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
deleted file mode 100644
index 89cf5b814a4d..000000000000
--- a/arch/m68k/include/asm/bitops_mm.h
+++ /dev/null
@@ -1,501 +0,0 @@
-#ifndef _M68K_BITOPS_H
-#define _M68K_BITOPS_H
-/*
- * Copyright 1992, Linus Torvalds.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-
-/*
- * Require 68020 or better.
- *
- * They use the standard big-endian m680x0 bit ordering.
- */
-
-#define test_and_set_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_test_and_set_bit(nr, vaddr) : \
- __generic_test_and_set_bit(nr, vaddr))
-
-#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
-
-static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- char retval;
-
- __asm__ __volatile__ ("bset %2,%1; sne %0"
- : "=d" (retval), "+m" (*p)
- : "di" (nr & 7));
-
- return retval;
-}
-
-static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
-{
- char retval;
-
- __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
- : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
- return retval;
-}
-
-#define set_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_set_bit(nr, vaddr) : \
- __generic_set_bit(nr, vaddr))
-
-#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
-
-static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- __asm__ __volatile__ ("bset %1,%0"
- : "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
-{
- __asm__ __volatile__ ("bfset %1{%0:#1}"
- : : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_clear_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_test_and_clear_bit(nr, vaddr) : \
- __generic_test_and_clear_bit(nr, vaddr))
-
-#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
-
-static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- char retval;
-
- __asm__ __volatile__ ("bclr %2,%1; sne %0"
- : "=d" (retval), "+m" (*p)
- : "di" (nr & 7));
-
- return retval;
-}
-
-static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
- char retval;
-
- __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
- : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
- return retval;
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-#define clear_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_clear_bit(nr, vaddr) : \
- __generic_clear_bit(nr, vaddr))
-#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
-
-static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- __asm__ __volatile__ ("bclr %1,%0"
- : "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
-{
- __asm__ __volatile__ ("bfclr %1{%0:#1}"
- : : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_change_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_test_and_change_bit(nr, vaddr) : \
- __generic_test_and_change_bit(nr, vaddr))
-
-#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
-#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
-
-static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- char retval;
-
- __asm__ __volatile__ ("bchg %2,%1; sne %0"
- : "=d" (retval), "+m" (*p)
- : "di" (nr & 7));
-
- return retval;
-}
-
-static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
-{
- char retval;
-
- __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
- : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
- return retval;
-}
-
-#define change_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_change_bit(nr, vaddr) : \
- __generic_change_bit(nr, vaddr))
-
-static inline void __constant_change_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- __asm__ __volatile__ ("bchg %1,%0"
- : "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_change_bit(int nr, unsigned long *vaddr)
-{
- __asm__ __volatile__ ("bfchg %1{%0:#1}"
- : : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-static inline int test_bit(int nr, const unsigned long *vaddr)
-{
- return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
-}
-
-static inline int find_first_zero_bit(const unsigned long *vaddr,
- unsigned size)
-{
- const unsigned long *p = vaddr;
- int res = 32;
- unsigned int words;
- unsigned long num;
-
- if (!size)
- return 0;
-
- words = (size + 31) >> 5;
- while (!(num = ~*p++)) {
- if (!--words)
- goto out;
- }
-
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- res ^= 31;
-out:
- res += ((long)p - (long)vaddr - 4) * 8;
- return res < size ? res : size;
-}
-#define find_first_zero_bit find_first_zero_bit
-
-static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
- int offset)
-{
- const unsigned long *p = vaddr + (offset >> 5);
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- if (bit) {
- unsigned long num = ~*p++ & (~0UL << bit);
- offset -= bit;
-
- /* Look for zero in first longword */
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- if (res < 32) {
- offset += res ^ 31;
- return offset < size ? offset : size;
- }
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No zero yet, search remaining full bytes for a zero */
- return offset + find_first_zero_bit(p, size - offset);
-}
-#define find_next_zero_bit find_next_zero_bit
-
-static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
-{
- const unsigned long *p = vaddr;
- int res = 32;
- unsigned int words;
- unsigned long num;
-
- if (!size)
- return 0;
-
- words = (size + 31) >> 5;
- while (!(num = *p++)) {
- if (!--words)
- goto out;
- }
-
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- res ^= 31;
-out:
- res += ((long)p - (long)vaddr - 4) * 8;
- return res < size ? res : size;
-}
-#define find_first_bit find_first_bit
-
-static inline int find_next_bit(const unsigned long *vaddr, int size,
- int offset)
-{
- const unsigned long *p = vaddr + (offset >> 5);
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- if (bit) {
- unsigned long num = *p++ & (~0UL << bit);
- offset -= bit;
-
- /* Look for one in first longword */
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- if (res < 32) {
- offset += res ^ 31;
- return offset < size ? offset : size;
- }
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No one yet, search remaining full bytes for a one */
- return offset + find_first_bit(p, size - offset);
-}
-#define find_next_bit find_next_bit
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
- int res;
-
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (~word & -~word));
- return res ^ 31;
-}
-
-#ifdef __KERNEL__
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-static inline int ffs(int x)
-{
- int cnt;
-
- asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));
-
- return 32 - cnt;
-}
-#define __ffs(x) (ffs(x) - 1)
-
-/*
- * fls: find last bit set.
- */
-
-static inline int fls(int x)
-{
- int cnt;
-
- asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));
-
- return 32 - cnt;
-}
-
-static inline int __fls(int x)
-{
- return fls(x) - 1;
-}
-
-#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-/* Bitmap functions for the little endian bitmap. */
-
-static inline void __set_bit_le(int nr, void *addr)
-{
- __set_bit(nr ^ 24, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
- __clear_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
- return __test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
- return test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
- return __test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
- return test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_bit_le(int nr, const void *vaddr)
-{
- const unsigned char *p = vaddr;
- return (p[nr >> 3] & (1U << (nr & 7))) != 0;
-}
-
-static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
-{
- const unsigned long *p = vaddr, *addr = vaddr;
- int res = 0;
- unsigned int words;
-
- if (!size)
- return 0;
-
- words = (size >> 5) + ((size & 31) > 0);
- while (*p++ == ~0UL) {
- if (--words == 0)
- goto out;
- }
-
- --p;
- for (res = 0; res < 32; res++)
- if (!test_bit_le(res, p))
- break;
-out:
- res += (p - addr) * 32;
- return res < size ? res : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- p += offset >> 5;
-
- if (bit) {
- offset -= bit;
- /* Look for zero in first longword */
- for (res = bit; res < 32; res++)
- if (!test_bit_le(res, p)) {
- offset += res;
- return offset < size ? offset : size;
- }
- p++;
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No zero yet, search remaining full bytes for a zero */
- return offset + find_first_zero_bit_le(p, size - offset);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline int find_first_bit_le(const void *vaddr, unsigned size)
-{
- const unsigned long *p = vaddr, *addr = vaddr;
- int res = 0;
- unsigned int words;
-
- if (!size)
- return 0;
-
- words = (size >> 5) + ((size & 31) > 0);
- while (*p++ == 0UL) {
- if (--words == 0)
- goto out;
- }
-
- --p;
- for (res = 0; res < 32; res++)
- if (test_bit_le(res, p))
- break;
-out:
- res += (p - addr) * 32;
- return res < size ? res : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- p += offset >> 5;
-
- if (bit) {
- offset -= bit;
- /* Look for one in first longword */
- for (res = bit; res < 32; res++)
- if (test_bit_le(res, p)) {
- offset += res;
- return offset < size ? offset : size;
- }
- p++;
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No set bit yet, search remaining full bytes for a set bit */
- return offset + find_first_bit_le(p, size - offset);
-}
-#define find_next_bit_le find_next_bit_le
-
-/* Bitmap functions for the ext2 filesystem. */
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit_le(nr, addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit_le(nr, addr)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
deleted file mode 100644
index 72e85acdd7bd..000000000000
--- a/arch/m68k/include/asm/bitops_no.h
+++ /dev/null
@@ -1,333 +0,0 @@
-#ifndef _M68KNOMMU_BITOPS_H
-#define _M68KNOMMU_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <linux/compiler.h>
-#include <asm/byteorder.h> /* swab32 */
-
-#ifdef __KERNEL__
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#if defined (__mcfisaaplus__) || defined (__mcfisac__)
-static inline int ffs(unsigned int val)
-{
- if (!val)
- return 0;
-
- asm volatile(
- "bitrev %0\n\t"
- "ff1 %0\n\t"
- : "=d" (val)
- : "0" (val)
- );
- val++;
- return val;
-}
-
-static inline int __ffs(unsigned int val)
-{
- asm volatile(
- "bitrev %0\n\t"
- "ff1 %0\n\t"
- : "=d" (val)
- : "0" (val)
- );
- return val;
-}
-
-#else
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/__ffs.h>
-#endif
-
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffz.h>
-
-static __inline__ void set_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0", "cc");
-#else
- __asm__ __volatile__ ("bset %1,%0"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- : "cc");
-#endif
-}
-
-#define __set_bit(nr, addr) set_bit(nr, addr)
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0", "cc");
-#else
- __asm__ __volatile__ ("bclr %1,%0"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- : "cc");
-#endif
-}
-
-#define __clear_bit(nr, addr) clear_bit(nr, addr)
-
-static __inline__ void change_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0", "cc");
-#else
- __asm__ __volatile__ ("bchg %1,%0"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- : "cc");
-#endif
-}
-
-#define __change_bit(nr, addr) change_bit(nr, addr)
-
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bset %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
-
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bclr %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
-
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bchg %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
-
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
-{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
-{
- int * a = (int *) addr;
- int mask;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- return ((mask & *a) != 0);
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-
-static inline void __set_bit_le(int nr, void *addr)
-{
- __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
- __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, volatile void *addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bset %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bclr %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#include <asm-generic/bitops/ext2-atomic.h>
-
-static inline int test_bit_le(int nr, const volatile void *addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
- : "=d" (retval)
- : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("btst %2,%1; sne %0"
- : "=d" (retval)
- : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-
-static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease performance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-extern unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
-#endif /* _M68KNOMMU_BITOPS_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index d2598e3dd7b2..9c09becfd4c9 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -1,5 +1,96 @@
-#ifdef __uClinux__
-#include "delay_no.h"
+#ifndef _M68K_DELAY_H
+#define _M68K_DELAY_H
+
+#include <asm/param.h>
+
+/*
+ * Copyright (C) 1994 Hamish Macdonald
+ * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+#if defined(CONFIG_COLDFIRE)
+/*
+ * The ColdFire runs the delay loop at significantly different speeds
+ * depending upon long word alignment or not. We'll pad it to
+ * long word alignment which is the faster version.
+ * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
+ * than using a NOP (0x4e71) instruction because it executes in one
+ * cycle not three and doesn't allow for an arbitrary delay waiting
+ * for bus cycles to finish. Also fp/a6 isn't likely to cause a
+ * stall waiting for the register to become valid if such is added
+ * to the coldfire at some stage.
+ */
+#define DELAY_ALIGN ".balignw 4, 0x4a8e\n\t"
#else
-#include "delay_mm.h"
+/*
+ * No instruction alignment required for other m68k types.
+ */
+#define DELAY_ALIGN
#endif
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__ (
+ DELAY_ALIGN
+ "1: subql #1,%0\n\t"
+ "jcc 1b"
+ : "=d" (loops)
+ : "0" (loops));
+}
+
+extern void __bad_udelay(void);
+
+
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+/*
+ * The simpler m68k and ColdFire processors do not have a 32*32->64
+ * multiply instruction. So we need to handle them a little differently.
+ * We use a bit of shifting and a single 32*32->32 multiply to get close.
+ * This is a macro so that the const version can factor out the first
+ * multiply and shift.
+ */
+#define HZSCALE (268435456 / (1000000 / HZ))
+
+#define __const_udelay(u) \
+ __delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
+
+#else
+
+static inline void __xdelay(unsigned long xloops)
+{
+ unsigned long tmp;
+
+ __asm__ ("mulul %2,%0:%1"
+ : "=d" (xloops), "=d" (tmp)
+ : "d" (xloops), "1" (loops_per_jiffy));
+ __delay(xloops * HZ);
+}
+
+/*
+ * The definition of __const_udelay is specifically made a macro so that
+ * the const factor (4295 = 2**32 / 1000000) can be optimized out when
+ * the delay is a const.
+ */
+#define __const_udelay(n) (__xdelay((n) * 4295))
+
+#endif
+
+static inline void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs);
+}
+
+/*
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+#define udelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
+
+
+#endif /* defined(_M68K_DELAY_H) */
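A note on the HZSCALE branch of the new delay.h above: without a 32*32->64 multiply, the ideal count usecs * loops_per_jiffy * HZ / 1000000 is approximated by folding 2^28 / (1000000 / HZ) into a single 32-bit multiply and splitting the remaining division into shifts. The sketch below compares that approximation with a 64-bit reference; the HZ and loops_per_jiffy values are assumed purely for illustration:

/* Sketch of the HZSCALE approximation used by __const_udelay() on parts
 * without a 32*32->64 multiply. */
#include <stdio.h>
#include <stdint.h>

#define HZ 100UL
#define HZSCALE (268435456UL / (1000000UL / HZ))    /* 2^28 / (1e6 / HZ) */

int main(void)
{
    uint32_t loops_per_jiffy = 500000;   /* hypothetical calibration value */
    uint32_t usecs = 150;

    /* the macro's form: (((usecs * HZSCALE) >> 11) * (lpj >> 11)) >> 6 */
    uint32_t approx = ((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
    /* reference computed with 64-bit arithmetic */
    uint64_t exact = (uint64_t)usecs * loops_per_jiffy * HZ / 1000000;

    printf("approx=%u exact=%llu\n", (unsigned)approx, (unsigned long long)exact);
    return 0;
}

With the assumed values this prints approx=7495 against an exact count of 7500, showing how close the shift-and-multiply form stays while never needing a 64-bit product.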
diff --git a/arch/m68k/include/asm/delay_mm.h b/arch/m68k/include/asm/delay_mm.h
deleted file mode 100644
index 5ed92851bc6f..000000000000
--- a/arch/m68k/include/asm/delay_mm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _M68K_DELAY_H
-#define _M68K_DELAY_H
-
-#include <asm/param.h>
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- *
- * Delay routines, using a pre-computed "loops_per_jiffy" value.
- */
-
-static inline void __delay(unsigned long loops)
-{
- __asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
- : "=d" (loops) : "0" (loops));
-}
-
-extern void __bad_udelay(void);
-
-/*
- * Use only for very small delays ( < 1 msec). Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays. This is a "reasonable" implementation, though (and the
- * first constant multiplications gets optimized away if the delay is
- * a constant)
- */
-static inline void __const_udelay(unsigned long xloops)
-{
- unsigned long tmp;
-
- __asm__ ("mulul %2,%0:%1"
- : "=d" (xloops), "=d" (tmp)
- : "d" (xloops), "1" (loops_per_jiffy));
- __delay(xloops * HZ);
-}
-
-static inline void __udelay(unsigned long usecs)
-{
- __const_udelay(usecs * 4295); /* 2**32 / 1000000 */
-}
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
- __udelay(n))
-
-static inline unsigned long muldiv(unsigned long a, unsigned long b,
- unsigned long c)
-{
- unsigned long tmp;
-
- __asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
- : "=d" (tmp), "=d" (a)
- : "d" (b), "d" (c), "1" (a));
- return a;
-}
-
-#endif /* defined(_M68K_DELAY_H) */
diff --git a/arch/m68k/include/asm/delay_no.h b/arch/m68k/include/asm/delay_no.h
deleted file mode 100644
index c3a0edc90f21..000000000000
--- a/arch/m68k/include/asm/delay_no.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _M68KNOMMU_DELAY_H
-#define _M68KNOMMU_DELAY_H
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- * Copyright (C) 2004 Greg Ungerer <gerg@snapgear.com>
- */
-
-#include <asm/param.h>
-
-static inline void __delay(unsigned long loops)
-{
-#if defined(CONFIG_COLDFIRE)
- /* The coldfire runs this loop at significantly different speeds
- * depending upon long word alignment or not. We'll pad it to
- * long word alignment which is the faster version.
- * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
- * than using a NOP (0x4e71) instruction because it executes in one
- * cycle not three and doesn't allow for an arbitrary delay waiting
- * for bus cycles to finish. Also fp/a6 isn't likely to cause a
- * stall waiting for the register to become valid if such is added
- * to the coldfire at some stage.
- */
- __asm__ __volatile__ ( ".balignw 4, 0x4a8e\n\t"
- "1: subql #1, %0\n\t"
- "jcc 1b"
- : "=d" (loops) : "0" (loops));
-#else
- __asm__ __volatile__ ( "1: subql #1, %0\n\t"
- "jcc 1b"
- : "=d" (loops) : "0" (loops));
-#endif
-}
-
-/*
- * Ideally we use a 32*32->64 multiply to calculate the number of
- * loop iterations, but the older standard 68k and ColdFire do not
- * have this instruction. So for them we have a clsoe approximation
- * loop using 32*32->32 multiplies only. This calculation based on
- * the ARM version of delay.
- *
- * We want to implement:
- *
- * loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32
- */
-
-#define HZSCALE (268435456 / (1000000/HZ))
-
-extern unsigned long loops_per_jiffy;
-
-static inline void _udelay(unsigned long usecs)
-{
-#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
- defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
- defined(CONFIG_COLDFIRE)
- __delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
-#else
- unsigned long tmp;
-
- usecs *= 4295; /* 2**32 / 1000000 */
- __asm__ ("mulul %2,%0:%1"
- : "=d" (usecs), "=d" (tmp)
- : "d" (usecs), "1" (loops_per_jiffy*HZ));
- __delay(usecs);
-#endif
-}
-
-/*
- * Moved the udelay() function into library code, no longer inlined.
- * I had to change the algorithm because we are overflowing now on
- * the faster ColdFire parts. The code is a little bigger, so it makes
- * sense to library it.
- */
-extern void udelay(unsigned long usecs);
-
-#endif /* defined(_M68KNOMMU_DELAY_H) */
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index 627d69bacc58..68611e3dbb1d 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -96,11 +96,11 @@
.endm
.macro RDUSP
- movel sw_usp,%a2
+ movel sw_usp,%a3
.endm
.macro WRUSP
- movel %a0,sw_usp
+ movel %a3,sw_usp
.endm
#else /* !CONFIG_COLDFIRE_SW_A7 */
@@ -127,13 +127,13 @@
.endm
.macro RDUSP
- /*move %usp,%a2*/
- .word 0x4e6a
+ /*move %usp,%a3*/
+ .word 0x4e6b
.endm
.macro WRUSP
- /*move %a0,%usp*/
- .word 0x4e60
+ /*move %a3,%usp*/
+ .word 0x4e63
.endm
#endif /* !CONFIG_COLDFIRE_SW_A7 */
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 56d0d5db231c..870e5347155b 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -1,5 +1,34 @@
-#ifdef __uClinux__
-#include "hardirq_no.h"
+#ifndef __M68K_HARDIRQ_H
+#define __M68K_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/cache.h>
+#include <asm/irq.h>
+
+#define HARDIRQ_BITS 8
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+#ifdef CONFIG_MMU
+
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
#else
-#include "hardirq_mm.h"
+
+#include <asm-generic/hardirq.h>
+
+#endif /* !CONFIG_MMU */
+
#endif
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
deleted file mode 100644
index 394ee946015c..000000000000
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define HARDIRQ_BITS 8
-
-#endif
diff --git a/arch/m68k/include/asm/hardirq_no.h b/arch/m68k/include/asm/hardirq_no.h
deleted file mode 100644
index b44b14be87d9..000000000000
--- a/arch/m68k/include/asm/hardirq_no.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS 8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif /* __M68K_HARDIRQ_H */
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 907eff1edd2f..69ed0d74d532 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -33,15 +33,6 @@
#include <linux/spinlock_types.h>
/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-/*
* Interrupt source definitions
* General interrupt sources are the level 1-7.
* Adding an interrupt service routine for one of these sources
@@ -131,4 +122,6 @@ asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
#define irq_canonicalize(irq) (irq)
#endif /* CONFIG_MMU */
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
+
#endif /* _M68K_IRQ_H_ */
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 415d5484916c..789f3b2de0e9 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,6 +40,5 @@ extern unsigned long hw_timer_offset(void);
extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
extern void config_BSP(char *command, int len);
-extern void do_IRQ(int irq, struct pt_regs *fp);
#endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/module.h b/arch/m68k/include/asm/module.h
index 5f21e11071bd..edffe66b7f49 100644
--- a/arch/m68k/include/asm/module.h
+++ b/arch/m68k/include/asm/module.h
@@ -1,46 +1,41 @@
#ifndef _ASM_M68K_MODULE_H
#define _ASM_M68K_MODULE_H
-#ifdef CONFIG_MMU
+enum m68k_fixup_type {
+ m68k_fixup_memoffset,
+ m68k_fixup_vnode_shift,
+};
+
+struct m68k_fixup_info {
+ enum m68k_fixup_type type;
+ void *addr;
+};
struct mod_arch_specific {
struct m68k_fixup_info *fixup_start, *fixup_end;
};
+#ifdef CONFIG_MMU
+
#define MODULE_ARCH_INIT { \
.fixup_start = __start_fixup, \
.fixup_end = __stop_fixup, \
}
-enum m68k_fixup_type {
- m68k_fixup_memoffset,
- m68k_fixup_vnode_shift,
-};
-
-struct m68k_fixup_info {
- enum m68k_fixup_type type;
- void *addr;
-};
-
#define m68k_fixup(type, addr) \
" .section \".m68k_fixup\",\"aw\"\n" \
" .long " #type "," #addr "\n" \
" .previous\n"
+#endif /* CONFIG_MMU */
+
extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
struct module;
extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end);
-#else
-
-struct mod_arch_specific {
-};
-
-#endif /* CONFIG_MMU */
-
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h
index 63cdcc142d93..98d0970d9bad 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/asm/posix_types.h
@@ -51,7 +51,7 @@ typedef struct {
#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#undef __FD_ISSET
-#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
+#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
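On the one-line __FD_ISSET() change above: the raw expression yields the mask bit itself (for example 0x20), which is truthy but not equal to 1, so the added !! normalizes the result to 0 or 1. A minimal stand-alone illustration with an ordinary bitmask:

/* Why the !! matters: raw mask value vs. normalized 0/1 result. */
#include <stdio.h>

int main(void)
{
    unsigned long bits = 1UL << 5;                       /* fd 5 is set */
    printf("raw: %lu\n", bits & (1UL << 5));             /* prints 32 */
    printf("normalized: %d\n", !!(bits & (1UL << 5)));   /* prints 1 */
    return 0;
}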
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index f111b02b704f..d8ef53ac03f9 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -105,9 +105,6 @@ struct thread_struct {
static inline void start_thread(struct pt_regs * regs, unsigned long pc,
unsigned long usp)
{
- /* reads from user space */
- set_fs(USER_DS);
-
regs->pc = pc;
regs->sr &= ~0x2000;
wrusp(usp);
@@ -129,7 +126,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
#define start_thread(_regs, _pc, _usp) \
do { \
- set_fs(USER_DS); /* reads from user space */ \
(_regs)->pc = (_pc); \
((struct switch_stack *)(_regs))[-1].a6 = 0; \
reformat(_regs); \
diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h
index 6e6e3ac1d913..65322b17b6cf 100644
--- a/arch/m68k/include/asm/ptrace.h
+++ b/arch/m68k/include/asm/ptrace.h
@@ -85,7 +85,6 @@ struct switch_stack {
#define user_mode(regs) (!((regs)->sr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 5bc09c787a11..60e88660169c 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -150,7 +150,7 @@ typedef struct sigaltstack {
#ifdef __KERNEL__
#include <asm/sigcontext.h>
-#ifndef __uClinux__
+#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
#define __HAVE_ARCH_SIG_BITOPS
static inline void sigaddset(sigset_t *set, int _sig)
@@ -199,15 +199,14 @@ static inline int sigfindinword(unsigned long word)
return word ^ 31;
}
-struct pt_regs;
-extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
+#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
-#else
-
-#undef __HAVE_ARCH_SIG_BITOPS
+#ifdef __uClinux__
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
+#else
+struct pt_regs;
+extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
#endif /* __uClinux__ */
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
#endif /* _M68K_SIGNAL_H */
diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/system.h
index ccea925ff4f5..47b01f4726bc 100644
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/system.h
@@ -1,5 +1,193 @@
-#ifdef __uClinux__
-#include "system_no.h"
+#ifndef _M68K_SYSTEM_H
+#define _M68K_SYSTEM_H
+
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <asm/segment.h>
+#include <asm/entry.h>
+
+#ifdef __KERNEL__
+
+/*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing. This
+ * also clears the TS-flag if the task we switched to has used the
+ * math co-processor latest.
+ */
+/*
+ * switch_to() saves the extra registers, that are not saved
+ * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
+ * a0-a1. Some of these are used by schedule() and its predecessors
+ * and so we might get see unexpected behaviors when a task returns
+ * with unexpected register values.
+ *
+ * syscall stores these registers itself and none of them are used
+ * by syscall after the function in the syscall has been called.
+ *
+ * Beware that resume now expects *next to be in d1 and the offset of
+ * tss to be in a1. This saves a few instructions as we no longer have
+ * to push them onto the stack and read them back right after.
+ *
+ * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
+ *
+ * Changed 96/09/19 by Andreas Schwab
+ * pass prev in a0, next in a1
+ */
+asmlinkage void resume(void);
+#define switch_to(prev,next,last) do { \
+ register void *_prev __asm__ ("a0") = (prev); \
+ register void *_next __asm__ ("a1") = (next); \
+ register void *_last __asm__ ("d1"); \
+ __asm__ __volatile__("jbsr resume" \
+ : "=a" (_prev), "=a" (_next), "=d" (_last) \
+ : "0" (_prev), "1" (_next) \
+ : "d0", "d2", "d3", "d4", "d5"); \
+ (last) = _last; \
+} while (0)
+
+
+/*
+ * Force strict CPU ordering.
+ * Not really required on m68k...
+ */
+#define nop() do { asm volatile ("nop"); barrier(); } while (0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() ((void)0)
+#define set_mb(var, value) ({ (var) = (value); wmb(); })
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() ((void)0)
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+#ifndef CONFIG_RMW_INSNS
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ unsigned long flags, tmp;
+
+ local_irq_save(flags);
+
+ switch (size) {
+ case 1:
+ tmp = *(u8 *)ptr;
+ *(u8 *)ptr = x;
+ x = tmp;
+ break;
+ case 2:
+ tmp = *(u16 *)ptr;
+ *(u16 *)ptr = x;
+ x = tmp;
+ break;
+ case 4:
+ tmp = *(u32 *)ptr;
+ *(u32 *)ptr = x;
+ x = tmp;
+ break;
+ default:
+ BUG();
+ }
+
+ local_irq_restore(flags);
+ return x;
+}
#else
-#include "system_mm.h"
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__
+ ("moveb %2,%0\n\t"
+ "1:\n\t"
+ "casb %0,%1,%2\n\t"
+ "jne 1b"
+ : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__
+ ("movew %2,%0\n\t"
+ "1:\n\t"
+ "casw %0,%1,%2\n\t"
+ "jne 1b"
+ : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__
+ ("movel %2,%0\n\t"
+ "1:\n\t"
+ "casl %0,%1,%2\n\t"
+ "jne 1b"
+ : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ }
+ return x;
+}
#endif
+
+#include <asm-generic/cmpxchg-local.h>
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#ifdef CONFIG_RMW_INSNS
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__ ("casb %0,%2,%1"
+ : "=d" (old), "=m" (*(char *)p)
+ : "d" (new), "0" (old), "m" (*(char *)p));
+ break;
+ case 2:
+ __asm__ __volatile__ ("casw %0,%2,%1"
+ : "=d" (old), "=m" (*(short *)p)
+ : "d" (new), "0" (old), "m" (*(short *)p));
+ break;
+ case 4:
+ __asm__ __volatile__ ("casl %0,%2,%1"
+ : "=d" (old), "=m" (*(int *)p)
+ : "d" (new), "0" (old), "m" (*(int *)p));
+ break;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#else
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg.h>
+
+#endif
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_SYSTEM_H */
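On the cmpxchg() comment above ("Success is indicated by comparing RETURN with OLD"): callers test whether the value returned from memory equals the old value they passed in. A hedged user-space sketch of that pattern, with GCC's __sync_val_compare_and_swap() standing in for the kernel macro that this header implements with casb/casw/casl under CONFIG_RMW_INSNS:

/* Sketch of the success-by-comparison pattern for a cmpxchg-style primitive. */
#include <stdio.h>

static int try_lock(int *lock)
{
    /* succeeds only if we observed 0 and installed 1 */
    return __sync_val_compare_and_swap(lock, 0, 1) == 0;
}

int main(void)
{
    int lock = 0;
    printf("first: %d\n", try_lock(&lock));    /* 1: acquired */
    printf("second: %d\n", try_lock(&lock));   /* 0: already held */
    return 0;
}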
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
deleted file mode 100644
index 47b01f4726bc..000000000000
--- a/arch/m68k/include/asm/system_mm.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef _M68K_SYSTEM_H
-#define _M68K_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-#ifdef __KERNEL__
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors,
- * so a task might otherwise return with unexpected register values.
- *
- * The syscall entry code saves these registers itself, and none of
- * them are used once the function implementing the syscall has been
- * called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) do { \
- register void *_prev __asm__ ("a0") = (prev); \
- register void *_next __asm__ ("a1") = (next); \
- register void *_last __asm__ ("d1"); \
- __asm__ __volatile__("jbsr resume" \
- : "=a" (_prev), "=a" (_next), "=d" (_last) \
- : "0" (_prev), "1" (_next) \
- : "d0", "d2", "d3", "d4", "d5"); \
- (last) = _last; \
-} while (0)
-
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop() do { asm volatile ("nop"); barrier(); } while (0)
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define read_barrier_depends() ((void)0)
-#define set_mb(var, value) ({ (var) = (value); wmb(); })
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() ((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- unsigned long flags, tmp;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- tmp = *(u8 *)ptr;
- *(u8 *)ptr = x;
- x = tmp;
- break;
- case 2:
- tmp = *(u16 *)ptr;
- *(u16 *)ptr = x;
- x = tmp;
- break;
- case 4:
- tmp = *(u32 *)ptr;
- *(u32 *)ptr = x;
- x = tmp;
- break;
- default:
- BUG();
- }
-
- local_irq_restore(flags);
- return x;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("moveb %2,%0\n\t"
- "1:\n\t"
- "casb %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("movew %2,%0\n\t"
- "1:\n\t"
- "casw %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("movel %2,%0\n\t"
- "1:\n\t"
- "casl %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- }
- return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-#ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
- unsigned long new, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__ ("casb %0,%2,%1"
- : "=d" (old), "=m" (*(char *)p)
- : "d" (new), "0" (old), "m" (*(char *)p));
- break;
- case 2:
- __asm__ __volatile__ ("casw %0,%2,%1"
- : "=d" (old), "=m" (*(short *)p)
- : "d" (new), "0" (old), "m" (*(short *)p));
- break;
- case 4:
- __asm__ __volatile__ ("casl %0,%2,%1"
- : "=d" (old), "=m" (*(int *)p)
- : "d" (new), "0" (old), "m" (*(int *)p));
- break;
- }
- return old;
-}
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#else
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-
-#include <asm-generic/cmpxchg.h>
-
-#endif
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_SYSTEM_H */
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
deleted file mode 100644
index 6fe9f93bc3ff..000000000000
--- a/arch/m68k/include/asm/system_no.h
+++ /dev/null
@@ -1,153 +0,0 @@
-#ifndef _M68KNOMMU_SYSTEM_H
-#define _M68KNOMMU_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to most recently
- * used the math co-processor.
- */
-/*
- * switch_to() saves the extra registers that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors,
- * so a task might otherwise return with unexpected register values.
- *
- * The syscall entry code saves these registers itself, and none of
- * them are used once the function implementing the syscall has been
- * called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) \
-{ \
- void *_last; \
- __asm__ __volatile__( \
- "movel %1, %%a0\n\t" \
- "movel %2, %%a1\n\t" \
- "jbsr resume\n\t" \
- "movel %%d1, %0\n\t" \
- : "=d" (_last) \
- : "d" (prev), "d" (next) \
- : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
- (last) = _last; \
-}
-
-#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop() asm volatile ("nop"::)
-#define mb() asm volatile ("" : : :"memory")
-#define rmb() asm volatile ("" : : :"memory")
-#define wmb() asm volatile ("" : : :"memory")
-#define set_mb(var, value) ({ (var) = (value); wmb(); })
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-
-#define read_barrier_depends() ((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- unsigned long tmp, flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("moveb %2,%0\n\t"
- "moveb %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("movew %2,%0\n\t"
- "movew %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("movel %2,%0\n\t"
- "movel %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- }
- local_irq_restore(flags);
- return tmp;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("moveb %2,%0\n\t"
- "1:\n\t"
- "casb %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("movew %2,%0\n\t"
- "1:\n\t"
- "casw %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("movel %2,%0\n\t"
- "1:\n\t"
- "casl %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- }
- return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#include <asm-generic/cmpxchg.h>
-
-#define arch_align_stack(x) (x)
-
-
-#endif /* _M68KNOMMU_SYSTEM_H */
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 0bffb17d5db7..151068f64f44 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -22,7 +22,6 @@ extern e_vector vectors[];
asmlinkage void auto_inthandler(void);
asmlinkage void user_inthandler(void);
asmlinkage void bad_inthandler(void);
-extern void init_vectors(void);
#endif