author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 02:58:21 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 02:58:21 +0400
commit     0195c00244dc2e9f522475868fa278c473ba7339 (patch)
tree       f97ca98ae64ede2c33ad3de05ed7bbfa4f4495ed /arch/mn10300/include
parent     f21ce8f8447c8be8847dadcfdbcc76b0d7365fa5 (diff)
parent     141124c02059eee9dbc5c86ea797b1ca888e77f7 (diff)
download   linux-0195c00244dc2e9f522475868fa278c473ba7339.tar.xz
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system
Pull "Disintegrate and delete asm/system.h" from David Howells: "Here are a bunch of patches to disintegrate asm/system.h into a set of separate bits to relieve the problem of circular inclusion dependencies. I've built all the working defconfigs from all the arches that I can and made sure that they don't break. The reason for these patches is that I recently encountered a circular dependency problem that came about when I produced some patches to optimise get_order() by rewriting it to use ilog2(). This uses bitops - and on the SH arch asm/bitops.h drags in asm-generic/get_order.h by a circuituous route involving asm/system.h. The main difficulty seems to be asm/system.h. It holds a number of low level bits with no/few dependencies that are commonly used (eg. memory barriers) and a number of bits with more dependencies that aren't used in many places (eg. switch_to()). These patches break asm/system.h up into the following core pieces: (1) asm/barrier.h Move memory barriers here. This already done for MIPS and Alpha. (2) asm/switch_to.h Move switch_to() and related stuff here. (3) asm/exec.h Move arch_align_stack() here. Other process execution related bits could perhaps go here from asm/processor.h. (4) asm/cmpxchg.h Move xchg() and cmpxchg() here as they're full word atomic ops and frequently used by atomic_xchg() and atomic_cmpxchg(). (5) asm/bug.h Move die() and related bits. (6) asm/auxvec.h Move AT_VECTOR_SIZE_ARCH here. Other arch headers are created as needed on a per-arch basis." Fixed up some conflicts from other header file cleanups and moving code around that has happened in the meantime, so David's testing is somewhat weakened by that. We'll find out anything that got broken and fix it.. * tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits) Delete all instances of asm/system.h Remove all #inclusions of asm/system.h Add #includes needed to permit the removal of asm/system.h Move all declarations of free_initmem() to linux/mm.h Disintegrate asm/system.h for OpenRISC Split arch_align_stack() out from asm-generic/system.h Split the switch_to() wrapper out of asm-generic/system.h Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h Create asm-generic/barrier.h Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h Disintegrate asm/system.h for Xtensa Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt] Disintegrate asm/system.h for Tile Disintegrate asm/system.h for Sparc Disintegrate asm/system.h for SH Disintegrate asm/system.h for Score Disintegrate asm/system.h for S390 Disintegrate asm/system.h for PowerPC Disintegrate asm/system.h for PA-RISC Disintegrate asm/system.h for MN10300 ...
Diffstat (limited to 'arch/mn10300/include')
 -rw-r--r--  arch/mn10300/include/asm/atomic.h     109
 -rw-r--r--  arch/mn10300/include/asm/barrier.h     37
 -rw-r--r--  arch/mn10300/include/asm/cmpxchg.h    115
 -rw-r--r--  arch/mn10300/include/asm/dma.h          1
 -rw-r--r--  arch/mn10300/include/asm/exec.h        16
 -rw-r--r--  arch/mn10300/include/asm/switch_to.h   49
 -rw-r--r--  arch/mn10300/include/asm/system.h     102
 7 files changed, 220 insertions, 209 deletions
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index b9a8f8461262..975e1841ca64 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -12,112 +12,7 @@
#define _ASM_ATOMIC_H
#include <asm/irqflags.h>
-
-#ifndef __ASSEMBLY__
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
- unsigned long status;
- unsigned long oldval;
-
- asm volatile(
- "1: mov %4,(_AAR,%3) \n"
- " mov (_ADR,%3),%1 \n"
- " mov %5,(_ADR,%3) \n"
- " mov (_ADR,%3),%0 \n" /* flush */
- " mov (_ASR,%3),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=&r"(oldval), "=m"(*m)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
- : "memory", "cc");
-
- return oldval;
-}
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
- unsigned long old, unsigned long new)
-{
- unsigned long status;
- unsigned long oldval;
-
- asm volatile(
- "1: mov %4,(_AAR,%3) \n"
- " mov (_ADR,%3),%1 \n"
- " cmp %5,%1 \n"
- " bne 2f \n"
- " mov %6,(_ADR,%3) \n"
- "2: mov (_ADR,%3),%0 \n" /* flush */
- " mov (_ASR,%3),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=&r"(oldval), "=m"(*m)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
- "r"(old), "r"(new)
- : "memory", "cc");
-
- return oldval;
-}
-#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
-#error "No SMP atomic operation support!"
-#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
-
-#else /* CONFIG_SMP */
-
-/*
- * Emulate xchg for non-SMP MN10300
- */
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
- unsigned long oldval;
- unsigned long flags;
-
- flags = arch_local_cli_save();
- oldval = *m;
- *m = val;
- arch_local_irq_restore(flags);
- return oldval;
-}
-
-/*
- * Emulate cmpxchg for non-SMP MN10300
- */
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
- unsigned long old, unsigned long new)
-{
- unsigned long oldval;
- unsigned long flags;
-
- flags = arch_local_cli_save();
- oldval = *m;
- if (oldval == old)
- *m = new;
- arch_local_irq_restore(flags);
- return oldval;
-}
-
-#endif /* CONFIG_SMP */
-
-#define xchg(ptr, v) \
- ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
- (unsigned long)(v)))
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
- (unsigned long)(o), \
- (unsigned long)(n)))
-
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-
-#endif /* !__ASSEMBLY__ */
+#include <asm/cmpxchg.h>
#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
@@ -269,6 +164,8 @@ static inline void atomic_dec(atomic_t *v)
c; \
})
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
/**
* atomic_clear_mask - Atomically clear bits in memory
diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
new file mode 100644
index 000000000000..2bd97a5c8af7
--- /dev/null
+++ b/arch/mn10300/include/asm/barrier.h
@@ -0,0 +1,37 @@
+/* MN10300 memory barrier definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_BARRIER_H
+#define _ASM_BARRIER_H
+
+#define nop() asm volatile ("nop")
+
+#define mb() asm volatile ("": : :"memory")
+#define rmb() mb()
+#define wmb() asm volatile ("": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else /* CONFIG_SMP */
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#endif /* CONFIG_SMP */
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define read_barrier_depends() do {} while (0)
+#define smp_read_barrier_depends() do {} while (0)
+
+#endif /* _ASM_BARRIER_H */
diff --git a/arch/mn10300/include/asm/cmpxchg.h b/arch/mn10300/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..97a4aaf387a6
--- /dev/null
+++ b/arch/mn10300/include/asm/cmpxchg.h
@@ -0,0 +1,115 @@
+/* MN10300 Atomic xchg/cmpxchg operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_CMPXCHG_H
+#define _ASM_CMPXCHG_H
+
+#include <asm/irqflags.h>
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " mov %5,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+ : "memory", "cc");
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " cmp %5,%1 \n"
+ " bne 2f \n"
+ " mov %6,(_ADR,%3) \n"
+ "2: mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+ "r"(old), "r"(new)
+ : "memory", "cc");
+
+ return oldval;
+}
+#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ *m = val;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ if (oldval == old)
+ *m = new;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
+ (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#endif /* _ASM_CMPXCHG_H */
diff --git a/arch/mn10300/include/asm/dma.h b/arch/mn10300/include/asm/dma.h
index 098df2e617ab..10b77d4628c2 100644
--- a/arch/mn10300/include/asm/dma.h
+++ b/arch/mn10300/include/asm/dma.h
@@ -11,7 +11,6 @@
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
-#include <asm/system.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <linux/delay.h>
diff --git a/arch/mn10300/include/asm/exec.h b/arch/mn10300/include/asm/exec.h
new file mode 100644
index 000000000000..c74e367f4b9d
--- /dev/null
+++ b/arch/mn10300/include/asm/exec.h
@@ -0,0 +1,16 @@
+/* MN10300 process execution definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
new file mode 100644
index 000000000000..393d311735c8
--- /dev/null
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -0,0 +1,49 @@
+/* MN10300 task switching definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/barrier.h>
+
+struct task_struct;
+struct thread_struct;
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next) \
+ do { \
+ if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
+ (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
+ (prev)->thread.uregs->epsw &= ~EPSW_FE; \
+ fpu_save(&(prev)->thread.fpu_state); \
+ } \
+ } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
+
+/* context switching is now performed out-of-line in switch_to.S */
+extern asmlinkage
+struct task_struct *__switch_to(struct thread_struct *prev,
+ struct thread_struct *next,
+ struct task_struct *prev_task);
+
+#define switch_to(prev, next, last) \
+do { \
+ switch_fpu(prev, next); \
+ current->thread.wchan = (u_long) __builtin_return_address(0); \
+ (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
+ mb(); \
+ current->thread.wchan = 0; \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
deleted file mode 100644
index 94b4c5e1491b..000000000000
--- a/arch/mn10300/include/asm/system.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* MN10300 System definitions
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_SYSTEM_H
-#define _ASM_SYSTEM_H
-
-#include <asm/cpu-regs.h>
-#include <asm/intctl-regs.h>
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <linux/atomic.h>
-
-#if !defined(CONFIG_LAZY_SAVE_FPU)
-struct fpu_state_struct;
-extern asmlinkage void fpu_save(struct fpu_state_struct *);
-#define switch_fpu(prev, next) \
- do { \
- if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
- (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
- (prev)->thread.uregs->epsw &= ~EPSW_FE; \
- fpu_save(&(prev)->thread.fpu_state); \
- } \
- } while (0)
-#else
-#define switch_fpu(prev, next) do {} while (0)
-#endif
-
-struct task_struct;
-struct thread_struct;
-
-extern asmlinkage
-struct task_struct *__switch_to(struct thread_struct *prev,
- struct thread_struct *next,
- struct task_struct *prev_task);
-
-/* context switching is now performed out-of-line in switch_to.S */
-#define switch_to(prev, next, last) \
-do { \
- switch_fpu(prev, next); \
- current->thread.wchan = (u_long) __builtin_return_address(0); \
- (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
- mb(); \
- current->thread.wchan = 0; \
-} while (0)
-
-#define arch_align_stack(x) (x)
-
-#define nop() asm volatile ("nop")
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-
-#define mb() asm volatile ("": : :"memory")
-#define rmb() mb()
-#define wmb() asm volatile ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#else /* CONFIG_SMP */
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#endif /* CONFIG_SMP */
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define read_barrier_depends() do {} while (0)
-#define smp_read_barrier_depends() do {} while (0)
-
-#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* _ASM_SYSTEM_H */