Diffstat (limited to 'arch/mn10300/include')
-rw-r--r--  arch/mn10300/include/asm/atomic.h    | 109
-rw-r--r--  arch/mn10300/include/asm/barrier.h   |  37
-rw-r--r--  arch/mn10300/include/asm/cmpxchg.h   | 115
-rw-r--r--  arch/mn10300/include/asm/dma.h       |   1
-rw-r--r--  arch/mn10300/include/asm/exec.h      |  16
-rw-r--r--  arch/mn10300/include/asm/switch_to.h |  49
-rw-r--r--  arch/mn10300/include/asm/system.h    | 102
7 files changed, 220 insertions, 209 deletions
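
This patch is MN10300's part of the asm/system.h disintegration: the xchg()/cmpxchg() implementations move out of asm/atomic.h into the new asm/cmpxchg.h, the memory barrier macros into the new asm/barrier.h, arch_align_stack() into the new asm/exec.h, and switch_fpu()/switch_to() into the new asm/switch_to.h, after which asm/system.h itself is deleted. As a minimal sketch of what callers look like once the split headers are in place (the function and the pending_events variable below are hypothetical, for illustration only):

/* After the split, code includes the specific header it needs
 * rather than the old catch-all <asm/system.h>. */
#include <asm/barrier.h>        /* mb(), smp_mb() and friends */
#include <asm/cmpxchg.h>        /* xchg(), cmpxchg() */

static unsigned long pending_events;    /* hypothetical shared word */

/* Atomically claim every currently-pending event in one shot. */
static unsigned long claim_pending_events(void)
{
        return xchg(&pending_events, 0);
}
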
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index b9a8f8461262..975e1841ca64 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -12,112 +12,7 @@
 #define _ASM_ATOMIC_H

 #include <asm/irqflags.h>
-
-#ifndef __ASSEMBLY__
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-        unsigned long status;
-        unsigned long oldval;
-
-        asm volatile(
-                "1:     mov     %4,(_AAR,%3)    \n"
-                "       mov     (_ADR,%3),%1    \n"
-                "       mov     %5,(_ADR,%3)    \n"
-                "       mov     (_ADR,%3),%0    \n"     /* flush */
-                "       mov     (_ASR,%3),%0    \n"
-                "       or      %0,%0           \n"
-                "       bne     1b              \n"
-                : "=&r"(status), "=&r"(oldval), "=m"(*m)
-                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
-                : "memory", "cc");
-
-        return oldval;
-}
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                      unsigned long old, unsigned long new)
-{
-        unsigned long status;
-        unsigned long oldval;
-
-        asm volatile(
-                "1:     mov     %4,(_AAR,%3)    \n"
-                "       mov     (_ADR,%3),%1    \n"
-                "       cmp     %5,%1           \n"
-                "       bne     2f              \n"
-                "       mov     %6,(_ADR,%3)    \n"
-                "2:     mov     (_ADR,%3),%0    \n"     /* flush */
-                "       mov     (_ASR,%3),%0    \n"
-                "       or      %0,%0           \n"
-                "       bne     1b              \n"
-                : "=&r"(status), "=&r"(oldval), "=m"(*m)
-                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
-                  "r"(old), "r"(new)
-                : "memory", "cc");
-
-        return oldval;
-}
-#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
-#error "No SMP atomic operation support!"
-#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
-
-#else /* CONFIG_SMP */
-
-/*
- * Emulate xchg for non-SMP MN10300
- */
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-        unsigned long oldval;
-        unsigned long flags;
-
-        flags = arch_local_cli_save();
-        oldval = *m;
-        *m = val;
-        arch_local_irq_restore(flags);
-        return oldval;
-}
-
-/*
- * Emulate cmpxchg for non-SMP MN10300
- */
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                      unsigned long old, unsigned long new)
-{
-        unsigned long oldval;
-        unsigned long flags;
-
-        flags = arch_local_cli_save();
-        oldval = *m;
-        if (oldval == old)
-                *m = new;
-        arch_local_irq_restore(flags);
-        return oldval;
-}
-
-#endif /* CONFIG_SMP */
-
-#define xchg(ptr, v)                                            \
-        ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
-                                     (unsigned long)(v)))
-
-#define cmpxchg(ptr, o, n)                                      \
-        ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-                                        (unsigned long)(o),     \
-                                        (unsigned long)(n)))
-
-#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))
-
-#endif /* !__ASSEMBLY__ */
+#include <asm/cmpxchg.h>

 #ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
@@ -269,6 +164,8 @@ static inline void atomic_dec(atomic_t *v)
         c;                                      \
 })

+#define atomic_xchg(ptr, v)             (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)     (cmpxchg(&((v)->counter), (old), (new)))

 /**
  * atomic_clear_mask - Atomically clear bits in memory
diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
new file mode 100644
index 000000000000..2bd97a5c8af7
--- /dev/null
+++ b/arch/mn10300/include/asm/barrier.h
@@ -0,0 +1,37 @@
+/* MN10300 memory barrier definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_BARRIER_H
+#define _ASM_BARRIER_H
+
+#define nop()   asm volatile ("nop")
+
+#define mb()    asm volatile ("": : :"memory")
+#define rmb()   mb()
+#define wmb()   asm volatile ("": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb()        mb()
+#define smp_rmb()       rmb()
+#define smp_wmb()       wmb()
+#define set_mb(var, value)  do { xchg(&var, value); } while (0)
+#else  /* CONFIG_SMP */
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#define set_mb(var, value)  do { var = value; mb(); } while (0)
+#endif /* CONFIG_SMP */
+
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define read_barrier_depends()          do {} while (0)
+#define smp_read_barrier_depends()      do {} while (0)
+
+#endif /* _ASM_BARRIER_H */
diff --git a/arch/mn10300/include/asm/cmpxchg.h b/arch/mn10300/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..97a4aaf387a6
--- /dev/null
+++ b/arch/mn10300/include/asm/cmpxchg.h
@@ -0,0 +1,115 @@
+/* MN10300 Atomic xchg/cmpxchg operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_CMPXCHG_H
+#define _ASM_CMPXCHG_H
+
+#include <asm/irqflags.h>
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+        unsigned long status;
+        unsigned long oldval;
+
+        asm volatile(
+                "1:     mov     %4,(_AAR,%3)    \n"
+                "       mov     (_ADR,%3),%1    \n"
+                "       mov     %5,(_ADR,%3)    \n"
+                "       mov     (_ADR,%3),%0    \n"     /* flush */
+                "       mov     (_ASR,%3),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=&r"(oldval), "=m"(*m)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+                : "memory", "cc");
+
+        return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                      unsigned long old, unsigned long new)
+{
+        unsigned long status;
+        unsigned long oldval;
+
+        asm volatile(
+                "1:     mov     %4,(_AAR,%3)    \n"
+                "       mov     (_ADR,%3),%1    \n"
+                "       cmp     %5,%1           \n"
+                "       bne     2f              \n"
+                "       mov     %6,(_ADR,%3)    \n"
+                "2:     mov     (_ADR,%3),%0    \n"     /* flush */
+                "       mov     (_ASR,%3),%0    \n"
+                "       or      %0,%0           \n"
+                "       bne     1b              \n"
+                : "=&r"(status), "=&r"(oldval), "=m"(*m)
+                : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+                  "r"(old), "r"(new)
+                : "memory", "cc");
+
+        return oldval;
+}
+#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+        unsigned long oldval;
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        oldval = *m;
+        *m = val;
+        arch_local_irq_restore(flags);
+        return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                      unsigned long old, unsigned long new)
+{
+        unsigned long oldval;
+        unsigned long flags;
+
+        flags = arch_local_cli_save();
+        oldval = *m;
+        if (oldval == old)
+                *m = new;
+        arch_local_irq_restore(flags);
+        return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v)                                            \
+        ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
+                                     (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n)                                      \
+        ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+                                        (unsigned long)(o),     \
+                                        (unsigned long)(n)))
+
+#endif /* _ASM_CMPXCHG_H */
diff --git a/arch/mn10300/include/asm/dma.h b/arch/mn10300/include/asm/dma.h
index 098df2e617ab..10b77d4628c2 100644
--- a/arch/mn10300/include/asm/dma.h
+++ b/arch/mn10300/include/asm/dma.h
@@ -11,7 +11,6 @@
 #ifndef _ASM_DMA_H
 #define _ASM_DMA_H

-#include <asm/system.h>
 #include <linux/spinlock.h>
 #include <asm/io.h>
 #include <linux/delay.h>
diff --git a/arch/mn10300/include/asm/exec.h b/arch/mn10300/include/asm/exec.h
new file mode 100644
index 000000000000..c74e367f4b9d
--- /dev/null
+++ b/arch/mn10300/include/asm/exec.h
@@ -0,0 +1,16 @@
+/* MN10300 process execution definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_EXEC_H
+#define _ASM_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _ASM_EXEC_H */
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
new file mode 100644
index 000000000000..393d311735c8
--- /dev/null
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -0,0 +1,49 @@
+/* MN10300 task switching definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_SWITCH_TO_H
+#define _ASM_SWITCH_TO_H
+
+#include <asm/barrier.h>
+
+struct task_struct;
+struct thread_struct;
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next)                                          \
+        do {                                                            \
+                if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
+                        (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
+                        (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
+                        fpu_save(&(prev)->thread.fpu_state);            \
+                }                                                       \
+        } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
+
+/* context switching is now performed out-of-line in switch_to.S */
+extern asmlinkage
+struct task_struct *__switch_to(struct thread_struct *prev,
+                                struct thread_struct *next,
+                                struct task_struct *prev_task);
+
+#define switch_to(prev, next, last)                                     \
+do {                                                                    \
+        switch_fpu(prev, next);                                         \
+        current->thread.wchan = (u_long) __builtin_return_address(0);  \
+        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
+        mb();                                                           \
+        current->thread.wchan = 0;                                      \
+} while (0)
+
+#endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
deleted file mode 100644
index 94b4c5e1491b..000000000000
--- a/arch/mn10300/include/asm/system.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* MN10300 System definitions
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_SYSTEM_H
-#define _ASM_SYSTEM_H
-
-#include <asm/cpu-regs.h>
-#include <asm/intctl-regs.h>
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <linux/atomic.h>
-
-#if !defined(CONFIG_LAZY_SAVE_FPU)
-struct fpu_state_struct;
-extern asmlinkage void fpu_save(struct fpu_state_struct *);
-#define switch_fpu(prev, next)                                          \
-        do {                                                            \
-                if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
-                        (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
-                        (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
-                        fpu_save(&(prev)->thread.fpu_state);            \
-                }                                                       \
-        } while (0)
-#else
-#define switch_fpu(prev, next) do {} while (0)
-#endif
-
-struct task_struct;
-struct thread_struct;
-
-extern asmlinkage
-struct task_struct *__switch_to(struct thread_struct *prev,
-                                struct thread_struct *next,
-                                struct task_struct *prev_task);
-
-/* context switching is now performed out-of-line in switch_to.S */
-#define switch_to(prev, next, last)                                     \
-do {                                                                    \
-        switch_fpu(prev, next);                                         \
-        current->thread.wchan = (u_long) __builtin_return_address(0);  \
-        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
-        mb();                                                           \
-        current->thread.wchan = 0;                                      \
-} while (0)
-
-#define arch_align_stack(x) (x)
-
-#define nop() asm volatile ("nop")
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-
-#define mb()    asm volatile ("": : :"memory")
-#define rmb()   mb()
-#define wmb()   asm volatile ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb()        mb()
-#define smp_rmb()       rmb()
-#define smp_wmb()       wmb()
-#define set_mb(var, value)  do { xchg(&var, value); } while (0)
-#else  /* CONFIG_SMP */
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#define set_mb(var, value)  do { var = value; mb(); } while (0)
-#endif /* CONFIG_SMP */
-
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define read_barrier_depends()          do {} while (0)
-#define smp_read_barrier_depends()      do {} while (0)
-
-#endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* _ASM_SYSTEM_H */
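
The cmpxchg() that now lives in asm/cmpxchg.h is normally consumed through a compare-and-swap retry loop; atomic_add_unless() in asm/atomic.h is built on atomic_cmpxchg() in exactly this shape. A minimal sketch of the pattern, assuming the headers above; bounded_inc() and its limit parameter are hypothetical names used only for illustration:

#include <asm/cmpxchg.h>

/* Hypothetical example: increment *v, but never past "limit".
 * Returns the pre-increment value on success, or the observed
 * value (>= limit) when no increment was performed. */
static unsigned long bounded_inc(volatile unsigned long *v,
                                 unsigned long limit)
{
        unsigned long old, cur = *v;

        for (;;) {
                if (cur >= limit)
                        return cur;     /* at the bound, leave it alone */
                old = cmpxchg(v, cur, cur + 1);
                if (old == cur)
                        return cur;     /* the swap won the race */
                cur = old;              /* lost a race, retry from new value */
        }
}

On the UP configuration this compiles down to the IRQ-disabling emulation above, while the SMP configuration routes it through the atomic operations unit; either way the retry loop itself is unchanged.

Likewise, the smp_wmb()/smp_rmb() pair from the new asm/barrier.h follows the usual publication discipline: the producer orders its payload store before the flag store, and the consumer orders the flag load before the payload load. A minimal sketch, again with hypothetical names (payload, ready, and both functions):

#include <asm/barrier.h>

static unsigned long payload;   /* hypothetical shared data */
static int ready;               /* hypothetical publication flag */

static void publish(unsigned long value)
{
        payload = value;
        smp_wmb();              /* payload store must precede flag store */
        ready = 1;
}

static int try_consume(unsigned long *out)
{
        if (!ready)
                return 0;       /* nothing published yet */
        smp_rmb();              /* flag load must precede payload load */
        *out = payload;
        return 1;
}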
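
Note that on MN10300 both mb() and wmb() expand to a plain compiler barrier, so the sketch above constrains only compiler reordering on this architecture; the pairing discipline is still worth keeping, since the same code must stay correct on more weakly ordered machines.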