author     Jiri Kosina <jkosina@suse.cz>  2011-09-15 17:08:05 +0400
committer  Jiri Kosina <jkosina@suse.cz>  2011-09-15 17:08:18 +0400
commit     e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree       407361230bf6733f63d8e788e4b5e6566ee04818 /arch/avr32
parent     10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent     cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches based on a more recent version of the tree.
Diffstat (limited to 'arch/avr32')
-rw-r--r--  arch/avr32/Kconfig                 |  1
-rw-r--r--  arch/avr32/include/asm/atomic.h    | 60
-rw-r--r--  arch/avr32/include/asm/ptrace.h    |  2
-rw-r--r--  arch/avr32/kernel/syscall_table.S  |  2
4 files changed, 27 insertions(+), 38 deletions(-)
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index e9d689b7c833..197e96f70405 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -10,6 +10,7 @@ config AVR32
select GENERIC_IRQ_PROBE
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_SHOW
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
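Selecting ARCH_HAVE_NMI_SAFE_CMPXCHG advertises to generic code that cmpxchg() on this architecture may safely be used from NMI context. As a rough, hypothetical illustration (not part of this patch; the names below are made up, and cmpxchg()/ACCESS_ONCE() are assumed visible via the included headers), this is the kind of lock-free construct that relies on that guarantee:

#include <linux/atomic.h>
#include <linux/compiler.h>

/* Hypothetical lock-free push; calling this from an NMI handler is only
 * sound on architectures whose cmpxchg() is NMI-safe, i.e. those that
 * select ARCH_HAVE_NMI_SAFE_CMPXCHG.
 */
struct nmi_node {
	struct nmi_node *next;
};

static struct nmi_node *nmi_stack_head;

static void nmi_stack_push(struct nmi_node *node)
{
	struct nmi_node *old;

	do {
		old = ACCESS_ONCE(nmi_stack_head);
		node->next = old;
		/* retry if another CPU, or a nested NMI, changed the head */
	} while (cmpxchg(&nmi_stack_head, old, node) != old);
}
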
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index bbce6a1c6bb6..e0ac2631c87e 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -78,70 +78,63 @@ static inline int atomic_add_return(int i, atomic_t *v)
/*
* atomic_sub_unless - sub unless the number is a given value
* @v: pointer of type atomic_t
- * @a: the amount to add to v...
+ * @a: the amount to subtract from v...
* @u: ...unless v is equal to u.
*
- * If the atomic value v is not equal to u, this function subtracts a
- * from v, and returns non zero. If v is equal to u then it returns
- * zero. This is done as an atomic operation.
+ * Atomically subtract @a from @v, so long as it was not @u.
+ * Returns the old value of @v.
*/
-static inline int atomic_sub_unless(atomic_t *v, int a, int u)
+static inline void atomic_sub_unless(atomic_t *v, int a, int u)
{
- int tmp, result = 0;
+ int tmp;
asm volatile(
"/* atomic_sub_unless */\n"
"1: ssrf 5\n"
- " ld.w %0, %3\n"
- " cp.w %0, %5\n"
+ " ld.w %0, %2\n"
+ " cp.w %0, %4\n"
" breq 1f\n"
- " sub %0, %4\n"
- " stcond %2, %0\n"
+ " sub %0, %3\n"
+ " stcond %1, %0\n"
" brne 1b\n"
- " mov %1, 1\n"
"1:"
- : "=&r"(tmp), "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result)
+ : "=&r"(tmp), "=o"(v->counter)
+ : "m"(v->counter), "rKs21"(a), "rKs21"(u)
: "cc", "memory");
-
- return result;
}
/*
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * If the atomic value v is not equal to u, this function adds a to v,
- * and returns non zero. If v is equal to u then it returns zero. This
- * is done as an atomic operation.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
*/
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
- int tmp, result;
+ int tmp, old = atomic_read(v);
if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
- result = atomic_sub_unless(v, -a, u);
+ atomic_sub_unless(v, -a, u);
else {
- result = 0;
asm volatile(
- "/* atomic_add_unless */\n"
+ "/* __atomic_add_unless */\n"
"1: ssrf 5\n"
- " ld.w %0, %3\n"
- " cp.w %0, %5\n"
+ " ld.w %0, %2\n"
+ " cp.w %0, %4\n"
" breq 1f\n"
- " add %0, %4\n"
- " stcond %2, %0\n"
+ " add %0, %3\n"
+ " stcond %1, %0\n"
" brne 1b\n"
- " mov %1, 1\n"
"1:"
- : "=&r"(tmp), "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "r"(a), "ir"(u), "1"(result)
+ : "=&r"(tmp), "=o"(v->counter)
+ : "m"(v->counter), "r"(a), "ir"(u)
: "cc", "memory");
}
- return result;
+ return old;
}
/*
@@ -188,7 +181,6 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define atomic_inc_not_zero(v) atomic_add_unless(v, 1, 0)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#define smp_mb__before_atomic_dec() barrier()
@@ -196,6 +188,4 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
-
#endif /* __ASM_AVR32_ATOMIC_H */
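
The net effect of the atomic.h hunks: the avr32 helpers no longer return a 0/1 success flag; __atomic_add_unless() returns the old value, and the boolean variants (including atomic_inc_not_zero(), whose local #define is dropped above) come from the generic <linux/atomic.h> layer instead. A simplified sketch of those generic wrappers, paraphrased here rather than quoted from the header:

/* Simplified sketch of the generic wrappers that sit on top of the
 * per-architecture __atomic_add_unless(); shown only to explain why the
 * avr32 routine now returns the old value instead of 0/1.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	/* the add happened unless the old value was already @u */
	return __atomic_add_unless(v, a, u) != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
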
diff --git a/arch/avr32/include/asm/ptrace.h b/arch/avr32/include/asm/ptrace.h
index e53dd0d900f5..c67a007f672a 100644
--- a/arch/avr32/include/asm/ptrace.h
+++ b/arch/avr32/include/asm/ptrace.h
@@ -132,8 +132,6 @@ struct pt_regs {
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs (struct pt_regs *);
-
static __inline__ int valid_user_regs(struct pt_regs *regs)
{
/*
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index c7fd394d28a4..6eba53530d1c 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -158,7 +158,7 @@ sys_call_table:
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_poll
- .long sys_nfsservctl /* 145 */
+ .long sys_ni_syscall /* 145 was nfsservctl */
.long sys_setresgid
.long sys_getresgid
.long sys_prctl
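
Pointing slot 145 at sys_ni_syscall instead of the retired nfsservctl entry keeps the slot occupied, so the numbers of the later syscalls do not shift. For reference, the generic stub in kernel/sys_ni.c amounts to:

#include <linux/errno.h>
#include <linux/linkage.h>

/* Catch-all entry for unimplemented or removed system calls: the caller
 * simply gets -ENOSYS.
 */
asmlinkage long sys_ni_syscall(void)
{
	return -ENOSYS;
}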