author    | Paul Mundt <lethal@linux-sh.org> | 2008-12-08 05:25:50 +0300
committer | Paul Mundt <lethal@linux-sh.org> | 2008-12-22 12:43:52 +0300
commit    | 77ba93a7ac5fb0d9338bffbf97c787b8efe00806
tree      | 5446453de9b7317e0c5ca255e13e553843c30093 /arch/sh/include
parent    | c6f17cb2272121475c87592560534b157b17544e
download  | linux-77ba93a7ac5fb0d9338bffbf97c787b8efe00806.tar.xz
sh: Fix up the SH-4A mutex fastpath semantics.
This fixes up the __mutex_fastpath_xxx() routines to match the semantics
noted in the comment. Previously these were looping rather than making a
single pass, which is counter-intuitive, as the slow path takes care of
the looping for us in the event of contention.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
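
The contract at issue: a mutex fastpath makes exactly one lock attempt, and any failure is handed to the slow path, which handles sleeping and retrying. Below is a minimal sketch of that contract in portable C11 atomics (illustrative only; the function name and the atomic_fetch_sub-based attempt are assumptions for exposition, not the kernel's implementation):

	#include <stdatomic.h>

	/* Illustrative sketch of the single-pass fastpath contract:
	 * count is 1 when unlocked, 0 when locked, negative when
	 * contended. */
	static inline void fastpath_lock(atomic_int *count,
					 void (*fail_fn)(atomic_int *))
	{
		/* Exactly one attempt: decrement unconditionally. If the
		 * old value was not 1, the mutex was already held; do not
		 * loop here, the slow path (fail_fn) waits and retries. */
		if (atomic_fetch_sub(count, 1) != 1)
			fail_fn(count);
	}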
Diffstat (limited to 'arch/sh/include')
-rw-r--r-- | arch/sh/include/asm/mutex-llsc.h | 37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
index 7c75af5e734b..a91990c6e8e5 100644
--- a/arch/sh/include/asm/mutex-llsc.h
+++ b/arch/sh/include/asm/mutex-llsc.h
@@ -21,16 +21,18 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	int __res;
+	int __ex_flag, __res;
 
 	__asm__ __volatile__ (
-		"movli.l	@%1, %0	\n"
-		"dt		%0	\n"
-		"movco.l	%0, @%1	\n"
-		: "=&z" (__res)
+		"movli.l	@%2, %0	\n"
+		"add		#-1, %0	\n"
+		"movco.l	%0, @%2	\n"
+		"movt		%1	\n"
+		: "=&z" (__res), "=&r" (__ex_flag)
 		: "r" (&(count)->counter)
 		: "t");
 
+	__res |= !__ex_flag;
 	if (unlikely(__res != 0))
 		fail_fn(count);
 }
@@ -38,16 +40,18 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	int __res;
+	int __ex_flag, __res;
 
 	__asm__ __volatile__ (
-		"movli.l	@%1, %0	\n"
-		"dt		%0	\n"
-		"movco.l	%0, @%1	\n"
-		: "=&z" (__res)
+		"movli.l	@%2, %0	\n"
+		"add		#-1, %0	\n"
+		"movco.l	%0, @%2	\n"
+		"movt		%1	\n"
+		: "=&z" (__res), "=&r" (__ex_flag)
 		: "r" (&(count)->counter)
 		: "t");
 
+	__res |= !__ex_flag;
 	if (unlikely(__res != 0))
 		__res = fail_fn(count);
 
@@ -57,18 +61,19 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	int __res;
+	int __ex_flag, __res;
 
 	__asm__ __volatile__ (
-		"1: movli.l	@%1, %0	\n\t"
+		"movli.l	@%2, %0	\n\t"
 		"add		#1, %0	\n\t"
-		"movco.l	%0, @%1	\n\t"
-		"bf		1b\n\t"
-		: "=&z" (__res)
+		"movco.l	%0, @%2	\n\t"
+		"movt		%1	\n\t"
+		: "=&z" (__res), "=&r" (__ex_flag)
 		: "r" (&(count)->counter)
 		: "t");
 
-	if (unlikely(__res <= 0))
+	__res |= !__ex_flag;
+	if (unlikely(__res != 0))
 		fail_fn(count);
 }
 
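
For readers unfamiliar with SH-4A assembly: movli.l/movco.l are the load-linked/store-conditional pair. movco.l sets the T flag to 1 only if the reservation taken by movli.l survived, and movt copies T into __ex_flag. ORing !__ex_flag into __res makes a lost reservation look like contention, so a single failed pass falls through to the slow path instead of looping (note that the old unlock path spun on "bf 1b"; after this patch all three routines share the same single-pass shape). A rough C rendering of the new lock fastpath follows, with hypothetical ll()/sc() helpers standing in for the two instructions:

	/* Hypothetical stand-ins for movli.l (load-linked) and movco.l
	 * (store-conditional); sc() returns 1 iff the store succeeded. */
	extern int ll(int *addr);
	extern int sc(int *addr, int val);

	static inline void lock_fastpath_sketch(int *counter,
						void (*fail_fn)(int *))
	{
		int res, ex_flag;

		res = ll(counter);          /* movli.l @%2, %0 */
		res -= 1;                   /* add     #-1, %0 */
		ex_flag = sc(counter, res); /* movco.l %0, @%2 ; movt %1 */

		res |= !ex_flag;            /* lost reservation => nonzero */
		if (res != 0)
			fail_fn(counter);   /* slow path loops/sleeps, not us */
	}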