author		Mark Rutland <mark.rutland@arm.com>	2018-06-21 15:13:18 +0300
committer	Ingo Molnar <mingo@kernel.org>	2018-06-21 15:25:24 +0300
commit		18cc1814d4e7560412c9c8c6d28f9d6782c8b402 (patch)
tree		7295996dcedd3f3d90da1e570a1b459499a0d54a /include/asm-generic/atomic-instrumented.h
parent		356701329fb391184618eda7b7fb68cb35271506 (diff)
download	linux-18cc1814d4e7560412c9c8c6d28f9d6782c8b402.tar.xz
atomics/treewide: Make test ops optional
Some of the atomics return the result of a test applied after the atomic operation, and almost all architectures implement these as trivial wrappers around the underlying atomic. Specifically:

* <atomic>_inc_and_test(v) is (<atomic>_inc_return(v) == 0)
* <atomic>_dec_and_test(v) is (<atomic>_dec_return(v) == 0)
* <atomic>_sub_and_test(i, v) is (<atomic>_sub_return(i, v) == 0)
* <atomic>_add_negative(i, v) is (<atomic>_add_return(i, v) < 0)

Rather than have these definitions duplicated in all architectures, with minor inconsistencies in formatting and documentation, let's make these operations optional, with default fallbacks as above. Implementations must now provide a preprocessor symbol.

The instrumented atomics are updated accordingly.

Both x86 and m68k have custom implementations, which are left as-is, given preprocessor symbols to avoid being overridden.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-16-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
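For reference, a minimal sketch of the generic fallback pattern the commit message describes (illustrative only, not the exact <linux/atomic.h> source): when an implementation does not provide its own inc-and-test and advertise it with a matching #define, a trivial wrapper along these lines is used instead.

#ifndef atomic_inc_and_test
/* Fallback: true iff the increment made the counter zero. */
static inline bool atomic_inc_and_test(atomic_t *v)
{
	return atomic_inc_return(v) == 0;
}
#define atomic_inc_and_test atomic_inc_and_test
#endif

The self-referential #define is how an implementation signals "this op exists"; the instrumented header below uses the same convention for the arch_atomic_*() variants.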
Diffstat (limited to 'include/asm-generic/atomic-instrumented.h')
-rw-r--r--	include/asm-generic/atomic-instrumented.h	24
1 file changed, 24 insertions, 0 deletions
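The architecture-side opt-in that the #ifdef arch_atomic_*() checks in the diff below test for would look roughly like this (hypothetical arch/foo/include/asm/atomic.h, shown only to illustrate the convention; the real x86 and m68k versions keep their native instruction sequences):

static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	/* A real port would use an arch-specific decrement-and-test sequence. */
	return arch_atomic_dec_return(v) == 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

With the #define in place, the corresponding instrumented wrapper below is compiled in; without it, the #ifdef block is skipped and the generic fallback supplies atomic_dec_and_test() instead.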
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 2b487f28ef35..6b64c200de73 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -225,29 +225,41 @@ static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
return arch_atomic64_dec_if_positive(v);
}
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
+#endif
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
+#endif
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
@@ -333,29 +345,41 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
return arch_atomic64_fetch_xor(i, v);
}
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
+#endif
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
+#endif
static __always_inline unsigned long
cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)