Diffstat (limited to 'include')
28 files changed, 2611 insertions, 1291 deletions
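For orientation before the hunks: this patch restructures the atomic_long_t API into three layers, with the generated arch_atomic_long_* wrappers moving from include/asm-generic/atomic-long.h to include/linux/atomic/atomic-long.h and the sanitizer-facing atomic_long_* entry points being generated into include/linux/atomic/atomic-instrumented.h. The asm-generic bitops get the same treatment (arch_set_bit() and friends, plus an instrumented-atomic.h include). The sketch below is abridged from the generated headers in this diff (only the add operation is shown, and kernel-internal types/macros such as __always_inline and instrument_atomic_read_write() are assumed to come from the usual kernel headers); it is an illustration of the layering, not a complete header.

/* include/linux/atomic/atomic-long.h (generated): arch_ layer */
#ifdef CONFIG_64BIT
typedef atomic64_t atomic_long_t;

static __always_inline void
arch_atomic_long_add(long i, atomic_long_t *v)
{
	arch_atomic64_add(i, v);	/* defer to the 64-bit arch op */
}
#else
typedef atomic_t atomic_long_t;

static __always_inline void
arch_atomic_long_add(long i, atomic_long_t *v)
{
	arch_atomic_add(i, v);		/* defer to the 32-bit arch op */
}
#endif

/* include/linux/atomic/atomic-instrumented.h (generated): KASAN/KCSAN layer */
static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* sanitizer hook */
	arch_atomic_long_add(i, v);			/* uninstrumented arch op */
}

Callers keep using atomic_long_add() and get instrumentation once at the outermost layer, while code that must avoid double instrumentation (such as the arch_ bitops below) calls the arch_atomic_long_* variants directly, as noted in the atomic-instrumented.h header comment.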
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h deleted file mode 100644 index 073cf40f431b..000000000000 --- a/include/asm-generic/atomic-long.h +++ /dev/null @@ -1,1014 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -// Generated by scripts/atomic/gen-atomic-long.sh -// DO NOT MODIFY THIS FILE DIRECTLY - -#ifndef _ASM_GENERIC_ATOMIC_LONG_H -#define _ASM_GENERIC_ATOMIC_LONG_H - -#include <linux/compiler.h> -#include <asm/types.h> - -#ifdef CONFIG_64BIT -typedef atomic64_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) -#define atomic_long_cond_read_acquire atomic64_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed -#else -typedef atomic_t atomic_long_t; -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -#define atomic_long_cond_read_acquire atomic_cond_read_acquire -#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed -#endif - -#ifdef CONFIG_64BIT - -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic64_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic64_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic64_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic64_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic64_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic64_add_return(i, v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic64_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic64_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic64_fetch_add(i, v); -} - -static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic64_sub(i, v); -} - -static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic64_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic64_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic64_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return atomic64_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t 
*v) -{ - return atomic64_fetch_sub_release(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic64_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic64_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic64_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return atomic64_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic64_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic64_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic64_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic64_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic64_dec(v); -} - -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic64_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic64_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic64_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic64_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic64_fetch_dec(v); -} - -static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic64_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic64_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic64_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic64_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic64_fetch_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic64_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_release(i, v); -} - -static __always_inline long 
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic64_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic64_fetch_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_release(i, v); -} - -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic64_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_release(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic64_fetch_xor_relaxed(i, v); -} - -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic64_xchg(v, i); -} - -static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic64_xchg_acquire(v, i); -} - -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return atomic64_xchg_release(v, i); -} - -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic64_xchg_relaxed(v, i); -} - -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_release(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return atomic64_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_release(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); -} - -static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) -{ - return atomic64_sub_and_test(i, v); -} - -static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) -{ - return atomic64_dec_and_test(v); -} - -static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) -{ - return atomic64_inc_and_test(v); -} - -static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) -{ 
- return atomic64_add_negative(i, v); -} - -static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic64_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) -{ - return atomic64_inc_not_zero(v); -} - -static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return atomic64_inc_unless_negative(v); -} - -static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) -{ - return atomic64_dec_unless_positive(v); -} - -static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) -{ - return atomic64_dec_if_positive(v); -} - -#else /* CONFIG_64BIT */ - -static __always_inline long -atomic_long_read(const atomic_long_t *v) -{ - return atomic_read(v); -} - -static __always_inline long -atomic_long_read_acquire(const atomic_long_t *v) -{ - return atomic_read_acquire(v); -} - -static __always_inline void -atomic_long_set(atomic_long_t *v, long i) -{ - atomic_set(v, i); -} - -static __always_inline void -atomic_long_set_release(atomic_long_t *v, long i) -{ - atomic_set_release(v, i); -} - -static __always_inline void -atomic_long_add(long i, atomic_long_t *v) -{ - atomic_add(i, v); -} - -static __always_inline long -atomic_long_add_return(long i, atomic_long_t *v) -{ - return atomic_add_return(i, v); -} - -static __always_inline long -atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return atomic_add_return_acquire(i, v); -} - -static __always_inline long -atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return atomic_add_return_release(i, v); -} - -static __always_inline long -atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_add_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return atomic_fetch_add(i, v); -} - -static __always_inline long -atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_add_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return atomic_fetch_add_release(i, v); -} - -static __always_inline long -atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_add_relaxed(i, v); -} - -static __always_inline void -atomic_long_sub(long i, atomic_long_t *v) -{ - atomic_sub(i, v); -} - -static __always_inline long -atomic_long_sub_return(long i, atomic_long_t *v) -{ - return atomic_sub_return(i, v); -} - -static __always_inline long -atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return atomic_sub_return_acquire(i, v); -} - -static __always_inline long -atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return atomic_sub_return_release(i, v); -} - -static __always_inline long -atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return atomic_sub_return_relaxed(i, v); -} - -static __always_inline long -atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return atomic_fetch_sub(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return atomic_fetch_sub_release(i, v); -} - -static __always_inline long -atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ 
- return atomic_fetch_sub_relaxed(i, v); -} - -static __always_inline void -atomic_long_inc(atomic_long_t *v) -{ - atomic_inc(v); -} - -static __always_inline long -atomic_long_inc_return(atomic_long_t *v) -{ - return atomic_inc_return(v); -} - -static __always_inline long -atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return atomic_inc_return_acquire(v); -} - -static __always_inline long -atomic_long_inc_return_release(atomic_long_t *v) -{ - return atomic_inc_return_release(v); -} - -static __always_inline long -atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return atomic_inc_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_inc(atomic_long_t *v) -{ - return atomic_fetch_inc(v); -} - -static __always_inline long -atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return atomic_fetch_inc_acquire(v); -} - -static __always_inline long -atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return atomic_fetch_inc_release(v); -} - -static __always_inline long -atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return atomic_fetch_inc_relaxed(v); -} - -static __always_inline void -atomic_long_dec(atomic_long_t *v) -{ - atomic_dec(v); -} - -static __always_inline long -atomic_long_dec_return(atomic_long_t *v) -{ - return atomic_dec_return(v); -} - -static __always_inline long -atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return atomic_dec_return_acquire(v); -} - -static __always_inline long -atomic_long_dec_return_release(atomic_long_t *v) -{ - return atomic_dec_return_release(v); -} - -static __always_inline long -atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return atomic_dec_return_relaxed(v); -} - -static __always_inline long -atomic_long_fetch_dec(atomic_long_t *v) -{ - return atomic_fetch_dec(v); -} - -static __always_inline long -atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return atomic_fetch_dec_acquire(v); -} - -static __always_inline long -atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return atomic_fetch_dec_release(v); -} - -static __always_inline long -atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return atomic_fetch_dec_relaxed(v); -} - -static __always_inline void -atomic_long_and(long i, atomic_long_t *v) -{ - atomic_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return atomic_fetch_and(i, v); -} - -static __always_inline long -atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_and_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return atomic_fetch_and_release(i, v); -} - -static __always_inline long -atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_and_relaxed(i, v); -} - -static __always_inline void -atomic_long_andnot(long i, atomic_long_t *v) -{ - atomic_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_release(i, v); -} - -static __always_inline long -atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -atomic_long_or(long i, atomic_long_t *v) -{ - atomic_or(i, v); -} - -static 
__always_inline long -atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return atomic_fetch_or(i, v); -} - -static __always_inline long -atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_or_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return atomic_fetch_or_release(i, v); -} - -static __always_inline long -atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_or_relaxed(i, v); -} - -static __always_inline void -atomic_long_xor(long i, atomic_long_t *v) -{ - atomic_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return atomic_fetch_xor(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_acquire(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_release(i, v); -} - -static __always_inline long -atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return atomic_fetch_xor_relaxed(i, v); -} - -static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) -{ - return atomic_xchg(v, i); -} - -static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return atomic_xchg_acquire(v, i); -} - -static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return atomic_xchg_release(v, i); -} - -static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return atomic_xchg_relaxed(v, i); -} - -static __always_inline long -atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_release(v, old, new); -} - -static __always_inline long -atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return atomic_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_acquire(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_release(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return atomic_try_cmpxchg_relaxed(v, (int *)old, new); -} - -static __always_inline bool -atomic_long_sub_and_test(long i, atomic_long_t *v) -{ - return atomic_sub_and_test(i, v); -} - -static __always_inline bool -atomic_long_dec_and_test(atomic_long_t *v) -{ - return atomic_dec_and_test(v); -} - -static __always_inline bool -atomic_long_inc_and_test(atomic_long_t *v) -{ - return atomic_inc_and_test(v); -} - -static __always_inline bool -atomic_long_add_negative(long i, atomic_long_t *v) -{ - return atomic_add_negative(i, v); -} - -static __always_inline long -atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return atomic_fetch_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_add_unless(atomic_long_t *v, long a, long 
u) -{ - return atomic_add_unless(v, a, u); -} - -static __always_inline bool -atomic_long_inc_not_zero(atomic_long_t *v) -{ - return atomic_inc_not_zero(v); -} - -static __always_inline bool -atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return atomic_inc_unless_negative(v); -} - -static __always_inline bool -atomic_long_dec_unless_positive(atomic_long_t *v) -{ - return atomic_dec_unless_positive(v); -} - -static __always_inline long -atomic_long_dec_if_positive(atomic_long_t *v) -{ - return atomic_dec_if_positive(v); -} - -#endif /* CONFIG_64BIT */ -#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ -// a624200981f552b2c6be4f32fe44da8289f30d87 diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 0e7316a86240..3096f086b5a3 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -11,25 +11,29 @@ * See Documentation/atomic_bitops.txt for details. */ -static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_set_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } -static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_clear_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } -static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_change_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); } -static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline int +arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); @@ -38,11 +42,12 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p) if (READ_ONCE(*p) & mask) return 1; - old = atomic_long_fetch_or(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p); return !!(old & mask); } -static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline int +arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); @@ -51,18 +56,21 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p) if (!(READ_ONCE(*p) & mask)) return 0; - old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p); return !!(old & mask); } -static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p) +static __always_inline int +arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = atomic_long_fetch_xor(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p); return !!(old & mask); } +#include <asm-generic/bitops/instrumented-atomic.h> + #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 3ae021368f48..630f2f6b9595 100644 --- a/include/asm-generic/bitops/lock.h +++ 
b/include/asm-generic/bitops/lock.h @@ -7,7 +7,7 @@ #include <asm/barrier.h> /** - * test_and_set_bit_lock - Set a bit and return its old value, for lock + * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * @@ -15,8 +15,8 @@ * the returned value is 0. * It can be used to implement bit locks. */ -static inline int test_and_set_bit_lock(unsigned int nr, - volatile unsigned long *p) +static __always_inline int +arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); @@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr, if (READ_ONCE(*p) & mask) return 1; - old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); return !!(old & mask); } /** - * clear_bit_unlock - Clear a bit in memory, for unlock + * arch_clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ -static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) +static __always_inline void +arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); + arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); } /** - * __clear_bit_unlock - Clear a bit in memory, for unlock + * arch___clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * @@ -54,38 +55,40 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p) * * See for example x86's implementation. */ -static inline void __clear_bit_unlock(unsigned int nr, - volatile unsigned long *p) +static inline void +arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) { unsigned long old; p += BIT_WORD(nr); old = READ_ONCE(*p); old &= ~BIT_MASK(nr); - atomic_long_set_release((atomic_long_t *)p, old); + arch_atomic_long_set_release((atomic_long_t *)p, old); } /** - * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. + * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock. 
* @nr: the bit to clear * @addr: the address to start counting from * * This is a bit of a one-trick-pony for the filemap code, which clears * PG_locked and tests PG_waiters, */ -#ifndef clear_bit_unlock_is_negative_byte -static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr, - volatile unsigned long *p) +#ifndef arch_clear_bit_unlock_is_negative_byte +static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, + volatile unsigned long *p) { long old; unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } -#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte #endif +#include <asm-generic/bitops/instrumented-lock.h> + #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */ diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h index 7e10c4b50c5d..365377fb104b 100644 --- a/include/asm-generic/bitops/non-atomic.h +++ b/include/asm-generic/bitops/non-atomic.h @@ -5,7 +5,7 @@ #include <asm/types.h> /** - * __set_bit - Set a bit in memory + * arch___set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -13,24 +13,28 @@ * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static inline void __set_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___set_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p |= mask; } +#define __set_bit arch___set_bit -static inline void __clear_bit(int nr, volatile unsigned long *addr) +static __always_inline void +arch___clear_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p &= ~mask; } +#define __clear_bit arch___clear_bit /** - * __change_bit - Toggle a bit in memory + * arch___change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * @@ -38,16 +42,18 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) * If it's called on the same region of memory simultaneously, the effect * may be that only one operation succeeds. */ -static inline void __change_bit(int nr, volatile unsigned long *addr) +static __always_inline +void arch___change_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p ^= mask; } +#define __change_bit arch___change_bit /** - * __test_and_set_bit - Set a bit and return its old value + * arch___test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * @@ -55,7 +61,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. 
*/ -static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +static __always_inline int +arch___test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -64,9 +71,10 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) *p = old | mask; return (old & mask) != 0; } +#define __test_and_set_bit arch___test_and_set_bit /** - * __test_and_clear_bit - Clear a bit and return its old value + * arch___test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * @@ -74,7 +82,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +static __always_inline int +arch___test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -83,10 +92,11 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) *p = old & ~mask; return (old & mask) != 0; } +#define __test_and_clear_bit arch___test_and_clear_bit /* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, - volatile unsigned long *addr) +static __always_inline int +arch___test_and_change_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); @@ -95,15 +105,18 @@ static inline int __test_and_change_bit(int nr, *p = old ^ mask; return (old & mask) != 0; } +#define __test_and_change_bit arch___test_and_change_bit /** - * test_bit - Determine whether a bit is set + * arch_test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ -static inline int test_bit(int nr, const volatile unsigned long *addr) +static __always_inline int +arch_test_bit(int nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); } +#define test_bit arch_test_bit #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index ed1d3ffd5b9d..8dd57c3a99e9 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -77,9 +77,8 @@ __ret; \ }) -#include <linux/atomic-arch-fallback.h> -#include <asm-generic/atomic-instrumented.h> - -#include <asm-generic/atomic-long.h> +#include <linux/atomic/atomic-arch-fallback.h> +#include <linux/atomic/atomic-long.h> +#include <linux/atomic/atomic-instrumented.h> #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index a3dba31df01e..a3dba31df01e 100644 --- a/include/linux/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h diff --git a/include/asm-generic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index bc45af52c93b..a0f654370da3 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -14,8 +14,8 @@ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid * double instrumentation. 
*/ -#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H -#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H +#ifndef _LINUX_ATOMIC_INSTRUMENTED_H +#define _LINUX_ATOMIC_INSTRUMENTED_H #include <linux/build_bug.h> #include <linux/compiler.h> @@ -1177,6 +1177,584 @@ atomic64_dec_if_positive(atomic64_t *v) return arch_atomic64_dec_if_positive(v); } +static __always_inline long +atomic_long_read(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read(v); +} + +static __always_inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read_acquire(v); +} + +static __always_inline void +atomic_long_set(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set(v, i); +} + +static __always_inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set_release(v, i); +} + +static __always_inline void +atomic_long_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_add(i, v); +} + +static __always_inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return(i, v); +} + +static __always_inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_acquire(i, v); +} + +static __always_inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_release(i, v); +} + +static __always_inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add(i, v); +} + +static __always_inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_release(i, v); +} + +static __always_inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_sub(i, v); +} + +static __always_inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return(i, v); +} + +static __always_inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_acquire(i, v); +} + +static __always_inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_release(i, v); +} + +static __always_inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_relaxed(i, 
v); +} + +static __always_inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_release(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_long_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_inc(v); +} + +static __always_inline long +atomic_long_inc_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return(v); +} + +static __always_inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_acquire(v); +} + +static __always_inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_release(v); +} + +static __always_inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc(v); +} + +static __always_inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_acquire(v); +} + +static __always_inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_release(v); +} + +static __always_inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_long_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_dec(v); +} + +static __always_inline long +atomic_long_dec_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return(v); +} + +static __always_inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_acquire(v); +} + +static __always_inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_release(v); +} + +static __always_inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec(v); +} + +static __always_inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_acquire(v); +} + +static 
__always_inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_release(v); +} + +static __always_inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_long_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_release(i, v); +} + +static __always_inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_release(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_long_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_release(i, v); +} + +static __always_inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + 
instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_release(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_relaxed(i, v); +} + +static __always_inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg(v, i); +} + +static __always_inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_acquire(v, i); +} + +static __always_inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_release(v, i); +} + +static __always_inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_relaxed(v, i); +} + +static __always_inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_release(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_and_test(i, v); +} + +static __always_inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_and_test(v); +} + +static __always_inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_inc_and_test(v); +} + +static __always_inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative(i, v); +} + +static __always_inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_not_zero(v); +} + +static __always_inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_unless_negative(v); +} + +static __always_inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_unless_positive(v); +} + +static __always_inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_if_positive(v); +} + #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ @@ -1333,5 +1911,5 @@ atomic64_dec_if_positive(atomic64_t *v) arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) -#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// 1d7c3a25aca5c7fb031c307be4c3d24c7b48fcd5 +#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ +// 2a9553f0a9d5619f19151092df5cabbbf16ce835 diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h new file mode 100644 index 000000000000..800b8c35992d --- /dev/null +++ b/include/linux/atomic/atomic-long.h @@ -0,0 +1,1014 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-long.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_LONG_H +#define _LINUX_ATOMIC_LONG_H + +#include <linux/compiler.h> +#include <asm/types.h> + +#ifdef CONFIG_64BIT +typedef atomic64_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#else +typedef atomic_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#endif + +#ifdef CONFIG_64BIT + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic64_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const atomic_long_t *v) +{ + return arch_atomic64_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic64_set(v, i); +} + +static __always_inline void +arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic64_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic64_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_acquire(i, v); +} + +static __always_inline long 
+arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic64_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic64_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic64_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic64_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic64_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic64_dec_return(v); +} + +static __always_inline long 
+arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return arch_atomic64_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic64_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic64_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic64_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic64_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_release(i, v); +} + +static __always_inline long 
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic64_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic64_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic64_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic64_inc_not_zero(v); +} + +static __always_inline bool +arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic64_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_unless_positive(v); +} + +static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_if_positive(v); +} + +#else /* CONFIG_64BIT */ + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const 
atomic_long_t *v) +{ + return arch_atomic_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic_set(v, i); +} + +static __always_inline void +arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic_fetch_inc(v); +} + +static 
__always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic_dec_return(v); +} + +static __always_inline long +arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return arch_atomic_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return 
arch_atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return arch_atomic_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_release(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic_inc_not_zero(v); +} + +static __always_inline bool 
+arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic_dec_unless_positive(v); +} + +static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic_dec_if_positive(v); +} + +#endif /* CONFIG_64BIT */ +#endif /* _LINUX_ATOMIC_LONG_H */ +// e8f0e08ff072b74d180eabe2ad001282b38c2c88 diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index edb5c186b0b7..3f49e65169c6 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -3,8 +3,7 @@ #define __LINUX_DEBUG_LOCKING_H #include <linux/atomic.h> -#include <linux/bug.h> -#include <linux/printk.h> +#include <linux/cache.h> struct task_struct; diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index ded90b097e6e..975e33b793a7 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -6,6 +6,8 @@ #include <linux/percpu-defs.h> #include <linux/lockdep.h> +#ifndef CONFIG_PREEMPT_RT + typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -14,29 +16,14 @@ typedef struct { } local_lock_t; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LL_DEP_MAP_INIT(lockname) \ +# define LOCAL_LOCK_DEBUG_INIT(lockname) \ .dep_map = { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_CONFIG, \ - .lock_type = LD_LOCK_PERCPU, \ - } -#else -# define LL_DEP_MAP_INIT(lockname) -#endif - -#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) } - -#define __local_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ - lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \ - LD_WAIT_CONFIG, LD_WAIT_INV, \ - LD_LOCK_PERCPU); \ -} while (0) + .lock_type = LD_LOCK_PERCPU, \ + }, \ + .owner = NULL, -#ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void local_lock_acquire(local_lock_t *l) { lock_map_acquire(&l->dep_map); @@ -51,11 +38,30 @@ static inline void local_lock_release(local_lock_t *l) lock_map_release(&l->dep_map); } +static inline void local_lock_debug_init(local_lock_t *l) +{ + l->owner = NULL; +} #else /* CONFIG_DEBUG_LOCK_ALLOC */ +# define LOCAL_LOCK_DEBUG_INIT(lockname) static inline void local_lock_acquire(local_lock_t *l) { } static inline void local_lock_release(local_lock_t *l) { } +static inline void local_lock_debug_init(local_lock_t *l) { } #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ +#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) } + +#define __local_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \ + 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ + LD_LOCK_PERCPU); \ + local_lock_debug_init(lock); \ +} while (0) + #define __local_lock(lock) \ do { \ preempt_disable(); \ @@ -91,3 +97,45 @@ static inline void local_lock_release(local_lock_t *l) { } local_lock_release(this_cpu_ptr(lock)); \ local_irq_restore(flags); \ } while (0) + +#else /* !CONFIG_PREEMPT_RT */ + +/* + * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the + * critical section while staying preemptible. 
+ */ +typedef spinlock_t local_lock_t; + +#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname)) + +#define __local_lock_init(l) \ + do { \ + local_spin_lock_init((l)); \ + } while (0) + +#define __local_lock(__lock) \ + do { \ + migrate_disable(); \ + spin_lock(this_cpu_ptr((__lock))); \ + } while (0) + +#define __local_lock_irq(lock) __local_lock(lock) + +#define __local_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __local_lock(lock); \ + } while (0) + +#define __local_unlock(__lock) \ + do { \ + spin_unlock(this_cpu_ptr((__lock))); \ + migrate_enable(); \ + } while (0) + +#define __local_unlock_irq(lock) __local_unlock(lock) + +#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock) + +#endif /* CONFIG_PREEMPT_RT */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h index e19323521f9c..8f226d460f51 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -20,8 +20,17 @@ #include <linux/osq_lock.h> #include <linux/debug_locks.h> -struct ww_class; -struct ww_acquire_ctx; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* * Simple, straightforward mutexes with strict semantics: @@ -53,7 +62,7 @@ struct ww_acquire_ctx; */ struct mutex { atomic_long_t owner; - spinlock_t wait_lock; + raw_spinlock_t wait_lock; #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif @@ -66,27 +75,6 @@ struct mutex { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - -/* - * This is the control structure for tasks blocked on mutex, - * which resides on the blocked task's kernel stack: - */ -struct mutex_waiter { - struct list_head list; - struct task_struct *task; - struct ww_acquire_ctx *ww_ctx; -#ifdef CONFIG_DEBUG_MUTEXES - void *magic; -#endif -}; - #ifdef CONFIG_DEBUG_MUTEXES #define __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -117,19 +105,9 @@ do { \ __mutex_init((mutex), #mutex, &__key); \ } while (0) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .owner = ATOMIC_LONG_INIT(0) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } @@ -148,6 +126,50 @@ extern void __mutex_init(struct mutex *lock, const char *name, */ extern bool mutex_is_locked(struct mutex *lock); +#else /* !CONFIG_PREEMPT_RT */ +/* + * Preempt-RT variant based on rtmutexes. 
+ */ +#include <linux/rtmutex.h> + +struct mutex { + struct rt_mutex_base rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ +} + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_rt_init(struct mutex *lock, const char *name, + struct lock_class_key *key); +extern int mutex_trylock(struct mutex *lock); + +static inline void mutex_destroy(struct mutex *lock) { } + +#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex) + +#define __mutex_init(mutex, name, key) \ +do { \ + rt_mutex_base_init(&(mutex)->rtmutex); \ + __mutex_rt_init((mutex), name, key); \ +} while (0) + +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) +#endif /* CONFIG_PREEMPT_RT */ + /* * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 9881eac0698f..4d244e295e85 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -121,7 +121,11 @@ /* * The preempt_count offset after spin_lock() */ +#if !defined(CONFIG_PREEMPT_RT) #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET +#else +#define PREEMPT_LOCK_OFFSET 0 +#endif /* * The preempt_count offset needed for things like: diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d31ecaf4fdd3..235047d7a1b5 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -17,24 +17,14 @@ #ifndef _LINUX_RBTREE_H #define _LINUX_RBTREE_H +#include <linux/rbtree_types.h> + #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/rcupdate.h> -struct rb_node { - unsigned long __rb_parent_color; - struct rb_node *rb_right; - struct rb_node *rb_left; -} __attribute__((aligned(sizeof(long)))); - /* The alignment might seem pointless, but allegedly CRIS needs it */ - -struct rb_root { - struct rb_node *rb_node; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) -#define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) -/* - * Leftmost-cached rbtrees. - * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. 
- */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } - /* Same as rb_first(), but O(1) */ #define rb_first_cached(root) (root)->rb_leftmost diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h new file mode 100644 index 000000000000..45b6ecde3665 --- /dev/null +++ b/include/linux/rbtree_types.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_RBTREE_TYPES_H +#define _LINUX_RBTREE_TYPES_H + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); +/* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. + */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT (struct rb_root) { NULL, } +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +#endif diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index d1672de9ca89..9deedfeec2b1 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -13,12 +13,39 @@ #ifndef __LINUX_RT_MUTEX_H #define __LINUX_RT_MUTEX_H +#include <linux/compiler.h> #include <linux/linkage.h> -#include <linux/rbtree.h> -#include <linux/spinlock_types.h> +#include <linux/rbtree_types.h> +#include <linux/spinlock_types_raw.h> extern int max_lock_depth; /* for sysctl */ +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \ +{ \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \ + .waiters = RB_ROOT_CACHED, \ + .owner = NULL \ +} + +/** + * rt_mutex_base_is_locked - is the rtmutex locked + * @lock: the mutex to be queried + * + * Returns true if the mutex is locked, false if unlocked. 
+ */ +static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock) +{ + return READ_ONCE(lock->owner) != NULL; +} + +extern void rt_mutex_base_init(struct rt_mutex_base *rtb); + /** * The rt_mutex structure * @@ -28,9 +55,7 @@ extern int max_lock_depth; /* for sysctl */ * @owner: the mutex owner */ struct rt_mutex { - raw_spinlock_t wait_lock; - struct rb_root_cached waiters; - struct task_struct *owner; + struct rt_mutex_base rtmutex; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -52,32 +77,24 @@ do { \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ - , .dep_map = { .name = #mutexname } +#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ + .dep_map = { \ + .name = #mutexname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } #else #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT_CACHED \ - , .owner = NULL \ - __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} +#define __RT_MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \ + __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ +} #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -/** - * rt_mutex_is_locked - is the mutex locked - * @lock: the mutex to be queried - * - * Returns 1 if the mutex is locked, 0 if unlocked. - */ -static inline int rt_mutex_is_locked(struct rt_mutex *lock) -{ - return lock->owner != NULL; -} - extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h new file mode 100644 index 000000000000..1d264dd08625 --- /dev/null +++ b/include/linux/rwbase_rt.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef _LINUX_RWBASE_RT_H +#define _LINUX_RWBASE_RT_H + +#include <linux/rtmutex.h> +#include <linux/atomic.h> + +#define READER_BIAS (1U << 31) +#define WRITER_BIAS (1U << 30) + +struct rwbase_rt { + atomic_t readers; + struct rt_mutex_base rtmutex; +}; + +#define __RWBASE_INITIALIZER(name) \ +{ \ + .readers = ATOMIC_INIT(READER_BIAS), \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \ +} + +#define init_rwbase_rt(rwbase) \ + do { \ + rt_mutex_base_init(&(rwbase)->rtmutex); \ + atomic_set(&(rwbase)->readers, READER_BIAS); \ + } while (0) + + +static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) != READER_BIAS; +} + +static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) > 0; +} + +#endif /* _LINUX_RWBASE_RT_H */ diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 index 000000000000..49c1f3842ed5 --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_RT_H +#error Do not #include directly. Use <linux/spinlock.h>. 
+#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name, + struct lock_class_key *key); +#else +static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name, + struct lock_class_key *key) +{ +} +#endif + +#define rwlock_init(rwl) \ +do { \ + static struct lock_class_key __key; \ + \ + init_rwbase_rt(&(rwl)->rwbase); \ + __rt_rwlock_init(rwl, #rwl, &__key); \ +} while (0) + +extern void rt_read_lock(rwlock_t *rwlock); +extern int rt_read_trylock(rwlock_t *rwlock); +extern void rt_read_unlock(rwlock_t *rwlock); +extern void rt_write_lock(rwlock_t *rwlock); +extern int rt_write_trylock(rwlock_t *rwlock); +extern void rt_write_unlock(rwlock_t *rwlock); + +static __always_inline void read_lock(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_irq(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_read_lock(lock); \ + flags = 0; \ + } while (0) + +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) + +static __always_inline void read_unlock(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_bh(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void read_unlock_irq(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void write_lock(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +static __always_inline void write_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_write_lock(rwlock); +} + +static __always_inline void write_lock_irq(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_write_lock(lock); \ + flags = 0; \ + } while (0) + +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = write_trylock(lock); \ + __locked; \ +}) + +static __always_inline void write_unlock(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_bh(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void write_unlock_irq(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_write_unlock(rwlock); +} + +#define rwlock_is_contended(lock) (((void)(lock), 0)) + +#endif /* __LINUX_RWLOCK_RT_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 3bd03e18061c..1948442e7750 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,9 +1,23 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) +# error "Do not include directly, include spinlock_types.h" +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* - * include/linux/rwlock_types.h - generic rwlock 
type definitions - * and initializers + * generic rwlock type definitions and initializers * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). @@ -21,16 +35,6 @@ typedef struct { #define RWLOCK_MAGIC 0xdeaf1eed -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RW_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ @@ -46,4 +50,29 @@ typedef struct { #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +typedef struct { + struct rwbase_rt rwbase; + atomic_t readers; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define __RWLOCK_RT_INITIALIZER(name) \ +{ \ + .rwbase = __RWBASE_INITIALIZER(name), \ + RW_DEP_MAP_INIT(name) \ +} + +#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) + +#define DEFINE_RWLOCK(name) \ + rwlock_t name = __RW_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index a66038d88878..426e98e0b675 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -16,6 +16,19 @@ #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/err.h> + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + }, +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT + #ifdef CONFIG_RWSEM_SPIN_ON_OWNER #include <linux/osq_lock.h> #endif @@ -64,16 +77,6 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) /* Common initializer macros and functions */ -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - }, -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_RWSEMS # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname, #else @@ -119,6 +122,61 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +struct rw_semaphore { + struct rwbase_rt rwbase; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __RWSEM_INITIALIZER(name) \ + { \ + .rwbase = __RWBASE_INITIALIZER(name), \ + __RWSEM_DEP_MAP_INIT(name) \ + } + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); +#else +static inline void __rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key) +{ +} +#endif + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + init_rwbase_rt(&(sem)->rwbase); \ + __rwsem_init((sem), #sem, &__key); \ +} while (0) + +static __always_inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return rw_base_is_locked(&sem->rwbase); +} + +static __always_inline int rwsem_is_contended(struct rw_semaphore *sem) +{ + return rw_base_is_contended(&sem->rwbase); +} + +#endif /* CONFIG_PREEMPT_RT */ + +/* + * The functions below are the same for all rwsem 
implementations including + * the RT specific variant. + */ + /* * lock for reading */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 7c6a77da8b10..af7179f8572c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -95,7 +95,9 @@ struct task_group; #define TASK_WAKING 0x0200 #define TASK_NOLOAD 0x0400 #define TASK_NEW 0x0800 -#define TASK_STATE_MAX 0x1000 +/* RT specific auxilliary flag to mark RT lock waiters */ +#define TASK_RTLOCK_WAIT 0x1000 +#define TASK_STATE_MAX 0x2000 /* Convenience macros for the sake of set_current_state: */ #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) @@ -121,8 +123,6 @@ struct task_group; #define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP - /* * Special states are those that do not use the normal wait-loop pattern. See * the comment with set_special_state(). @@ -130,30 +130,37 @@ struct task_group; #define is_special_task_state(state) \ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) -#define __set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - WRITE_ONCE(current->__state, (state_value)); \ - } while (0) - -#define set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - smp_store_mb(current->__state, (state_value)); \ +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +# define debug_normal_state_change(state_value) \ + do { \ + WARN_ON_ONCE(is_special_task_state(state_value)); \ + current->task_state_change = _THIS_IP_; \ } while (0) -#define set_special_state(state_value) \ +# define debug_special_state_change(state_value) \ do { \ - unsigned long flags; /* may shadow */ \ WARN_ON_ONCE(!is_special_task_state(state_value)); \ - raw_spin_lock_irqsave(¤t->pi_lock, flags); \ current->task_state_change = _THIS_IP_; \ - WRITE_ONCE(current->__state, (state_value)); \ - raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) + +# define debug_rtlock_wait_set_state() \ + do { \ + current->saved_state_change = current->task_state_change;\ + current->task_state_change = _THIS_IP_; \ + } while (0) + +# define debug_rtlock_wait_restore_state() \ + do { \ + current->task_state_change = current->saved_state_change;\ + } while (0) + #else +# define debug_normal_state_change(cond) do { } while (0) +# define debug_special_state_change(cond) do { } while (0) +# define debug_rtlock_wait_set_state() do { } while (0) +# define debug_rtlock_wait_restore_state() do { } while (0) +#endif + /* * set_current_state() includes a barrier so that the write of current->state * is correctly serialised wrt the caller's subsequent test of whether to @@ -192,26 +199,77 @@ struct task_group; * Also see the comments of try_to_wake_up(). */ #define __set_current_state(state_value) \ - WRITE_ONCE(current->__state, (state_value)) + do { \ + debug_normal_state_change((state_value)); \ + WRITE_ONCE(current->__state, (state_value)); \ + } while (0) #define set_current_state(state_value) \ - smp_store_mb(current->__state, (state_value)) + do { \ + debug_normal_state_change((state_value)); \ + smp_store_mb(current->__state, (state_value)); \ + } while (0) /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. 
In that case we must - * serialize against wakeups such that any possible in-flight TASK_RUNNING stores - * will not collide with our state change. + * serialize against wakeups such that any possible in-flight TASK_RUNNING + * stores will not collide with our state change. */ #define set_special_state(state_value) \ do { \ unsigned long flags; /* may shadow */ \ + \ raw_spin_lock_irqsave(¤t->pi_lock, flags); \ + debug_special_state_change((state_value)); \ WRITE_ONCE(current->__state, (state_value)); \ raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) -#endif +/* + * PREEMPT_RT specific variants for "sleeping" spin/rwlocks + * + * RT's spin/rwlock substitutions are state preserving. The state of the + * task when blocking on the lock is saved in task_struct::saved_state and + * restored after the lock has been acquired. These operations are + * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT + * lock related wakeups while the task is blocked on the lock are + * redirected to operate on task_struct::saved_state to ensure that these + * are not dropped. On restore task_struct::saved_state is set to + * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail. + * + * The lock operation looks like this: + * + * current_save_and_set_rtlock_wait_state(); + * for (;;) { + * if (try_lock()) + * break; + * raw_spin_unlock_irq(&lock->wait_lock); + * schedule_rtlock(); + * raw_spin_lock_irq(&lock->wait_lock); + * set_current_state(TASK_RTLOCK_WAIT); + * } + * current_restore_rtlock_saved_state(); + */ +#define current_save_and_set_rtlock_wait_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(¤t->pi_lock); \ + current->saved_state = current->__state; \ + debug_rtlock_wait_set_state(); \ + WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \ + raw_spin_unlock(¤t->pi_lock); \ + } while (0); + +#define current_restore_rtlock_saved_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(¤t->pi_lock); \ + debug_rtlock_wait_restore_state(); \ + WRITE_ONCE(current->__state, current->saved_state); \ + current->saved_state = TASK_RUNNING; \ + raw_spin_unlock(¤t->pi_lock); \ + } while (0); #define get_current_state() READ_ONCE(current->__state) @@ -230,6 +288,9 @@ extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); asmlinkage void preempt_schedule_irq(void); +#ifdef CONFIG_PREEMPT_RT + extern void schedule_rtlock(void); +#endif extern int __must_check io_schedule_prepare(void); extern void io_schedule_finish(int token); @@ -668,6 +729,11 @@ struct task_struct { #endif unsigned int __state; +#ifdef CONFIG_PREEMPT_RT + /* saved state for "spinlock sleepers" */ + unsigned int saved_state; +#endif + /* * This begins the randomizable portion of task_struct. Only * scheduling-critical items should be added above here. 
@@ -1362,6 +1428,9 @@ struct task_struct { struct kmap_ctrl kmap_ctrl; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; +# ifdef CONFIG_PREEMPT_RT + unsigned long saved_state_change; +# endif #endif int pagefault_disabled; #ifdef CONFIG_MMU diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 26a2013ac39c..06cd8fb2f409 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -42,8 +42,11 @@ struct wake_q_head { #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } +#define WAKE_Q_HEAD_INITIALIZER(name) \ + { WAKE_Q_TAIL, &name.first } + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name) static inline void wake_q_init(struct wake_q_head *head) { diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 79897841a2cc..45310ea1b1d7 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -12,6 +12,8 @@ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * + * linux/spinlock_types_raw: + * The raw types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -31,6 +33,8 @@ * contains the generic, simplified UP spinlock type. * (which is an empty structure on non-debug builds) * + * linux/spinlock_types_raw: + * The raw RT types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -308,8 +312,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) 1 : ({ local_irq_restore(flags); 0; }); \ }) -/* Include rwlock functions */ +#ifndef CONFIG_PREEMPT_RT +/* Include rwlock functions for !RT */ #include <linux/rwlock.h> +#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -320,6 +326,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include <linux/spinlock_api_up.h> #endif +/* Non PREEMPT_RT kernel, map to raw spinlocks: */ +#ifndef CONFIG_PREEMPT_RT + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -454,6 +463,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock) #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) +#else /* !CONFIG_PREEMPT_RT */ +# include <linux/spinlock_rt.h> +#endif /* CONFIG_PREEMPT_RT */ + /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 19a9be9d97ee..6b8e1a0b137b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -187,6 +187,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } +/* PREEMPT_RT has its own rwlock implementation */ +#ifndef CONFIG_PREEMPT_RT #include <linux/rwlock_api_smp.h> +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 index 000000000000..835aedaf68ac --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_SPINLOCK_RT_H +#define __LINUX_SPINLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu); +#else +static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu) +{ +} +#endif + +#define spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, false); \ +} while (0) + +#define local_spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, true); \ +} while (0) + +extern void rt_spin_lock(spinlock_t *lock); +extern void rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock); +extern void rt_spin_unlock(spinlock_t *lock); +extern void rt_spin_lock_unlock(spinlock_t *lock); +extern int rt_spin_trylock_bh(spinlock_t *lock); +extern int rt_spin_trylock(spinlock_t *lock); + +static __always_inline void spin_lock(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#ifdef CONFIG_LOCKDEP +# define __spin_lock_nested(lock, subclass) \ + rt_spin_lock_nested(lock, subclass) + +# define __spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __spin_lock_nested(lock, subclass); \ + } while (0) + +#else + /* + * Always evaluate the 'subclass' argument to avoid that the compiler + * warns about set-but-not-used variables when building with + * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. + */ +# define __spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + spin_lock_irqsave(((void)(subclass), (lock)), flags) +#endif + +#define spin_lock_nested(lock, subclass) \ + __spin_lock_nested(lock, subclass) + +#define spin_lock_nest_lock(lock, nest_lock) \ + __spin_lock_nest_lock(lock, nest_lock) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + __spin_lock_irqsave_nested(lock, flags, subclass) + +static __always_inline void spin_lock_bh(spinlock_t *lock) +{ + /* Investigate: Drop bh when blocking ? 
*/ + local_bh_disable(); + rt_spin_lock(lock); +} + +static __always_inline void spin_lock_irq(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) + +static __always_inline void spin_unlock(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_bh(spinlock_t *lock) +{ + rt_spin_unlock(lock); + local_bh_enable(); +} + +static __always_inline void spin_unlock_irq(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, + unsigned long flags) +{ + rt_spin_unlock(lock); +} + +#define spin_trylock(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define spin_trylock_bh(lock) \ + __cond_lock(lock, rt_spin_trylock_bh(lock)) + +#define spin_trylock_irq(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define __spin_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = spin_trylock(lock); \ + __locked; \ +}) + +#define spin_trylock_irqsave(lock, flags) \ + __cond_lock(lock, __spin_trylock_irqsave(lock, flags)) + +#define spin_is_contended(lock) (((void)(lock), 0)) + +static inline int spin_is_locked(spinlock_t *lock) +{ + return rt_mutex_base_is_locked(&lock->lock); +} + +#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock)) + +#include <linux/rwlock_rt.h> + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index b981caafe8bf..2dfa35ffec76 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,65 +9,11 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_SMP) -# include <asm/spinlock_types.h> -#else -# include <linux/spinlock_types_up.h> -#endif - -#include <linux/lockdep_types.h> +#include <linux/spinlock_types_raw.h> -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RAW_SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SPIN, \ - } -# define SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RAW_SPIN_DEP_MAP_INIT(lockname) -# define SPIN_DEP_MAP_INIT(lockname) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, -#else -# define SPIN_DEBUG_INIT(lockname) -#endif - -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - RAW_SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +#ifndef CONFIG_PREEMPT_RT +/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ typedef struct spinlock { union { struct raw_spinlock rlock; @@ -96,6 +42,35 @@ typedef struct spinlock { #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +/* PREEMPT_RT kernels map spinlock to rt_mutex */ +#include 
<linux/rtmutex.h> + +typedef struct spinlock { + struct rt_mutex_base lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#define __SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + SPIN_DEP_MAP_INIT(name) \ + } + +#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + LOCAL_SPIN_DEP_MAP_INIT(name) \ + } + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #include <linux/rwlock_types.h> #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 index 000000000000..91cb36b65a17 --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,73 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H + +#include <linux/types.h> + +#if defined(CONFIG_SMP) +# include <asm/spinlock_types.h> +#else +# include <linux/spinlock_types_up.h> +#endif + +#include <linux/lockdep_types.h> + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RAW_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SPIN, \ + } +# define SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } + +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + .lock_type = LD_LOCK_PERCPU, \ + } +#else +# define RAW_SPIN_DEP_MAP_INIT(lockname) +# define SPIN_DEP_MAP_INIT(lockname) +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +{ \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + RAW_SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */ diff --git a/include/linux/static_call.h b/include/linux/static_call.h index fc94faa53b5b..3e56a9751c06 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -17,11 +17,17 @@ * DECLARE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL_NULL(name, typename); + * DEFINE_STATIC_CALL_RET0(name, typename); + * + * __static_call_return0; + * * static_call(name)(args...); * static_call_cond(name)(args...); * static_call_update(name, func); * static_call_query(name); * + * EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}() + * * Usage example: * * # Start with the following functions (with identical prototypes): @@ -96,6 +102,33 @@ * To query which function is currently set to be called, use: * * func = static_call_query(name); + * + * + * DEFINE_STATIC_CALL_RET0 / __static_call_return0: + * + * Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the + * conditional void function call, 
DEFINE_STATIC_CALL_RET0 / + * __static_call_return0 optimize the do nothing return 0 function. + * + * This feature is strictly UB per the C standard (since it casts a function + * pointer to a different signature) and relies on the architecture ABI to + * make things work. In particular it relies on Caller Stack-cleanup and the + * whole return register being clobbered for short return values. All normal + * CDECL style ABIs conform. + * + * In particular the x86_64 implementation replaces the 5 byte CALL + * instruction at the callsite with a 5 byte clear of the RAX register, + * completely eliding any function call overhead. + * + * Notably argument setup is unconditional. + * + * + * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP(): + * + * The difference is that the _TRAMP variant tries to only export the + * trampoline with the result that a module can use static_call{,_cond}() but + * not static_call_update(). + * */ #include <linux/types.h> diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index b77f39f319ad..29db736af86d 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -18,6 +18,24 @@ #define __LINUX_WW_MUTEX_H #include <linux/mutex.h> +#include <linux/rtmutex.h> + +#if defined(CONFIG_DEBUG_MUTEXES) || \ + (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES)) +#define DEBUG_WW_MUTEXES +#endif + +#ifndef CONFIG_PREEMPT_RT +#define WW_MUTEX_BASE mutex +#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k) +#define ww_mutex_base_trylock(l) mutex_trylock(l) +#define ww_mutex_base_is_locked(b) mutex_is_locked((b)) +#else +#define WW_MUTEX_BASE rt_mutex +#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k) +#define ww_mutex_base_trylock(l) rt_mutex_trylock(l) +#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex) +#endif struct ww_class { atomic_long_t stamp; @@ -28,16 +46,24 @@ struct ww_class { unsigned int is_wait_die; }; +struct ww_mutex { + struct WW_MUTEX_BASE base; + struct ww_acquire_ctx *ctx; +#ifdef DEBUG_WW_MUTEXES + struct ww_class *ww_class; +#endif +}; + struct ww_acquire_ctx { struct task_struct *task; unsigned long stamp; unsigned int acquired; unsigned short wounded; unsigned short is_wait_die; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES unsigned int done_acquire; struct ww_class *ww_class; - struct ww_mutex *contending_lock; + void *contending_lock; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -74,9 +100,9 @@ struct ww_acquire_ctx { static inline void ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) { - __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); + ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); lock->ctx = NULL; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES lock->ww_class = ww_class; #endif } @@ -113,7 +139,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, ctx->acquired = 0; ctx->wounded = false; ctx->is_wait_die = ww_class->is_wait_die; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES ctx->ww_class = ww_class; ctx->done_acquire = 0; ctx->contending_lock = NULL; @@ -143,7 +169,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, */ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) { -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES lockdep_assert_held(ctx); DEBUG_LOCKS_WARN_ON(ctx->done_acquire); @@ -163,7 +189,7 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) #ifdef CONFIG_DEBUG_LOCK_ALLOC 
mutex_release(&ctx->dep_map, _THIS_IP_); #endif -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(ctx->acquired); if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) /* @@ -269,7 +295,7 @@ static inline void ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { int ret; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif ret = ww_mutex_lock(lock, ctx); @@ -305,7 +331,7 @@ static inline int __must_check ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif return ww_mutex_lock_interruptible(lock, ctx); @@ -322,7 +348,7 @@ extern void ww_mutex_unlock(struct ww_mutex *lock); */ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) { - return mutex_trylock(&lock->base); + return ww_mutex_base_trylock(&lock->base); } /*** @@ -335,7 +361,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) */ static inline void ww_mutex_destroy(struct ww_mutex *lock) { +#ifndef CONFIG_PREEMPT_RT mutex_destroy(&lock->base); +#endif } /** @@ -346,7 +374,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock) */ static inline bool ww_mutex_is_locked(struct ww_mutex *lock) { - return mutex_is_locked(&lock->base); + return ww_mutex_base_is_locked(&lock->base); }  #endif
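
The ww_mutex hunks above only swap the lock type behind the new WW_MUTEX_BASE / ww_mutex_base_*() indirection; the caller-visible API does not change. Below is a minimal, hypothetical caller sketch (my_ww_class, my_ww_lock and my_ww_demo are made-up names, not part of the patch) showing that the same code builds whether the base is a mutex (!PREEMPT_RT) or an rt_mutex (PREEMPT_RT):

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);		/* hypothetical ww class */
static struct ww_mutex my_ww_lock;		/* hypothetical lock */

static int my_ww_demo(void)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&my_ww_lock, &my_ww_class);
	ww_acquire_init(&ctx, &my_ww_class);

	ret = ww_mutex_lock(&my_ww_lock, &ctx);	/* 0 here; -EDEADLK only matters with multiple locks */
	if (!ret) {
		ww_acquire_done(&ctx);		/* no further locks will be acquired in this ctx */
		/* ... critical section ... */
		ww_mutex_unlock(&my_ww_lock);
	}
	ww_acquire_fini(&ctx);
	return ret;
}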
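
spinlock_rt.h above defines spin_lock_irqsave() to typecheck flags, set them to 0 and acquire the rt_mutex based lock; interrupts are not disabled and the critical section stays preemptible, while on !PREEMPT_RT the established behaviour is kept. A hypothetical caller sketch (my_stats_lock, my_stats_count and my_stats_inc are made-up names) illustrating that existing irqsave users keep compiling and working on both configurations:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_stats_lock);		/* hypothetical lock */
static unsigned long my_stats_count;		/* hypothetical shared data */

static void my_stats_inc(void)
{
	unsigned long flags;

	/* !RT: IRQs off + raw spinlock.  RT: rt_spin_lock(), flags forced to 0. */
	spin_lock_irqsave(&my_stats_lock, flags);
	my_stats_count++;
	spin_unlock_irqrestore(&my_stats_lock, flags);
}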
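
Similarly, the local_lock_internal.h hunks map local_lock_t to a per-CPU spinlock_t on PREEMPT_RT, taken under migrate_disable(), so the critical section remains preemptible but stays CPU-local and serialized. A hypothetical per-CPU usage sketch under that assumption (my_pcpu_cache and my_pcpu_cache_bump are made-up names; the pattern mirrors existing local_lock users):

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_pcpu_cache {				/* hypothetical per-CPU data */
	local_lock_t lock;
	unsigned int count;
};

static DEFINE_PER_CPU(struct my_pcpu_cache, my_pcpu_cache) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void my_pcpu_cache_bump(void)
{
	/* !RT: preempt_disable().  RT: migrate_disable() + per-CPU sleeping spinlock. */
	local_lock(&my_pcpu_cache.lock);
	this_cpu_inc(my_pcpu_cache.count);
	local_unlock(&my_pcpu_cache.lock);
}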
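
Finally, the comment added to include/linux/sched.h documents the lock-operation loop that TASK_RTLOCK_WAIT, current_save_and_set_rtlock_wait_state(), current_restore_rtlock_saved_state() and schedule_rtlock() exist for. The sketch below (PREEMPT_RT only) simply expands that documented loop around a made-up my_rtlock type; it is not the real kernel/locking slowpath, only an illustration of how the helpers compose:

#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_rtlock {				/* hypothetical sleeping lock */
	raw_spinlock_t wait_lock;
	struct task_struct *owner;		/* protected by wait_lock */
};

static bool my_rtlock_try_acquire(struct my_rtlock *lock)
{
	/* Hypothetical: take the lock if nobody owns it. */
	if (lock->owner)
		return false;
	lock->owner = current;
	return true;
}

static void my_rtlock_slowpath(struct my_rtlock *lock)
{
	raw_spin_lock_irq(&lock->wait_lock);

	/* Stash current->__state and switch to TASK_RTLOCK_WAIT under pi_lock. */
	current_save_and_set_rtlock_wait_state();

	for (;;) {
		if (my_rtlock_try_acquire(lock))
			break;

		raw_spin_unlock_irq(&lock->wait_lock);
		schedule_rtlock();		/* RT-only scheduling point */
		raw_spin_lock_irq(&lock->wait_lock);

		set_current_state(TASK_RTLOCK_WAIT);
	}

	/* Restore the saved state; wakeups redirected to saved_state are kept. */
	current_restore_rtlock_saved_state();

	raw_spin_unlock_irq(&lock->wait_lock);
}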