Diffstat (limited to 'include'): 81 files changed, 6120 insertions, 2174 deletions
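The regenerated include/asm-generic/atomic-instrumented.h follows one repeating shape for every operation: a kasan_check_read()/kasan_check_write() on the operands, a call to the corresponding arch_ primitive, and a trailing #define so later preprocessor checks can see the op is provided; optional ops are only emitted when the arch defines the matching arch_ variant. Below is a condensed sketch of that pattern, assuming the header is included from <linux/atomic.h> after the arch's own atomic.h (as the real generated file is); arch_atomic_add_return and its _acquire variant stand in for whichever ops an arch actually provides.

/* Condensed sketch of the generated wrapper pattern in the hunks below. */
#include <linux/build_bug.h>
#include <linux/kasan-checks.h>

static inline int
atomic_add_return(int i, atomic_t *v)
{
	/* Report the access to KASAN before handing off to the arch op. */
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}
/* Mark the op as provided for later #ifdef checks. */
#define atomic_add_return atomic_add_return

/* Optional orderings are wrapped only if the arch defines them. */
#if defined(arch_atomic_add_return_acquire)
static inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	return arch_atomic_add_return_acquire(i, v);
}
#define atomic_add_return_acquire atomic_add_return_acquire
#endif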
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h index 0d4b1d3dbc1e..e8730c6b9fe2 100644 --- a/include/asm-generic/atomic-instrumented.h +++ b/include/asm-generic/atomic-instrumented.h @@ -1,3 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-instrumented.sh +// DO NOT MODIFY THIS FILE DIRECTLY + /* * This file provides wrappers with KASAN instrumentation for atomic operations. * To use this functionality an arch's atomic.h file needs to define all @@ -9,459 +14,1775 @@ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid * double instrumentation. */ - -#ifndef _LINUX_ATOMIC_INSTRUMENTED_H -#define _LINUX_ATOMIC_INSTRUMENTED_H +#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H +#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H #include <linux/build_bug.h> #include <linux/kasan-checks.h> -static __always_inline int atomic_read(const atomic_t *v) +static inline int +atomic_read(const atomic_t *v) { kasan_check_read(v, sizeof(*v)); return arch_atomic_read(v); } +#define atomic_read atomic_read -static __always_inline s64 atomic64_read(const atomic64_t *v) +#if defined(arch_atomic_read_acquire) +static inline int +atomic_read_acquire(const atomic_t *v) { kasan_check_read(v, sizeof(*v)); - return arch_atomic64_read(v); + return arch_atomic_read_acquire(v); } +#define atomic_read_acquire atomic_read_acquire +#endif -static __always_inline void atomic_set(atomic_t *v, int i) +static inline void +atomic_set(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); arch_atomic_set(v, i); } +#define atomic_set atomic_set -static __always_inline void atomic64_set(atomic64_t *v, s64 i) +#if defined(arch_atomic_set_release) +static inline void +atomic_set_release(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_set(v, i); + arch_atomic_set_release(v, i); } +#define atomic_set_release atomic_set_release +#endif -static __always_inline int atomic_xchg(atomic_t *v, int i) +static inline void +atomic_add(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_xchg(v, i); + arch_atomic_add(i, v); } +#define atomic_add atomic_add -static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) +#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return) +static inline int +atomic_add_return(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_xchg(v, i); + return arch_atomic_add_return(i, v); } +#define atomic_add_return atomic_add_return +#endif -static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) +#if defined(arch_atomic_add_return_acquire) +static inline int +atomic_add_return_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_cmpxchg(v, old, new); + return arch_atomic_add_return_acquire(i, v); } +#define atomic_add_return_acquire atomic_add_return_acquire +#endif -static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +#if defined(arch_atomic_add_return_release) +static inline int +atomic_add_return_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg(v, old, new); + return arch_atomic_add_return_release(i, v); } +#define atomic_add_return_release atomic_add_return_release +#endif -#ifdef arch_atomic_try_cmpxchg -#define atomic_try_cmpxchg atomic_try_cmpxchg -static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) +#if defined(arch_atomic_add_return_relaxed) +static inline int 
+atomic_add_return_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - kasan_check_read(old, sizeof(*old)); - return arch_atomic_try_cmpxchg(v, old, new); + return arch_atomic_add_return_relaxed(i, v); } +#define atomic_add_return_relaxed atomic_add_return_relaxed #endif -#ifdef arch_atomic64_try_cmpxchg -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) +static inline int +atomic_fetch_add(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - kasan_check_read(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg(v, old, new); + return arch_atomic_fetch_add(i, v); } +#define atomic_fetch_add atomic_fetch_add #endif -#ifdef arch_atomic_fetch_add_unless -#define atomic_fetch_add_unless atomic_fetch_add_unless -static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +#if defined(arch_atomic_fetch_add_acquire) +static inline int +atomic_fetch_add_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_fetch_add_unless(v, a, u); + return arch_atomic_fetch_add_acquire(i, v); } +#define atomic_fetch_add_acquire atomic_fetch_add_acquire #endif -#ifdef arch_atomic64_fetch_add_unless -#define atomic64_fetch_add_unless atomic64_fetch_add_unless -static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +#if defined(arch_atomic_fetch_add_release) +static inline int +atomic_fetch_add_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_unless(v, a, u); + return arch_atomic_fetch_add_release(i, v); } +#define atomic_fetch_add_release atomic_fetch_add_release #endif -#ifdef arch_atomic_inc -#define atomic_inc atomic_inc -static __always_inline void atomic_inc(atomic_t *v) +#if defined(arch_atomic_fetch_add_relaxed) +static inline int +atomic_fetch_add_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_add_relaxed(i, v); +} +#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed +#endif + +static inline void +atomic_sub(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_sub(i, v); +} +#define atomic_sub atomic_sub + +#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) +static inline int +atomic_sub_return(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} +#define atomic_sub_return atomic_sub_return +#endif + +#if defined(arch_atomic_sub_return_acquire) +static inline int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return_acquire(i, v); +} +#define atomic_sub_return_acquire atomic_sub_return_acquire +#endif + +#if defined(arch_atomic_sub_return_release) +static inline int +atomic_sub_return_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return_release(i, v); +} +#define atomic_sub_return_release atomic_sub_return_release +#endif + +#if defined(arch_atomic_sub_return_relaxed) +static inline int +atomic_sub_return_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_return_relaxed(i, v); +} +#define atomic_sub_return_relaxed atomic_sub_return_relaxed +#endif + +#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) +static inline int +atomic_fetch_sub(int i, atomic_t *v) +{ + kasan_check_write(v, 
sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} +#define atomic_fetch_sub atomic_fetch_sub +#endif + +#if defined(arch_atomic_fetch_sub_acquire) +static inline int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_acquire(i, v); +} +#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#endif + +#if defined(arch_atomic_fetch_sub_release) +static inline int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_release(i, v); +} +#define atomic_fetch_sub_release atomic_fetch_sub_release +#endif + +#if defined(arch_atomic_fetch_sub_relaxed) +static inline int +atomic_fetch_sub_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_relaxed(i, v); +} +#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed +#endif + +#if defined(arch_atomic_inc) +static inline void +atomic_inc(atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_inc(v); } +#define atomic_inc atomic_inc #endif -#ifdef arch_atomic64_inc -#define atomic64_inc atomic64_inc -static __always_inline void atomic64_inc(atomic64_t *v) +#if defined(arch_atomic_inc_return) +static inline int +atomic_inc_return(atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_inc(v); + return arch_atomic_inc_return(v); } +#define atomic_inc_return atomic_inc_return #endif -#ifdef arch_atomic_dec -#define atomic_dec atomic_dec -static __always_inline void atomic_dec(atomic_t *v) +#if defined(arch_atomic_inc_return_acquire) +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return_acquire(v); +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#if defined(arch_atomic_inc_return_release) +static inline int +atomic_inc_return_release(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return_release(v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#if defined(arch_atomic_inc_return_relaxed) +static inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_return_relaxed(v); +} +#define atomic_inc_return_relaxed atomic_inc_return_relaxed +#endif + +#if defined(arch_atomic_fetch_inc) +static inline int +atomic_fetch_inc(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc(v); +} +#define atomic_fetch_inc atomic_fetch_inc +#endif + +#if defined(arch_atomic_fetch_inc_acquire) +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_acquire(v); +} +#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#if defined(arch_atomic_fetch_inc_release) +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_release(v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#if defined(arch_atomic_fetch_inc_relaxed) +static inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_relaxed(v); +} +#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed +#endif + +#if defined(arch_atomic_dec) +static inline void +atomic_dec(atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_dec(v); } +#define atomic_dec atomic_dec #endif -#ifdef atch_atomic64_dec -#define atomic64_dec -static __always_inline void 
atomic64_dec(atomic64_t *v) +#if defined(arch_atomic_dec_return) +static inline int +atomic_dec_return(atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_dec(v); + return arch_atomic_dec_return(v); } +#define atomic_dec_return atomic_dec_return #endif -static __always_inline void atomic_add(int i, atomic_t *v) +#if defined(arch_atomic_dec_return_acquire) +static inline int +atomic_dec_return_acquire(atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic_add(i, v); + return arch_atomic_dec_return_acquire(v); } +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif -static __always_inline void atomic64_add(s64 i, atomic64_t *v) +#if defined(arch_atomic_dec_return_release) +static inline int +atomic_dec_return_release(atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_add(i, v); + return arch_atomic_dec_return_release(v); } +#define atomic_dec_return_release atomic_dec_return_release +#endif -static __always_inline void atomic_sub(int i, atomic_t *v) +#if defined(arch_atomic_dec_return_relaxed) +static inline int +atomic_dec_return_relaxed(atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic_sub(i, v); + return arch_atomic_dec_return_relaxed(v); } +#define atomic_dec_return_relaxed atomic_dec_return_relaxed +#endif -static __always_inline void atomic64_sub(s64 i, atomic64_t *v) +#if defined(arch_atomic_fetch_dec) +static inline int +atomic_fetch_dec(atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_sub(i, v); + return arch_atomic_fetch_dec(v); } +#define atomic_fetch_dec atomic_fetch_dec +#endif -static __always_inline void atomic_and(int i, atomic_t *v) +#if defined(arch_atomic_fetch_dec_acquire) +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_acquire(v); +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#if defined(arch_atomic_fetch_dec_release) +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_release(v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#if defined(arch_atomic_fetch_dec_relaxed) +static inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_relaxed(v); +} +#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed +#endif + +static inline void +atomic_and(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_and(i, v); } +#define atomic_and atomic_and -static __always_inline void atomic64_and(s64 i, atomic64_t *v) +#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) +static inline int +atomic_fetch_and(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_and(i, v); + return arch_atomic_fetch_and(i, v); +} +#define atomic_fetch_and atomic_fetch_and +#endif + +#if defined(arch_atomic_fetch_and_acquire) +static inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and_acquire(i, v); +} +#define atomic_fetch_and_acquire atomic_fetch_and_acquire +#endif + +#if defined(arch_atomic_fetch_and_release) +static inline int +atomic_fetch_and_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and_release(i, v); +} +#define atomic_fetch_and_release atomic_fetch_and_release +#endif + +#if defined(arch_atomic_fetch_and_relaxed) +static inline int +atomic_fetch_and_relaxed(int i, 
atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_and_relaxed(i, v); } +#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed +#endif + +#if defined(arch_atomic_andnot) +static inline void +atomic_andnot(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic_andnot(i, v); +} +#define atomic_andnot atomic_andnot +#endif -static __always_inline void atomic_or(int i, atomic_t *v) +#if defined(arch_atomic_fetch_andnot) +static inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot(i, v); +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#if defined(arch_atomic_fetch_andnot_acquire) +static inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_acquire(i, v); +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#if defined(arch_atomic_fetch_andnot_release) +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_release(i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#if defined(arch_atomic_fetch_andnot_relaxed) +static inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_relaxed(i, v); +} +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed +#endif + +static inline void +atomic_or(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_or(i, v); } +#define atomic_or atomic_or -static __always_inline void atomic64_or(s64 i, atomic64_t *v) +#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) +static inline int +atomic_fetch_or(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_or(i, v); + return arch_atomic_fetch_or(i, v); +} +#define atomic_fetch_or atomic_fetch_or +#endif + +#if defined(arch_atomic_fetch_or_acquire) +static inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or_acquire(i, v); +} +#define atomic_fetch_or_acquire atomic_fetch_or_acquire +#endif + +#if defined(arch_atomic_fetch_or_release) +static inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or_release(i, v); +} +#define atomic_fetch_or_release atomic_fetch_or_release +#endif + +#if defined(arch_atomic_fetch_or_relaxed) +static inline int +atomic_fetch_or_relaxed(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_fetch_or_relaxed(i, v); } +#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed +#endif -static __always_inline void atomic_xor(int i, atomic_t *v) +static inline void +atomic_xor(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); arch_atomic_xor(i, v); } +#define atomic_xor atomic_xor -static __always_inline void atomic64_xor(s64 i, atomic64_t *v) +#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) +static inline int +atomic_fetch_xor(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - arch_atomic64_xor(i, v); + return arch_atomic_fetch_xor(i, v); } +#define atomic_fetch_xor atomic_fetch_xor +#endif -#ifdef arch_atomic_inc_return -#define atomic_inc_return atomic_inc_return -static __always_inline int atomic_inc_return(atomic_t *v) +#if defined(arch_atomic_fetch_xor_acquire) +static inline int 
+atomic_fetch_xor_acquire(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_inc_return(v); + return arch_atomic_fetch_xor_acquire(i, v); } +#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire #endif -#ifdef arch_atomic64_in_return -#define atomic64_inc_return atomic64_inc_return -static __always_inline s64 atomic64_inc_return(atomic64_t *v) +#if defined(arch_atomic_fetch_xor_release) +static inline int +atomic_fetch_xor_release(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_inc_return(v); + return arch_atomic_fetch_xor_release(i, v); } +#define atomic_fetch_xor_release atomic_fetch_xor_release #endif -#ifdef arch_atomic_dec_return -#define atomic_dec_return atomic_dec_return -static __always_inline int atomic_dec_return(atomic_t *v) +#if defined(arch_atomic_fetch_xor_relaxed) +static inline int +atomic_fetch_xor_relaxed(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_dec_return(v); + return arch_atomic_fetch_xor_relaxed(i, v); } +#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #endif -#ifdef arch_atomic64_dec_return -#define atomic64_dec_return atomic64_dec_return -static __always_inline s64 atomic64_dec_return(atomic64_t *v) +#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) +static inline int +atomic_xchg(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_dec_return(v); + return arch_atomic_xchg(v, i); } +#define atomic_xchg atomic_xchg #endif -#ifdef arch_atomic64_inc_not_zero -#define atomic64_inc_not_zero atomic64_inc_not_zero -static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) +#if defined(arch_atomic_xchg_acquire) +static inline int +atomic_xchg_acquire(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_inc_not_zero(v); + return arch_atomic_xchg_acquire(v, i); } +#define atomic_xchg_acquire atomic_xchg_acquire #endif -#ifdef arch_atomic64_dec_if_positive -#define atomic64_dec_if_positive atomic64_dec_if_positive -static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) +#if defined(arch_atomic_xchg_release) +static inline int +atomic_xchg_release(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_dec_if_positive(v); + return arch_atomic_xchg_release(v, i); } +#define atomic_xchg_release atomic_xchg_release #endif -#ifdef arch_atomic_dec_and_test -#define atomic_dec_and_test atomic_dec_and_test -static __always_inline bool atomic_dec_and_test(atomic_t *v) +#if defined(arch_atomic_xchg_relaxed) +static inline int +atomic_xchg_relaxed(atomic_t *v, int i) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_dec_and_test(v); + return arch_atomic_xchg_relaxed(v, i); } +#define atomic_xchg_relaxed atomic_xchg_relaxed #endif -#ifdef arch_atomic64_dec_and_test -#define atomic64_dec_and_test atomic64_dec_and_test -static __always_inline bool atomic64_dec_and_test(atomic64_t *v) +#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg) +static inline int +atomic_cmpxchg(atomic_t *v, int old, int new) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_dec_and_test(v); + return arch_atomic_cmpxchg(v, old, new); } +#define atomic_cmpxchg atomic_cmpxchg #endif -#ifdef arch_atomic_inc_and_test -#define atomic_inc_and_test atomic_inc_and_test -static __always_inline bool atomic_inc_and_test(atomic_t *v) +#if defined(arch_atomic_cmpxchg_acquire) +static inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + kasan_check_write(v, 
sizeof(*v)); + return arch_atomic_cmpxchg_acquire(v, old, new); +} +#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#endif + +#if defined(arch_atomic_cmpxchg_release) +static inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_release(v, old, new); +} +#define atomic_cmpxchg_release atomic_cmpxchg_release +#endif + +#if defined(arch_atomic_cmpxchg_relaxed) +static inline int +atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} +#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed +#endif + +#if defined(arch_atomic_try_cmpxchg) +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#if defined(arch_atomic_try_cmpxchg_acquire) +static inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_acquire(v, old, new); +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#if defined(arch_atomic_try_cmpxchg_release) +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_release(v, old, new); +} +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#if defined(arch_atomic_try_cmpxchg_relaxed) +static inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed +#endif + +#if defined(arch_atomic_sub_and_test) +static inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_sub_and_test(i, v); +} +#define atomic_sub_and_test atomic_sub_and_test +#endif + +#if defined(arch_atomic_dec_and_test) +static inline bool +atomic_dec_and_test(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_and_test(v); +} +#define atomic_dec_and_test atomic_dec_and_test +#endif + +#if defined(arch_atomic_inc_and_test) +static inline bool +atomic_inc_and_test(atomic_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } +#define atomic_inc_and_test atomic_inc_and_test #endif -#ifdef arch_atomic64_inc_and_test -#define atomic64_inc_and_test atomic64_inc_and_test -static __always_inline bool atomic64_inc_and_test(atomic64_t *v) +#if defined(arch_atomic_add_negative) +static inline bool +atomic_add_negative(int i, atomic_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_inc_and_test(v); + return arch_atomic_add_negative(i, v); } +#define atomic_add_negative atomic_add_negative #endif -static __always_inline int atomic_add_return(int i, atomic_t *v) +#if defined(arch_atomic_fetch_add_unless) +static inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_add_return(i, v); + return arch_atomic_fetch_add_unless(v, a, u); +} +#define atomic_fetch_add_unless atomic_fetch_add_unless +#endif + +#if defined(arch_atomic_add_unless) +static inline bool 
+atomic_add_unless(atomic_t *v, int a, int u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_add_unless(v, a, u); +} +#define atomic_add_unless atomic_add_unless +#endif + +#if defined(arch_atomic_inc_not_zero) +static inline bool +atomic_inc_not_zero(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_not_zero(v); +} +#define atomic_inc_not_zero atomic_inc_not_zero +#endif + +#if defined(arch_atomic_inc_unless_negative) +static inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_inc_unless_negative(v); +} +#define atomic_inc_unless_negative atomic_inc_unless_negative +#endif + +#if defined(arch_atomic_dec_unless_positive) +static inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_unless_positive(v); +} +#define atomic_dec_unless_positive atomic_dec_unless_positive +#endif + +#if defined(arch_atomic_dec_if_positive) +static inline int +atomic_dec_if_positive(atomic_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic_dec_if_positive(v); +} +#define atomic_dec_if_positive atomic_dec_if_positive +#endif + +static inline s64 +atomic64_read(const atomic64_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic64_read(v); +} +#define atomic64_read atomic64_read + +#if defined(arch_atomic64_read_acquire) +static inline s64 +atomic64_read_acquire(const atomic64_t *v) +{ + kasan_check_read(v, sizeof(*v)); + return arch_atomic64_read_acquire(v); +} +#define atomic64_read_acquire atomic64_read_acquire +#endif + +static inline void +atomic64_set(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_set(v, i); +} +#define atomic64_set atomic64_set + +#if defined(arch_atomic64_set_release) +static inline void +atomic64_set_release(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_set_release(v, i); } +#define atomic64_set_release atomic64_set_release +#endif -static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) +static inline void +atomic64_add(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_add(i, v); +} +#define atomic64_add atomic64_add + +#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) +static inline s64 +atomic64_add_return(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } +#define atomic64_add_return atomic64_add_return +#endif -static __always_inline int atomic_sub_return(int i, atomic_t *v) +#if defined(arch_atomic64_add_return_acquire) +static inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_sub_return(i, v); + return arch_atomic64_add_return_acquire(i, v); } +#define atomic64_add_return_acquire atomic64_add_return_acquire +#endif -static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) +#if defined(arch_atomic64_add_return_release) +static inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic64_sub_return(i, v); + return arch_atomic64_add_return_release(i, v); } +#define atomic64_add_return_release atomic64_add_return_release +#endif -static __always_inline int atomic_fetch_add(int i, atomic_t *v) +#if defined(arch_atomic64_add_return_relaxed) +static inline s64 +atomic64_add_return_relaxed(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_fetch_add(i, v); + 
return arch_atomic64_add_return_relaxed(i, v); } +#define atomic64_add_return_relaxed atomic64_add_return_relaxed +#endif -static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) +#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) +static inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } +#define atomic64_fetch_add atomic64_fetch_add +#endif -static __always_inline int atomic_fetch_sub(int i, atomic_t *v) +#if defined(arch_atomic64_fetch_add_acquire) +static inline s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_fetch_sub(i, v); + return arch_atomic64_fetch_add_acquire(i, v); } +#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#endif + +#if defined(arch_atomic64_fetch_add_release) +static inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_release(i, v); +} +#define atomic64_fetch_add_release atomic64_fetch_add_release +#endif -static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) +#if defined(arch_atomic64_fetch_add_relaxed) +static inline s64 +atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_relaxed(i, v); +} +#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed +#endif + +static inline void +atomic64_sub(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_sub(i, v); +} +#define atomic64_sub atomic64_sub + +#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) +static inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} +#define atomic64_sub_return atomic64_sub_return +#endif + +#if defined(arch_atomic64_sub_return_acquire) +static inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return_acquire(i, v); +} +#define atomic64_sub_return_acquire atomic64_sub_return_acquire +#endif + +#if defined(arch_atomic64_sub_return_release) +static inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return_release(i, v); +} +#define atomic64_sub_return_release atomic64_sub_return_release +#endif + +#if defined(arch_atomic64_sub_return_relaxed) +static inline s64 +atomic64_sub_return_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_sub_return_relaxed(i, v); +} +#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed +#endif + +#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) +static inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } +#define atomic64_fetch_sub atomic64_fetch_sub +#endif -static __always_inline int atomic_fetch_and(int i, atomic_t *v) +#if defined(arch_atomic64_fetch_sub_acquire) +static inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_fetch_and(i, v); + return arch_atomic64_fetch_sub_acquire(i, v); +} +#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#endif + +#if defined(arch_atomic64_fetch_sub_release) +static inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + 
kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_release(i, v); } +#define atomic64_fetch_sub_release atomic64_fetch_sub_release +#endif -static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) +#if defined(arch_atomic64_fetch_sub_relaxed) +static inline s64 +atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_relaxed(i, v); +} +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed +#endif + +#if defined(arch_atomic64_inc) +static inline void +atomic64_inc(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_inc(v); +} +#define atomic64_inc atomic64_inc +#endif + +#if defined(arch_atomic64_inc_return) +static inline s64 +atomic64_inc_return(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return(v); +} +#define atomic64_inc_return atomic64_inc_return +#endif + +#if defined(arch_atomic64_inc_return_acquire) +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return_acquire(v); +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#if defined(arch_atomic64_inc_return_release) +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return_release(v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#if defined(arch_atomic64_inc_return_relaxed) +static inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_return_relaxed(v); +} +#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed +#endif + +#if defined(arch_atomic64_fetch_inc) +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc(v); +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#if defined(arch_atomic64_fetch_inc_acquire) +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_acquire(v); +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#if defined(arch_atomic64_fetch_inc_release) +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_release(v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#if defined(arch_atomic64_fetch_inc_relaxed) +static inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_relaxed(v); +} +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed +#endif + +#if defined(arch_atomic64_dec) +static inline void +atomic64_dec(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_dec(v); +} +#define atomic64_dec atomic64_dec +#endif + +#if defined(arch_atomic64_dec_return) +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return(v); +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#if defined(arch_atomic64_dec_return_acquire) +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return_acquire(v); +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#if defined(arch_atomic64_dec_return_release) +static inline s64 
+atomic64_dec_return_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return_release(v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#if defined(arch_atomic64_dec_return_relaxed) +static inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_return_relaxed(v); +} +#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed +#endif + +#if defined(arch_atomic64_fetch_dec) +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec(v); +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#if defined(arch_atomic64_fetch_dec_acquire) +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_acquire(v); +} +#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#endif + +#if defined(arch_atomic64_fetch_dec_release) +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_release(v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#if defined(arch_atomic64_fetch_dec_relaxed) +static inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_relaxed(v); +} +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed +#endif + +static inline void +atomic64_and(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_and(i, v); +} +#define atomic64_and atomic64_and + +#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) +static inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_and(i, v); } +#define atomic64_fetch_and atomic64_fetch_and +#endif -static __always_inline int atomic_fetch_or(int i, atomic_t *v) +#if defined(arch_atomic64_fetch_and_acquire) +static inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_fetch_or(i, v); + return arch_atomic64_fetch_and_acquire(i, v); +} +#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#endif + +#if defined(arch_atomic64_fetch_and_release) +static inline s64 +atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_release(i, v); +} +#define atomic64_fetch_and_release atomic64_fetch_and_release +#endif + +#if defined(arch_atomic64_fetch_and_relaxed) +static inline s64 +atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_relaxed(i, v); +} +#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed +#endif + +#if defined(arch_atomic64_andnot) +static inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_andnot(i, v); +} +#define atomic64_andnot atomic64_andnot +#endif + +#if defined(arch_atomic64_fetch_andnot) +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot(i, v); +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#if defined(arch_atomic64_fetch_andnot_acquire) +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return 
arch_atomic64_fetch_andnot_acquire(i, v); +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#if defined(arch_atomic64_fetch_andnot_release) +static inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_release(i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#if defined(arch_atomic64_fetch_andnot_relaxed) +static inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed +#endif + +static inline void +atomic64_or(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_or(i, v); } +#define atomic64_or atomic64_or -static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) +#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) +static inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } +#define atomic64_fetch_or atomic64_fetch_or +#endif -static __always_inline int atomic_fetch_xor(int i, atomic_t *v) +#if defined(arch_atomic64_fetch_or_acquire) +static inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_fetch_xor(i, v); + return arch_atomic64_fetch_or_acquire(i, v); +} +#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#endif + +#if defined(arch_atomic64_fetch_or_release) +static inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_release(i, v); +} +#define atomic64_fetch_or_release atomic64_fetch_or_release +#endif + +#if defined(arch_atomic64_fetch_or_relaxed) +static inline s64 +atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_relaxed(i, v); +} +#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed +#endif + +static inline void +atomic64_xor(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + arch_atomic64_xor(i, v); } +#define atomic64_xor atomic64_xor -static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) +#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) +static inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } +#define atomic64_fetch_xor atomic64_fetch_xor +#endif -#ifdef arch_atomic_sub_and_test -#define atomic_sub_and_test atomic_sub_and_test -static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) +#if defined(arch_atomic64_fetch_xor_acquire) +static inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_sub_and_test(i, v); + return arch_atomic64_fetch_xor_acquire(i, v); } +#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire #endif -#ifdef arch_atomic64_sub_and_test -#define atomic64_sub_and_test atomic64_sub_and_test -static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) +#if defined(arch_atomic64_fetch_xor_release) +static inline s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_release(i, v); +} +#define atomic64_fetch_xor_release atomic64_fetch_xor_release +#endif + +#if 
defined(arch_atomic64_fetch_xor_relaxed) +static inline s64 +atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_relaxed(i, v); +} +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed +#endif + +#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) +static inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} +#define atomic64_xchg atomic64_xchg +#endif + +#if defined(arch_atomic64_xchg_acquire) +static inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg_acquire(v, i); +} +#define atomic64_xchg_acquire atomic64_xchg_acquire +#endif + +#if defined(arch_atomic64_xchg_release) +static inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg_release(v, i); +} +#define atomic64_xchg_release atomic64_xchg_release +#endif + +#if defined(arch_atomic64_xchg_relaxed) +static inline s64 +atomic64_xchg_relaxed(atomic64_t *v, s64 i) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_xchg_relaxed(v, i); +} +#define atomic64_xchg_relaxed atomic64_xchg_relaxed +#endif + +#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) +static inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} +#define atomic64_cmpxchg atomic64_cmpxchg +#endif + +#if defined(arch_atomic64_cmpxchg_acquire) +static inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_acquire(v, old, new); +} +#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#endif + +#if defined(arch_atomic64_cmpxchg_release) +static inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_release(v, old, new); +} +#define atomic64_cmpxchg_release atomic64_cmpxchg_release +#endif + +#if defined(arch_atomic64_cmpxchg_relaxed) +static inline s64 +atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed +#endif + +#if defined(arch_atomic64_try_cmpxchg) +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#if defined(arch_atomic64_try_cmpxchg_acquire) +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_acquire(v, old, new); +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#if defined(arch_atomic64_try_cmpxchg_release) +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_release(v, old, new); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#if defined(arch_atomic64_try_cmpxchg_relaxed) +static inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 
*old, s64 new) +{ + kasan_check_write(v, sizeof(*v)); + kasan_check_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed +#endif + +#if defined(arch_atomic64_sub_and_test) +static inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } +#define atomic64_sub_and_test atomic64_sub_and_test #endif -#ifdef arch_atomic_add_negative -#define atomic_add_negative atomic_add_negative -static __always_inline bool atomic_add_negative(int i, atomic_t *v) +#if defined(arch_atomic64_dec_and_test) +static inline bool +atomic64_dec_and_test(atomic64_t *v) { kasan_check_write(v, sizeof(*v)); - return arch_atomic_add_negative(i, v); + return arch_atomic64_dec_and_test(v); } +#define atomic64_dec_and_test atomic64_dec_and_test #endif -#ifdef arch_atomic64_add_negative -#define atomic64_add_negative atomic64_add_negative -static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) +#if defined(arch_atomic64_inc_and_test) +static inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_and_test(v); +} +#define atomic64_inc_and_test atomic64_inc_and_test +#endif + +#if defined(arch_atomic64_add_negative) +static inline bool +atomic64_add_negative(s64 i, atomic64_t *v) { kasan_check_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } +#define atomic64_add_negative atomic64_add_negative +#endif + +#if defined(arch_atomic64_fetch_add_unless) +static inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_unless(v, a, u); +} +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +#endif + +#if defined(arch_atomic64_add_unless) +static inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_add_unless(v, a, u); +} +#define atomic64_add_unless atomic64_add_unless +#endif + +#if defined(arch_atomic64_inc_not_zero) +static inline bool +atomic64_inc_not_zero(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_not_zero(v); +} +#define atomic64_inc_not_zero atomic64_inc_not_zero +#endif + +#if defined(arch_atomic64_inc_unless_negative) +static inline bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_inc_unless_negative(v); +} +#define atomic64_inc_unless_negative atomic64_inc_unless_negative +#endif + +#if defined(arch_atomic64_dec_unless_positive) +static inline bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_unless_positive(v); +} +#define atomic64_dec_unless_positive atomic64_dec_unless_positive +#endif + +#if defined(arch_atomic64_dec_if_positive) +static inline s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + kasan_check_write(v, sizeof(*v)); + return arch_atomic64_dec_if_positive(v); +} +#define atomic64_dec_if_positive atomic64_dec_if_positive +#endif + +#if !defined(arch_xchg_relaxed) || defined(arch_xchg) +#define xchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_xchg_acquire) +#define xchg_acquire(ptr, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_xchg_release) +#define xchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_release(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_xchg_relaxed) +#define xchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg) +#define cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg_acquire) +#define cmpxchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) +#endif + +#if defined(arch_cmpxchg_release) +#define cmpxchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ +}) #endif -#define xchg(ptr, new) \ +#if defined(arch_cmpxchg_relaxed) +#define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg(__ai_ptr, (new)); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) +#endif -#define cmpxchg(ptr, old, new) \ +#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64) +#define cmpxchg64(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg(__ai_ptr, (old), (new)); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) +#endif -#define sync_cmpxchg(ptr, old, new) \ +#if defined(arch_cmpxchg64_acquire) +#define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_sync_cmpxchg(__ai_ptr, (old), (new)); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) +#endif -#define cmpxchg_local(ptr, old, new) \ +#if defined(arch_cmpxchg64_release) +#define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_local(__ai_ptr, (old), (new)); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) +#endif -#define cmpxchg64(ptr, old, new) \ +#if defined(arch_cmpxchg64_relaxed) +#define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64(__ai_ptr, (old), (new)); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) +#endif -#define cmpxchg64_local(ptr, old, new) \ +#define cmpxchg_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_local(__ai_ptr, (old), (new)); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) -#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ +#define cmpxchg64_local(ptr, ...) 
\ ({ \ - typeof(p1) __ai_p1 = (p1); \ - kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \ - arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ }) -#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ -({ \ - typeof(p1) __ai_p1 = (p1); \ - kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \ - arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \ +#define sync_cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_double(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ +}) + + +#define cmpxchg_double_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) -#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ +#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ +// b29b625d5de9280f680e42c7be859b55b15e5f6a diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 87d14476edc2..881c7e27af28 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -1,269 +1,1013 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-long.sh +// DO NOT MODIFY THIS FILE DIRECTLY + #ifndef _ASM_GENERIC_ATOMIC_LONG_H #define _ASM_GENERIC_ATOMIC_LONG_H -/* - * Copyright (C) 2005 Silicon Graphics, Inc. - * Christoph Lameter - * - * Allows to provide arch independent atomic definitions without the need to - * edit all arch specific atomic.h files. - */ #include <asm/types.h> -/* - * Suppport for atomic_long_t - * - * Casts for parameters are avoided for existing atomic functions in order to - * avoid issues with cast-as-lval under gcc 4.x and other limitations that the - * macros of a platform may have. 
- */ +#ifdef CONFIG_64BIT +typedef atomic64_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#else +typedef atomic_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#endif -#if BITS_PER_LONG == 64 +#ifdef CONFIG_64BIT -typedef atomic64_t atomic_long_t; +static inline long +atomic_long_read(const atomic_long_t *v) +{ + return atomic64_read(v); +} -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) -#define ATOMIC_LONG_PFX(x) atomic64 ## x -#define ATOMIC_LONG_TYPE s64 +static inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + return atomic64_read_acquire(v); +} -#else +static inline void +atomic_long_set(atomic_long_t *v, long i) +{ + atomic64_set(v, i); +} -typedef atomic_t atomic_long_t; +static inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + atomic64_set_release(v, i); +} -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -#define ATOMIC_LONG_PFX(x) atomic ## x -#define ATOMIC_LONG_TYPE int +static inline void +atomic_long_add(long i, atomic_long_t *v) +{ + atomic64_add(i, v); +} -#endif +static inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + return atomic64_add_return(i, v); +} + +static inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return atomic64_add_return_acquire(i, v); +} + +static inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return atomic64_add_return_release(i, v); +} + +static inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return atomic64_add_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return atomic64_fetch_add(i, v); +} + +static inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_acquire(i, v); +} + +static inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_release(i, v); +} + +static inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_relaxed(i, v); +} + +static inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + atomic64_sub(i, v); +} + +static inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + return atomic64_sub_return(i, v); +} + +static inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return atomic64_sub_return_acquire(i, v); +} + +static inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return atomic64_sub_return_release(i, v); +} + +static inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return atomic64_sub_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub(i, v); +} + +static inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_acquire(i, v); +} + +static inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_release(i, v); +} + +static inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_relaxed(i, v); +} + +static inline void +atomic_long_inc(atomic_long_t *v) +{ + atomic64_inc(v); +} + +static inline long +atomic_long_inc_return(atomic_long_t *v) +{ 
+ return atomic64_inc_return(v); +} + +static inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return atomic64_inc_return_acquire(v); +} + +static inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + return atomic64_inc_return_release(v); +} + +static inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return atomic64_inc_return_relaxed(v); +} + +static inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + return atomic64_fetch_inc(v); +} + +static inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return atomic64_fetch_inc_acquire(v); +} + +static inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return atomic64_fetch_inc_release(v); +} + +static inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_inc_relaxed(v); +} + +static inline void +atomic_long_dec(atomic_long_t *v) +{ + atomic64_dec(v); +} + +static inline long +atomic_long_dec_return(atomic_long_t *v) +{ + return atomic64_dec_return(v); +} + +static inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return atomic64_dec_return_acquire(v); +} + +static inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + return atomic64_dec_return_release(v); +} + +static inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return atomic64_dec_return_relaxed(v); +} + +static inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + return atomic64_fetch_dec(v); +} + +static inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return atomic64_fetch_dec_acquire(v); +} + +static inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return atomic64_fetch_dec_release(v); +} + +static inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_dec_relaxed(v); +} + +static inline void +atomic_long_and(long i, atomic_long_t *v) +{ + atomic64_and(i, v); +} + +static inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return atomic64_fetch_and(i, v); +} + +static inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_acquire(i, v); +} + +static inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_release(i, v); +} + +static inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_relaxed(i, v); +} + +static inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + atomic64_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_acquire(i, v); +} + +static inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_release(i, v); +} + +static inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_relaxed(i, v); +} + +static inline void +atomic_long_or(long i, atomic_long_t *v) +{ + atomic64_or(i, v); +} + +static inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return atomic64_fetch_or(i, v); +} + +static inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_acquire(i, v); +} + +static inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_release(i, v); +} + +static inline long 
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_relaxed(i, v); +} + +static inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + atomic64_xor(i, v); +} + +static inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor(i, v); +} + +static inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_acquire(i, v); +} + +static inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_release(i, v); +} + +static inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_relaxed(i, v); +} + +static inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + return atomic64_xchg(v, i); +} + +static inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return atomic64_xchg_acquire(v, i); +} + +static inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return atomic64_xchg_release(v, i); +} + +static inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return atomic64_xchg_relaxed(v, i); +} + +static inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg(v, old, new); +} + +static inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_acquire(v, old, new); +} + +static inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_release(v, old, new); +} + +static inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_relaxed(v, old, new); +} + +static inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg(v, (s64 *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return atomic64_sub_and_test(i, v); +} + +static inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + return atomic64_dec_and_test(v); +} + +static inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + return atomic64_inc_and_test(v); +} + +static inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + return atomic64_add_negative(i, v); +} + +static inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_fetch_add_unless(v, a, u); +} + +static inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_add_unless(v, a, u); +} + +static inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + return atomic64_inc_not_zero(v); +} + +static inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return atomic64_inc_unless_negative(v); +} + +static inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return atomic64_dec_unless_positive(v); +} + +static inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + return atomic64_dec_if_positive(v); +} + +#else /* CONFIG_64BIT */ + +static inline long 
+atomic_long_read(const atomic_long_t *v) +{ + return atomic_read(v); +} + +static inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + return atomic_read_acquire(v); +} + +static inline void +atomic_long_set(atomic_long_t *v, long i) +{ + atomic_set(v, i); +} + +static inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + atomic_set_release(v, i); +} + +static inline void +atomic_long_add(long i, atomic_long_t *v) +{ + atomic_add(i, v); +} + +static inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + return atomic_add_return(i, v); +} + +static inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return atomic_add_return_acquire(i, v); +} + +static inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return atomic_add_return_release(i, v); +} + +static inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return atomic_add_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return atomic_fetch_add(i, v); +} + +static inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_add_acquire(i, v); +} + +static inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return atomic_fetch_add_release(i, v); +} + +static inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_add_relaxed(i, v); +} + +static inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + atomic_sub(i, v); +} + +static inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + return atomic_sub_return(i, v); +} + +static inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return atomic_sub_return_acquire(i, v); +} + +static inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return atomic_sub_return_release(i, v); +} + +static inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return atomic_sub_return_relaxed(i, v); +} + +static inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return atomic_fetch_sub(i, v); +} + +static inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_acquire(i, v); +} + +static inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_release(i, v); +} + +static inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_sub_relaxed(i, v); +} + +static inline void +atomic_long_inc(atomic_long_t *v) +{ + atomic_inc(v); +} + +static inline long +atomic_long_inc_return(atomic_long_t *v) +{ + return atomic_inc_return(v); +} + +static inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return atomic_inc_return_acquire(v); +} + +static inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + return atomic_inc_return_release(v); +} + +static inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return atomic_inc_return_relaxed(v); +} + +static inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + return atomic_fetch_inc(v); +} + +static inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return atomic_fetch_inc_acquire(v); +} + +static inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return atomic_fetch_inc_release(v); +} + +static inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return atomic_fetch_inc_relaxed(v); +} + +static inline void 
+atomic_long_dec(atomic_long_t *v) +{ + atomic_dec(v); +} + +static inline long +atomic_long_dec_return(atomic_long_t *v) +{ + return atomic_dec_return(v); +} + +static inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return atomic_dec_return_acquire(v); +} + +static inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + return atomic_dec_return_release(v); +} + +static inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return atomic_dec_return_relaxed(v); +} + +static inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + return atomic_fetch_dec(v); +} + +static inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return atomic_fetch_dec_acquire(v); +} + +static inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return atomic_fetch_dec_release(v); +} + +static inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return atomic_fetch_dec_relaxed(v); +} + +static inline void +atomic_long_and(long i, atomic_long_t *v) +{ + atomic_and(i, v); +} + +static inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return atomic_fetch_and(i, v); +} + +static inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_and_acquire(i, v); +} + +static inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return atomic_fetch_and_release(i, v); +} + +static inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_and_relaxed(i, v); +} + +static inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + atomic_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot(i, v); +} + +static inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_acquire(i, v); +} + +static inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_release(i, v); +} + +static inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_andnot_relaxed(i, v); +} -#define ATOMIC_LONG_READ_OP(mo) \ -static inline long atomic_long_read##mo(const atomic_long_t *l) \ -{ \ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ - \ - return (long)ATOMIC_LONG_PFX(_read##mo)(v); \ -} -ATOMIC_LONG_READ_OP() -ATOMIC_LONG_READ_OP(_acquire) - -#undef ATOMIC_LONG_READ_OP - -#define ATOMIC_LONG_SET_OP(mo) \ -static inline void atomic_long_set##mo(atomic_long_t *l, long i) \ -{ \ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ - \ - ATOMIC_LONG_PFX(_set##mo)(v, i); \ -} -ATOMIC_LONG_SET_OP() -ATOMIC_LONG_SET_OP(_release) - -#undef ATOMIC_LONG_SET_OP - -#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \ -static inline long \ -atomic_long_##op##_return##mo(long i, atomic_long_t *l) \ -{ \ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ - \ - return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \ -} -ATOMIC_LONG_ADD_SUB_OP(add,) -ATOMIC_LONG_ADD_SUB_OP(add, _relaxed) -ATOMIC_LONG_ADD_SUB_OP(add, _acquire) -ATOMIC_LONG_ADD_SUB_OP(add, _release) -ATOMIC_LONG_ADD_SUB_OP(sub,) -ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed) -ATOMIC_LONG_ADD_SUB_OP(sub, _acquire) -ATOMIC_LONG_ADD_SUB_OP(sub, _release) - -#undef ATOMIC_LONG_ADD_SUB_OP - -#define atomic_long_cmpxchg_relaxed(l, old, new) \ - (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ - (old), (new))) -#define atomic_long_cmpxchg_acquire(l, old, new) \ - 
(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ - (old), (new))) -#define atomic_long_cmpxchg_release(l, old, new) \ - (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ - (old), (new))) -#define atomic_long_cmpxchg(l, old, new) \ - (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new))) - - -#define atomic_long_try_cmpxchg_relaxed(l, old, new) \ - (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ - (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) -#define atomic_long_try_cmpxchg_acquire(l, old, new) \ - (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ - (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) -#define atomic_long_try_cmpxchg_release(l, old, new) \ - (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ - (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) -#define atomic_long_try_cmpxchg(l, old, new) \ - (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \ - (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) - - -#define atomic_long_xchg_relaxed(v, new) \ - (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new))) -#define atomic_long_xchg_acquire(v, new) \ - (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new))) -#define atomic_long_xchg_release(v, new) \ - (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new))) -#define atomic_long_xchg(v, new) \ - (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) - -static __always_inline void atomic_long_inc(atomic_long_t *l) -{ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - - ATOMIC_LONG_PFX(_inc)(v); -} - -static __always_inline void atomic_long_dec(atomic_long_t *l) +static inline void +atomic_long_or(long i, atomic_long_t *v) { - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + atomic_or(i, v); +} - ATOMIC_LONG_PFX(_dec)(v); +static inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return atomic_fetch_or(i, v); } -#define ATOMIC_LONG_FETCH_OP(op, mo) \ -static inline long \ -atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \ -{ \ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ - \ - return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \ +static inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_or_acquire(i, v); } -ATOMIC_LONG_FETCH_OP(add, ) -ATOMIC_LONG_FETCH_OP(add, _relaxed) -ATOMIC_LONG_FETCH_OP(add, _acquire) -ATOMIC_LONG_FETCH_OP(add, _release) -ATOMIC_LONG_FETCH_OP(sub, ) -ATOMIC_LONG_FETCH_OP(sub, _relaxed) -ATOMIC_LONG_FETCH_OP(sub, _acquire) -ATOMIC_LONG_FETCH_OP(sub, _release) -ATOMIC_LONG_FETCH_OP(and, ) -ATOMIC_LONG_FETCH_OP(and, _relaxed) -ATOMIC_LONG_FETCH_OP(and, _acquire) -ATOMIC_LONG_FETCH_OP(and, _release) -ATOMIC_LONG_FETCH_OP(andnot, ) -ATOMIC_LONG_FETCH_OP(andnot, _relaxed) -ATOMIC_LONG_FETCH_OP(andnot, _acquire) -ATOMIC_LONG_FETCH_OP(andnot, _release) -ATOMIC_LONG_FETCH_OP(or, ) -ATOMIC_LONG_FETCH_OP(or, _relaxed) -ATOMIC_LONG_FETCH_OP(or, _acquire) -ATOMIC_LONG_FETCH_OP(or, _release) -ATOMIC_LONG_FETCH_OP(xor, ) -ATOMIC_LONG_FETCH_OP(xor, _relaxed) -ATOMIC_LONG_FETCH_OP(xor, _acquire) -ATOMIC_LONG_FETCH_OP(xor, _release) +static inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return atomic_fetch_or_release(i, v); +} -#undef ATOMIC_LONG_FETCH_OP +static inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_or_relaxed(i, v); +} -#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo) \ -static inline long \ 
-atomic_long_fetch_##op##mo(atomic_long_t *l) \ -{ \ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ - \ - return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v); \ +static inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + atomic_xor(i, v); } -ATOMIC_LONG_FETCH_INC_DEC_OP(inc,) -ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed) -ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire) -ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release) -ATOMIC_LONG_FETCH_INC_DEC_OP(dec,) -ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed) -ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire) -ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release) +static inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return atomic_fetch_xor(i, v); +} -#undef ATOMIC_LONG_FETCH_INC_DEC_OP +static inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_acquire(i, v); +} -#define ATOMIC_LONG_OP(op) \ -static __always_inline void \ -atomic_long_##op(long i, atomic_long_t *l) \ -{ \ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ - \ - ATOMIC_LONG_PFX(_##op)(i, v); \ +static inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_release(i, v); } -ATOMIC_LONG_OP(add) -ATOMIC_LONG_OP(sub) -ATOMIC_LONG_OP(and) -ATOMIC_LONG_OP(andnot) -ATOMIC_LONG_OP(or) -ATOMIC_LONG_OP(xor) +static inline long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return atomic_fetch_xor_relaxed(i, v); +} -#undef ATOMIC_LONG_OP +static inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + return atomic_xchg(v, i); +} -static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) +static inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) { - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + return atomic_xchg_acquire(v, i); +} - return ATOMIC_LONG_PFX(_sub_and_test)(i, v); +static inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return atomic_xchg_release(v, i); } -static inline int atomic_long_dec_and_test(atomic_long_t *l) +static inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) { - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + return atomic_xchg_relaxed(v, i); +} - return ATOMIC_LONG_PFX(_dec_and_test)(v); +static inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg(v, old, new); } -static inline int atomic_long_inc_and_test(atomic_long_t *l) +static inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + return atomic_cmpxchg_acquire(v, old, new); +} - return ATOMIC_LONG_PFX(_inc_and_test)(v); +static inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return atomic_cmpxchg_release(v, old, new); } -static inline int atomic_long_add_negative(long i, atomic_long_t *l) +static inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + return atomic_cmpxchg_relaxed(v, old, new); +} - return ATOMIC_LONG_PFX(_add_negative)(i, v); +static inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg(v, (int *)old, new); } -#define ATOMIC_LONG_INC_DEC_OP(op, mo) \ -static inline long \ -atomic_long_##op##_return##mo(atomic_long_t *l) \ -{ \ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ - \ - return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \ +static inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, 
long new) +{ + return atomic_try_cmpxchg_acquire(v, (int *)old, new); } -ATOMIC_LONG_INC_DEC_OP(inc,) -ATOMIC_LONG_INC_DEC_OP(inc, _relaxed) -ATOMIC_LONG_INC_DEC_OP(inc, _acquire) -ATOMIC_LONG_INC_DEC_OP(inc, _release) -ATOMIC_LONG_INC_DEC_OP(dec,) -ATOMIC_LONG_INC_DEC_OP(dec, _relaxed) -ATOMIC_LONG_INC_DEC_OP(dec, _acquire) -ATOMIC_LONG_INC_DEC_OP(dec, _release) -#undef ATOMIC_LONG_INC_DEC_OP +static inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_release(v, (int *)old, new); +} + +static inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return atomic_try_cmpxchg_relaxed(v, (int *)old, new); +} + +static inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return atomic_sub_and_test(i, v); +} + +static inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + return atomic_dec_and_test(v); +} + +static inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + return atomic_inc_and_test(v); +} + +static inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + return atomic_add_negative(i, v); +} + +static inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic_fetch_add_unless(v, a, u); +} + +static inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic_add_unless(v, a, u); +} -static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +static inline bool +atomic_long_inc_not_zero(atomic_long_t *v) { - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; + return atomic_inc_not_zero(v); +} - return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u); +static inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return atomic_inc_unless_negative(v); } -#define atomic_long_inc_not_zero(l) \ - ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) +static inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return atomic_dec_unless_positive(v); +} -#define atomic_long_cond_read_relaxed(v, c) \ - ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c)) -#define atomic_long_cond_read_acquire(v, c) \ - ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c)) +static inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + return atomic_dec_if_positive(v); +} -#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ +#endif /* CONFIG_64BIT */ +#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ +// 77558968132ce4f911ad53f6f52ce423006f6268 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 20561a60db9c..0e9bd9c83870 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -211,9 +211,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint, /* * WARN_ON_SMP() is for cases that the warning is either * meaningless for !SMP or may even cause failures. - * This is usually used for cases that we have - * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked() - * returns 0 for uniprocessor settings. 
 * It can also be used with values that are only defined
 * on SMP:
 *
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 27bf3377b0cb..fe801f01625e 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -7,7 +7,7 @@
 */
 #ifdef CONFIG_MMU
-#error need to prove a real asm/page.h
+#error need to provide a real asm/page.h
 #endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index bdb0d5548f39..a3184416ddc5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -57,8 +57,7 @@
 #include <linux/workqueue.h>
 #include <linux/dma-fence.h>
 #include <linux/module.h>
-
-#include <asm/mman.h>
+#include <linux/mman.h>
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
diff --git a/include/dt-bindings/clock/r8a7778-clock.h b/include/dt-bindings/clock/r8a7778-clock.h
index f6b07c5399de..d0bff9ec5c66 100644
--- a/include/dt-bindings/clock/r8a7778-clock.h
+++ b/include/dt-bindings/clock/r8a7778-clock.h
@@ -30,6 +30,8 @@
 #define R8A7778_CLK_SCIF3 23
 #define R8A7778_CLK_SCIF4 22
 #define R8A7778_CLK_SCIF5 21
+#define R8A7778_CLK_HSCIF0 19
+#define R8A7778_CLK_HSCIF1 18
 #define R8A7778_CLK_TMU0 16
 #define R8A7778_CLK_TMU1 15
 #define R8A7778_CLK_TMU2 14
diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h
index 15d531aa6e78..ef4a7f944848 100644
--- a/include/dt-bindings/power/mt8173-power.h
+++ b/include/dt-bindings/power/mt8173-power.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H
-#define _DT_BINDINGS_POWER_MT8183_POWER_H
+#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H
+#define _DT_BINDINGS_POWER_MT8173_POWER_H
 #define MT8173_POWER_DOMAIN_VDEC 0
 #define MT8173_POWER_DOMAIN_VENC 1
@@ -13,4 +13,4 @@
 #define MT8173_POWER_DOMAIN_MFG_2D 8
 #define MT8173_POWER_DOMAIN_MFG 9
-#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */
+#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
new file mode 100644
index 000000000000..87d9c6611682
--- /dev/null
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/ + +#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H +#define _DT_BINDINGS_POWER_QCOM_RPMPD_H + +/* SDM845 Power Domain Indexes */ +#define SDM845_EBI 0 +#define SDM845_MX 1 +#define SDM845_MX_AO 2 +#define SDM845_CX 3 +#define SDM845_CX_AO 4 +#define SDM845_LMX 5 +#define SDM845_LCX 6 +#define SDM845_GFX 7 +#define SDM845_MSS 8 + +/* SDM845 Power Domain performance levels */ +#define RPMH_REGULATOR_LEVEL_RETENTION 16 +#define RPMH_REGULATOR_LEVEL_MIN_SVS 48 +#define RPMH_REGULATOR_LEVEL_LOW_SVS 64 +#define RPMH_REGULATOR_LEVEL_SVS 128 +#define RPMH_REGULATOR_LEVEL_SVS_L1 192 +#define RPMH_REGULATOR_LEVEL_NOM 256 +#define RPMH_REGULATOR_LEVEL_NOM_L1 320 +#define RPMH_REGULATOR_LEVEL_NOM_L2 336 +#define RPMH_REGULATOR_LEVEL_TURBO 384 +#define RPMH_REGULATOR_LEVEL_TURBO_L1 416 + +/* MSM8996 Power Domain Indexes */ +#define MSM8996_VDDCX 0 +#define MSM8996_VDDCX_AO 1 +#define MSM8996_VDDCX_VFC 2 +#define MSM8996_VDDMX 3 +#define MSM8996_VDDMX_AO 4 +#define MSM8996_VDDSSCX 5 +#define MSM8996_VDDSSCX_VFC 6 + +#endif diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h new file mode 100644 index 000000000000..0d9a412fd5e0 --- /dev/null +++ b/include/dt-bindings/power/xlnx-zynqmp-power.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Xilinx, Inc. + */ + +#ifndef _DT_BINDINGS_ZYNQMP_POWER_H +#define _DT_BINDINGS_ZYNQMP_POWER_H + +#define PD_USB_0 22 +#define PD_USB_1 23 +#define PD_TTC_0 24 +#define PD_TTC_1 25 +#define PD_TTC_2 26 +#define PD_TTC_3 27 +#define PD_SATA 28 +#define PD_ETH_0 29 +#define PD_ETH_1 30 +#define PD_ETH_2 31 +#define PD_ETH_3 32 +#define PD_UART_0 33 +#define PD_UART_1 34 +#define PD_SPI_0 35 +#define PD_SPI_1 36 +#define PD_I2C_0 37 +#define PD_I2C_1 38 +#define PD_SD_0 39 +#define PD_SD_1 40 +#define PD_DP 41 +#define PD_GDMA 42 +#define PD_ADMA 43 +#define PD_NAND 44 +#define PD_QSPI 45 +#define PD_GPIO 46 +#define PD_CAN_0 47 +#define PD_CAN_1 48 +#define PD_GPU 58 +#define PD_PCIE 59 + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h new file mode 100644 index 000000000000..8063e8314eef --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (c) 2019 BayLibre, SAS. 
+ * Author: Jerome Brunet <jbrunet@baylibre.com> + * + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_G12A_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +/* 1 */ +#define RESET_DOS 2 +/* 3-4 */ +#define RESET_VIU 5 +#define RESET_AFIFO 6 +#define RESET_VID_PLL_DIV 7 +/* 8-9 */ +#define RESET_VENC 10 +#define RESET_ASSIST 11 +#define RESET_PCIE_CTRL_A 12 +#define RESET_VCBUS 13 +#define RESET_PCIE_PHY 14 +#define RESET_PCIE_APB 15 +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +/* 18 */ +#define RESET_HDMITX_CAPB3 19 +#define RESET_DVALIN_CAPB3 20 +#define RESET_DOS_CAPB3 21 +/* 22 */ +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +/* 27-31 */ +/* RESET1 */ +/* 32 */ +#define RESET_DEMUX 33 +#define RESET_USB 34 +#define RESET_DDR 35 +/* 36 */ +#define RESET_BT656 37 +#define RESET_AHB_SRAM 38 +/* 39 */ +#define RESET_PARSER 40 +/* 41 */ +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +#define RESET_SD_EMMC_A 44 +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +/* 47-60 */ +#define RESET_AUDIO_CODEC 61 +/* 62-63 */ +/* RESET2 */ +/* 64 */ +#define RESET_AUDIO 65 +#define RESET_HDMITX_PHY 66 +/* 67 */ +#define RESET_MIPI_DSI_HOST 68 +#define RESET_ALOCKER 69 +#define RESET_GE2D 70 +#define RESET_PARSER_REG 71 +#define RESET_PARSER_FETCH 72 +#define RESET_CTL 73 +#define RESET_PARSER_TOP 74 +/* 75-77 */ +#define RESET_DVALIN 78 +#define RESET_HDMITX 79 +/* 80-95 */ +/* RESET3 */ +/* 96-95 */ +#define RESET_DEMUX_TOP 105 +#define RESET_DEMUX_DES_PL 106 +#define RESET_DEMUX_S2P_0 107 +#define RESET_DEMUX_S2P_1 108 +#define RESET_DEMUX_0 109 +#define RESET_DEMUX_1 110 +#define RESET_DEMUX_2 111 +/* 112-127 */ +/* RESET4 */ +/* 128-129 */ +#define RESET_MIPI_DSI_PHY 130 +/* 131-132 */ +#define RESET_RDMA 133 +#define RESET_VENCI 134 +#define RESET_VENCP 135 +/* 136 */ +#define RESET_VDAC 137 +/* 138-139 */ +#define RESET_VDI6 140 +#define RESET_VENCL 141 +#define RESET_I2C_M1 142 +#define RESET_I2C_M2 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_GEN 192 +#define RESET_SPICC0 193 +#define RESET_SC 194 +#define RESET_SANA_3 195 +#define RESET_I2C_M0 196 +#define RESET_TS_PLL 197 +#define RESET_SPICC1 198 +#define RESET_STREAM 199 +#define RESET_TS_CPU 200 +#define RESET_UART0 201 +#define RESET_UART1_2 202 +#define RESET_ASYNC0 203 +#define RESET_ASYNC1 204 +#define RESET_SPIFC0 205 +#define RESET_I2C_M3 206 +/* 207-223 */ +/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +#define RESET_TS_GPU 228 +#define RESET_DEVICE_MMC_ARB 229 +#define RESET_DVALIN_DMC_PIPL 230 +#define RESET_VID_LOCK 231 +#define RESET_NIC_DMC_PIPL 232 +#define RESET_DMC_VPU_PIPL 233 +#define RESET_GE2D_DMC_PIPL 234 +#define RESET_HCODEC_DMC_PIPL 235 +#define RESET_WAVE420_DMC_PIPL 236 +#define RESET_HEVCF_DMC_PIPL 237 +/* 238-255 */ + +#endif diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h new file mode 100644 index 000000000000..57c592498aa0 --- /dev/null +++ b/include/dt-bindings/reset/imx8mq-reset.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Zodiac Inflight Innovations + * + * Author: Andrey Smirnov <andrew.smirnov@gmail.com> + */ + +#ifndef DT_BINDING_RESET_IMX8MQ_H +#define DT_BINDING_RESET_IMX8MQ_H + +#define IMX8MQ_RESET_A53_CORE_POR_RESET0 0 +#define IMX8MQ_RESET_A53_CORE_POR_RESET1 1 +#define 
IMX8MQ_RESET_A53_CORE_POR_RESET2 2 +#define IMX8MQ_RESET_A53_CORE_POR_RESET3 3 +#define IMX8MQ_RESET_A53_CORE_RESET0 4 +#define IMX8MQ_RESET_A53_CORE_RESET1 5 +#define IMX8MQ_RESET_A53_CORE_RESET2 6 +#define IMX8MQ_RESET_A53_CORE_RESET3 7 +#define IMX8MQ_RESET_A53_DBG_RESET0 8 +#define IMX8MQ_RESET_A53_DBG_RESET1 9 +#define IMX8MQ_RESET_A53_DBG_RESET2 10 +#define IMX8MQ_RESET_A53_DBG_RESET3 11 +#define IMX8MQ_RESET_A53_ETM_RESET0 12 +#define IMX8MQ_RESET_A53_ETM_RESET1 13 +#define IMX8MQ_RESET_A53_ETM_RESET2 14 +#define IMX8MQ_RESET_A53_ETM_RESET3 15 +#define IMX8MQ_RESET_A53_SOC_DBG_RESET 16 +#define IMX8MQ_RESET_A53_L2RESET 17 +#define IMX8MQ_RESET_SW_NON_SCLR_M4C_RST 18 +#define IMX8MQ_RESET_OTG1_PHY_RESET 19 +#define IMX8MQ_RESET_OTG2_PHY_RESET 20 +#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 +#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 +#define IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N 23 +#define IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N 24 +#define IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N 25 +#define IMX8MQ_RESET_PCIEPHY 26 +#define IMX8MQ_RESET_PCIEPHY_PERST 27 +#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 +#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 +#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 +#define IMX8MQ_RESET_DISP_RESET 31 +#define IMX8MQ_RESET_GPU_RESET 32 +#define IMX8MQ_RESET_VPU_RESET 33 +#define IMX8MQ_RESET_PCIEPHY2 34 +#define IMX8MQ_RESET_PCIEPHY2_PERST 35 +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 +#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 +#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 +#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 +#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 +#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 +#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 +#define IMX8MQ_RESET_DDRC1_PRST 44 +#define IMX8MQ_RESET_DDRC1_CORE_RESET 45 +#define IMX8MQ_RESET_DDRC1_PHY_RESET 46 +#define IMX8MQ_RESET_DDRC2_PRST 47 +#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 +#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 + +#define IMX8MQ_RESET_NUM 50 + +#endif diff --git a/include/dt-bindings/reset/xlnx-zynqmp-resets.h b/include/dt-bindings/reset/xlnx-zynqmp-resets.h new file mode 100644 index 000000000000..d44525b9f8db --- /dev/null +++ b/include/dt-bindings/reset/xlnx-zynqmp-resets.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Xilinx, Inc. 
+ */ + +#ifndef _DT_BINDINGS_ZYNQMP_RESETS_H +#define _DT_BINDINGS_ZYNQMP_RESETS_H + +#define ZYNQMP_RESET_PCIE_CFG 0 +#define ZYNQMP_RESET_PCIE_BRIDGE 1 +#define ZYNQMP_RESET_PCIE_CTRL 2 +#define ZYNQMP_RESET_DP 3 +#define ZYNQMP_RESET_SWDT_CRF 4 +#define ZYNQMP_RESET_AFI_FM5 5 +#define ZYNQMP_RESET_AFI_FM4 6 +#define ZYNQMP_RESET_AFI_FM3 7 +#define ZYNQMP_RESET_AFI_FM2 8 +#define ZYNQMP_RESET_AFI_FM1 9 +#define ZYNQMP_RESET_AFI_FM0 10 +#define ZYNQMP_RESET_GDMA 11 +#define ZYNQMP_RESET_GPU_PP1 12 +#define ZYNQMP_RESET_GPU_PP0 13 +#define ZYNQMP_RESET_GPU 14 +#define ZYNQMP_RESET_GT 15 +#define ZYNQMP_RESET_SATA 16 +#define ZYNQMP_RESET_ACPU3_PWRON 17 +#define ZYNQMP_RESET_ACPU2_PWRON 18 +#define ZYNQMP_RESET_ACPU1_PWRON 19 +#define ZYNQMP_RESET_ACPU0_PWRON 20 +#define ZYNQMP_RESET_APU_L2 21 +#define ZYNQMP_RESET_ACPU3 22 +#define ZYNQMP_RESET_ACPU2 23 +#define ZYNQMP_RESET_ACPU1 24 +#define ZYNQMP_RESET_ACPU0 25 +#define ZYNQMP_RESET_DDR 26 +#define ZYNQMP_RESET_APM_FPD 27 +#define ZYNQMP_RESET_SOFT 28 +#define ZYNQMP_RESET_GEM0 29 +#define ZYNQMP_RESET_GEM1 30 +#define ZYNQMP_RESET_GEM2 31 +#define ZYNQMP_RESET_GEM3 32 +#define ZYNQMP_RESET_QSPI 33 +#define ZYNQMP_RESET_UART0 34 +#define ZYNQMP_RESET_UART1 35 +#define ZYNQMP_RESET_SPI0 36 +#define ZYNQMP_RESET_SPI1 37 +#define ZYNQMP_RESET_SDIO0 38 +#define ZYNQMP_RESET_SDIO1 39 +#define ZYNQMP_RESET_CAN0 40 +#define ZYNQMP_RESET_CAN1 41 +#define ZYNQMP_RESET_I2C0 42 +#define ZYNQMP_RESET_I2C1 43 +#define ZYNQMP_RESET_TTC0 44 +#define ZYNQMP_RESET_TTC1 45 +#define ZYNQMP_RESET_TTC2 46 +#define ZYNQMP_RESET_TTC3 47 +#define ZYNQMP_RESET_SWDT_CRL 48 +#define ZYNQMP_RESET_NAND 49 +#define ZYNQMP_RESET_ADMA 50 +#define ZYNQMP_RESET_GPIO 51 +#define ZYNQMP_RESET_IOU_CC 52 +#define ZYNQMP_RESET_TIMESTAMP 53 +#define ZYNQMP_RESET_RPU_R50 54 +#define ZYNQMP_RESET_RPU_R51 55 +#define ZYNQMP_RESET_RPU_AMBA 56 +#define ZYNQMP_RESET_OCM 57 +#define ZYNQMP_RESET_RPU_PGE 58 +#define ZYNQMP_RESET_USB0_CORERESET 59 +#define ZYNQMP_RESET_USB1_CORERESET 60 +#define ZYNQMP_RESET_USB0_HIBERRESET 61 +#define ZYNQMP_RESET_USB1_HIBERRESET 62 +#define ZYNQMP_RESET_USB0_APB 63 +#define ZYNQMP_RESET_USB1_APB 64 +#define ZYNQMP_RESET_IPI 65 +#define ZYNQMP_RESET_APM_LPD 66 +#define ZYNQMP_RESET_RTC 67 +#define ZYNQMP_RESET_SYSMON 68 +#define ZYNQMP_RESET_AFI_FM6 69 +#define ZYNQMP_RESET_LPD_SWDT 70 +#define ZYNQMP_RESET_FPD 71 +#define ZYNQMP_RESET_RPU_DBG1 72 +#define ZYNQMP_RESET_RPU_DBG0 73 +#define ZYNQMP_RESET_DBG_LPD 74 +#define ZYNQMP_RESET_DBG_FPD 75 +#define ZYNQMP_RESET_APLL 76 +#define ZYNQMP_RESET_DPLL 77 +#define ZYNQMP_RESET_VPLL 78 +#define ZYNQMP_RESET_IOPLL 79 +#define ZYNQMP_RESET_RPLL 80 +#define ZYNQMP_RESET_GPO3_PL_0 81 +#define ZYNQMP_RESET_GPO3_PL_1 82 +#define ZYNQMP_RESET_GPO3_PL_2 83 +#define ZYNQMP_RESET_GPO3_PL_3 84 +#define ZYNQMP_RESET_GPO3_PL_4 85 +#define ZYNQMP_RESET_GPO3_PL_5 86 +#define ZYNQMP_RESET_GPO3_PL_6 87 +#define ZYNQMP_RESET_GPO3_PL_7 88 +#define ZYNQMP_RESET_GPO3_PL_8 89 +#define ZYNQMP_RESET_GPO3_PL_9 90 +#define ZYNQMP_RESET_GPO3_PL_10 91 +#define ZYNQMP_RESET_GPO3_PL_11 92 +#define ZYNQMP_RESET_GPO3_PL_12 93 +#define ZYNQMP_RESET_GPO3_PL_13 94 +#define ZYNQMP_RESET_GPO3_PL_14 95 +#define ZYNQMP_RESET_GPO3_PL_15 96 +#define ZYNQMP_RESET_GPO3_PL_16 97 +#define ZYNQMP_RESET_GPO3_PL_17 98 +#define ZYNQMP_RESET_GPO3_PL_18 99 +#define ZYNQMP_RESET_GPO3_PL_19 100 +#define ZYNQMP_RESET_GPO3_PL_20 101 +#define ZYNQMP_RESET_GPO3_PL_21 102 +#define ZYNQMP_RESET_GPO3_PL_22 103 +#define ZYNQMP_RESET_GPO3_PL_23 104 +#define 
ZYNQMP_RESET_GPO3_PL_24 105 +#define ZYNQMP_RESET_GPO3_PL_25 106 +#define ZYNQMP_RESET_GPO3_PL_26 107 +#define ZYNQMP_RESET_GPO3_PL_27 108 +#define ZYNQMP_RESET_GPO3_PL_28 109 +#define ZYNQMP_RESET_GPO3_PL_29 110 +#define ZYNQMP_RESET_GPO3_PL_30 111 +#define ZYNQMP_RESET_GPO3_PL_31 112 +#define ZYNQMP_RESET_RPU_LS 113 +#define ZYNQMP_RESET_PS_ONLY 114 +#define ZYNQMP_RESET_PL 115 +#define ZYNQMP_RESET_PS_PL0 116 +#define ZYNQMP_RESET_PS_PL1 117 +#define ZYNQMP_RESET_PS_PL2 118 +#define ZYNQMP_RESET_PS_PL3 119 + +#endif diff --git a/include/dt-bindings/soc/bcm2835-pm.h b/include/dt-bindings/soc/bcm2835-pm.h new file mode 100644 index 000000000000..153d75b8d99f --- /dev/null +++ b/include/dt-bindings/soc/bcm2835-pm.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ + +#ifndef _DT_BINDINGS_ARM_BCM2835_PM_H +#define _DT_BINDINGS_ARM_BCM2835_PM_H + +#define BCM2835_POWER_DOMAIN_GRAFX 0 +#define BCM2835_POWER_DOMAIN_GRAFX_V3D 1 +#define BCM2835_POWER_DOMAIN_IMAGE 2 +#define BCM2835_POWER_DOMAIN_IMAGE_PERI 3 +#define BCM2835_POWER_DOMAIN_IMAGE_ISP 4 +#define BCM2835_POWER_DOMAIN_IMAGE_H264 5 +#define BCM2835_POWER_DOMAIN_USB 6 +#define BCM2835_POWER_DOMAIN_DSI0 7 +#define BCM2835_POWER_DOMAIN_DSI1 8 +#define BCM2835_POWER_DOMAIN_CAM0 9 +#define BCM2835_POWER_DOMAIN_CAM1 10 +#define BCM2835_POWER_DOMAIN_CCP2TX 11 +#define BCM2835_POWER_DOMAIN_HDMI 12 + +#define BCM2835_POWER_DOMAIN_COUNT 13 + +#define BCM2835_RESET_V3D 0 +#define BCM2835_RESET_ISP 1 +#define BCM2835_RESET_H264 2 + +#define BCM2835_RESET_COUNT 3 + +#endif /* _DT_BINDINGS_ARM_BCM2835_PM_H */ diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h new file mode 100644 index 000000000000..a7d240e465c0 --- /dev/null +++ b/include/linux/atomic-fallback.h @@ -0,0 +1,2295 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-fallback.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_FALLBACK_H +#define _LINUX_ATOMIC_FALLBACK_H + +#ifndef xchg_relaxed +#define xchg_relaxed xchg +#define xchg_acquire xchg +#define xchg_release xchg +#else /* xchg_relaxed */ + +#ifndef xchg_acquire +#define xchg_acquire(...) \ + __atomic_op_acquire(xchg, __VA_ARGS__) +#endif + +#ifndef xchg_release +#define xchg_release(...) \ + __atomic_op_release(xchg, __VA_ARGS__) +#endif + +#ifndef xchg +#define xchg(...) \ + __atomic_op_fence(xchg, __VA_ARGS__) +#endif + +#endif /* xchg_relaxed */ + +#ifndef cmpxchg_relaxed +#define cmpxchg_relaxed cmpxchg +#define cmpxchg_acquire cmpxchg +#define cmpxchg_release cmpxchg +#else /* cmpxchg_relaxed */ + +#ifndef cmpxchg_acquire +#define cmpxchg_acquire(...) \ + __atomic_op_acquire(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg_release +#define cmpxchg_release(...) \ + __atomic_op_release(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg +#define cmpxchg(...) \ + __atomic_op_fence(cmpxchg, __VA_ARGS__) +#endif + +#endif /* cmpxchg_relaxed */ + +#ifndef cmpxchg64_relaxed +#define cmpxchg64_relaxed cmpxchg64 +#define cmpxchg64_acquire cmpxchg64 +#define cmpxchg64_release cmpxchg64 +#else /* cmpxchg64_relaxed */ + +#ifndef cmpxchg64_acquire +#define cmpxchg64_acquire(...) \ + __atomic_op_acquire(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64_release +#define cmpxchg64_release(...) \ + __atomic_op_release(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64 +#define cmpxchg64(...) 
\ + __atomic_op_fence(cmpxchg64, __VA_ARGS__) +#endif + +#endif /* cmpxchg64_relaxed */ + +#ifndef atomic_read_acquire +static inline int +atomic_read_acquire(const atomic_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define atomic_read_acquire atomic_read_acquire +#endif + +#ifndef atomic_set_release +static inline void +atomic_set_release(atomic_t *v, int i) +{ + smp_store_release(&(v)->counter, i); +} +#define atomic_set_release atomic_set_release +#endif + +#ifndef atomic_add_return_relaxed +#define atomic_add_return_acquire atomic_add_return +#define atomic_add_return_release atomic_add_return +#define atomic_add_return_relaxed atomic_add_return +#else /* atomic_add_return_relaxed */ + +#ifndef atomic_add_return_acquire +static inline int +atomic_add_return_acquire(int i, atomic_t *v) +{ + int ret = atomic_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_add_return_acquire atomic_add_return_acquire +#endif + +#ifndef atomic_add_return_release +static inline int +atomic_add_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_add_return_relaxed(i, v); +} +#define atomic_add_return_release atomic_add_return_release +#endif + +#ifndef atomic_add_return +static inline int +atomic_add_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_add_return atomic_add_return +#endif + +#endif /* atomic_add_return_relaxed */ + +#ifndef atomic_fetch_add_relaxed +#define atomic_fetch_add_acquire atomic_fetch_add +#define atomic_fetch_add_release atomic_fetch_add +#define atomic_fetch_add_relaxed atomic_fetch_add +#else /* atomic_fetch_add_relaxed */ + +#ifndef atomic_fetch_add_acquire +static inline int +atomic_fetch_add_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_add_acquire atomic_fetch_add_acquire +#endif + +#ifndef atomic_fetch_add_release +static inline int +atomic_fetch_add_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_add_relaxed(i, v); +} +#define atomic_fetch_add_release atomic_fetch_add_release +#endif + +#ifndef atomic_fetch_add +static inline int +atomic_fetch_add(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_add atomic_fetch_add +#endif + +#endif /* atomic_fetch_add_relaxed */ + +#ifndef atomic_sub_return_relaxed +#define atomic_sub_return_acquire atomic_sub_return +#define atomic_sub_return_release atomic_sub_return +#define atomic_sub_return_relaxed atomic_sub_return +#else /* atomic_sub_return_relaxed */ + +#ifndef atomic_sub_return_acquire +static inline int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + int ret = atomic_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_sub_return_acquire atomic_sub_return_acquire +#endif + +#ifndef atomic_sub_return_release +static inline int +atomic_sub_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_sub_return_relaxed(i, v); +} +#define atomic_sub_return_release atomic_sub_return_release +#endif + +#ifndef atomic_sub_return +static inline int +atomic_sub_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define 
atomic_sub_return atomic_sub_return +#endif + +#endif /* atomic_sub_return_relaxed */ + +#ifndef atomic_fetch_sub_relaxed +#define atomic_fetch_sub_acquire atomic_fetch_sub +#define atomic_fetch_sub_release atomic_fetch_sub +#define atomic_fetch_sub_relaxed atomic_fetch_sub +#else /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_fetch_sub_acquire +static inline int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#endif + +#ifndef atomic_fetch_sub_release +static inline int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_sub_relaxed(i, v); +} +#define atomic_fetch_sub_release atomic_fetch_sub_release +#endif + +#ifndef atomic_fetch_sub +static inline int +atomic_fetch_sub(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_sub atomic_fetch_sub +#endif + +#endif /* atomic_fetch_sub_relaxed */ + +#ifndef atomic_inc +static inline void +atomic_inc(atomic_t *v) +{ + atomic_add(1, v); +} +#define atomic_inc atomic_inc +#endif + +#ifndef atomic_inc_return_relaxed +#ifdef atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return +#define atomic_inc_return_relaxed atomic_inc_return +#endif /* atomic_inc_return */ + +#ifndef atomic_inc_return +static inline int +atomic_inc_return(atomic_t *v) +{ + return atomic_add_return(1, v); +} +#define atomic_inc_return atomic_inc_return +#endif + +#ifndef atomic_inc_return_acquire +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + return atomic_add_return_acquire(1, v); +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#ifndef atomic_inc_return_release +static inline int +atomic_inc_return_release(atomic_t *v) +{ + return atomic_add_return_release(1, v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#ifndef atomic_inc_return_relaxed +static inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + return atomic_add_return_relaxed(1, v); +} +#define atomic_inc_return_relaxed atomic_inc_return_relaxed +#endif + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +static inline int +atomic_inc_return_acquire(atomic_t *v) +{ + int ret = atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_inc_return_acquire atomic_inc_return_acquire +#endif + +#ifndef atomic_inc_return_release +static inline int +atomic_inc_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_inc_return_relaxed(v); +} +#define atomic_inc_return_release atomic_inc_return_release +#endif + +#ifndef atomic_inc_return +static inline int +atomic_inc_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_inc_return atomic_inc_return +#endif + +#endif /* atomic_inc_return_relaxed */ + +#ifndef atomic_fetch_inc_relaxed +#ifdef atomic_fetch_inc +#define atomic_fetch_inc_acquire atomic_fetch_inc +#define atomic_fetch_inc_release atomic_fetch_inc +#define atomic_fetch_inc_relaxed atomic_fetch_inc +#endif /* atomic_fetch_inc */ + +#ifndef atomic_fetch_inc +static inline int +atomic_fetch_inc(atomic_t *v) +{ + return atomic_fetch_add(1, v); +} +#define atomic_fetch_inc 
atomic_fetch_inc +#endif + +#ifndef atomic_fetch_inc_acquire +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + return atomic_fetch_add_acquire(1, v); +} +#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#ifndef atomic_fetch_inc_release +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + return atomic_fetch_add_release(1, v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#ifndef atomic_fetch_inc_relaxed +static inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + return atomic_fetch_add_relaxed(1, v); +} +#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed +#endif + +#else /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_fetch_inc_acquire +static inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + int ret = atomic_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#endif + +#ifndef atomic_fetch_inc_release +static inline int +atomic_fetch_inc_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_inc_relaxed(v); +} +#define atomic_fetch_inc_release atomic_fetch_inc_release +#endif + +#ifndef atomic_fetch_inc +static inline int +atomic_fetch_inc(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_inc atomic_fetch_inc +#endif + +#endif /* atomic_fetch_inc_relaxed */ + +#ifndef atomic_dec +static inline void +atomic_dec(atomic_t *v) +{ + atomic_sub(1, v); +} +#define atomic_dec atomic_dec +#endif + +#ifndef atomic_dec_return_relaxed +#ifdef atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return +#define atomic_dec_return_relaxed atomic_dec_return +#endif /* atomic_dec_return */ + +#ifndef atomic_dec_return +static inline int +atomic_dec_return(atomic_t *v) +{ + return atomic_sub_return(1, v); +} +#define atomic_dec_return atomic_dec_return +#endif + +#ifndef atomic_dec_return_acquire +static inline int +atomic_dec_return_acquire(atomic_t *v) +{ + return atomic_sub_return_acquire(1, v); +} +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif + +#ifndef atomic_dec_return_release +static inline int +atomic_dec_return_release(atomic_t *v) +{ + return atomic_sub_return_release(1, v); +} +#define atomic_dec_return_release atomic_dec_return_release +#endif + +#ifndef atomic_dec_return_relaxed +static inline int +atomic_dec_return_relaxed(atomic_t *v) +{ + return atomic_sub_return_relaxed(1, v); +} +#define atomic_dec_return_relaxed atomic_dec_return_relaxed +#endif + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +static inline int +atomic_dec_return_acquire(atomic_t *v) +{ + int ret = atomic_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_dec_return_acquire atomic_dec_return_acquire +#endif + +#ifndef atomic_dec_return_release +static inline int +atomic_dec_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_dec_return_relaxed(v); +} +#define atomic_dec_return_release atomic_dec_return_release +#endif + +#ifndef atomic_dec_return +static inline int +atomic_dec_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_dec_return atomic_dec_return +#endif + +#endif /* atomic_dec_return_relaxed */ + +#ifndef atomic_fetch_dec_relaxed +#ifdef 
atomic_fetch_dec +#define atomic_fetch_dec_acquire atomic_fetch_dec +#define atomic_fetch_dec_release atomic_fetch_dec +#define atomic_fetch_dec_relaxed atomic_fetch_dec +#endif /* atomic_fetch_dec */ + +#ifndef atomic_fetch_dec +static inline int +atomic_fetch_dec(atomic_t *v) +{ + return atomic_fetch_sub(1, v); +} +#define atomic_fetch_dec atomic_fetch_dec +#endif + +#ifndef atomic_fetch_dec_acquire +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + return atomic_fetch_sub_acquire(1, v); +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#ifndef atomic_fetch_dec_release +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + return atomic_fetch_sub_release(1, v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#ifndef atomic_fetch_dec_relaxed +static inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + return atomic_fetch_sub_relaxed(1, v); +} +#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed +#endif + +#else /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_dec_acquire +static inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + int ret = atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#endif + +#ifndef atomic_fetch_dec_release +static inline int +atomic_fetch_dec_release(atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_dec_relaxed(v); +} +#define atomic_fetch_dec_release atomic_fetch_dec_release +#endif + +#ifndef atomic_fetch_dec +static inline int +atomic_fetch_dec(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_dec atomic_fetch_dec +#endif + +#endif /* atomic_fetch_dec_relaxed */ + +#ifndef atomic_fetch_and_relaxed +#define atomic_fetch_and_acquire atomic_fetch_and +#define atomic_fetch_and_release atomic_fetch_and +#define atomic_fetch_and_relaxed atomic_fetch_and +#else /* atomic_fetch_and_relaxed */ + +#ifndef atomic_fetch_and_acquire +static inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_and_acquire atomic_fetch_and_acquire +#endif + +#ifndef atomic_fetch_and_release +static inline int +atomic_fetch_and_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_and_relaxed(i, v); +} +#define atomic_fetch_and_release atomic_fetch_and_release +#endif + +#ifndef atomic_fetch_and +static inline int +atomic_fetch_and(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_and atomic_fetch_and +#endif + +#endif /* atomic_fetch_and_relaxed */ + +#ifndef atomic_andnot +static inline void +atomic_andnot(int i, atomic_t *v) +{ + atomic_and(~i, v); +} +#define atomic_andnot atomic_andnot +#endif + +#ifndef atomic_fetch_andnot_relaxed +#ifdef atomic_fetch_andnot +#define atomic_fetch_andnot_acquire atomic_fetch_andnot +#define atomic_fetch_andnot_release atomic_fetch_andnot +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot +#endif /* atomic_fetch_andnot */ + +#ifndef atomic_fetch_andnot +static inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + return atomic_fetch_and(~i, v); +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#ifndef atomic_fetch_andnot_acquire +static inline int +atomic_fetch_andnot_acquire(int 
i, atomic_t *v) +{ + return atomic_fetch_and_acquire(~i, v); +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#ifndef atomic_fetch_andnot_release +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return atomic_fetch_and_release(~i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#ifndef atomic_fetch_andnot_relaxed +static inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return atomic_fetch_and_relaxed(~i, v); +} +#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed +#endif + +#else /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_andnot_acquire +static inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#endif + +#ifndef atomic_fetch_andnot_release +static inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_andnot_relaxed(i, v); +} +#define atomic_fetch_andnot_release atomic_fetch_andnot_release +#endif + +#ifndef atomic_fetch_andnot +static inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_andnot atomic_fetch_andnot +#endif + +#endif /* atomic_fetch_andnot_relaxed */ + +#ifndef atomic_fetch_or_relaxed +#define atomic_fetch_or_acquire atomic_fetch_or +#define atomic_fetch_or_release atomic_fetch_or +#define atomic_fetch_or_relaxed atomic_fetch_or +#else /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_or_acquire +static inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_or_acquire atomic_fetch_or_acquire +#endif + +#ifndef atomic_fetch_or_release +static inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_or_relaxed(i, v); +} +#define atomic_fetch_or_release atomic_fetch_or_release +#endif + +#ifndef atomic_fetch_or +static inline int +atomic_fetch_or(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_or atomic_fetch_or +#endif + +#endif /* atomic_fetch_or_relaxed */ + +#ifndef atomic_fetch_xor_relaxed +#define atomic_fetch_xor_acquire atomic_fetch_xor +#define atomic_fetch_xor_release atomic_fetch_xor +#define atomic_fetch_xor_relaxed atomic_fetch_xor +#else /* atomic_fetch_xor_relaxed */ + +#ifndef atomic_fetch_xor_acquire +static inline int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + int ret = atomic_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire +#endif + +#ifndef atomic_fetch_xor_release +static inline int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return atomic_fetch_xor_relaxed(i, v); +} +#define atomic_fetch_xor_release atomic_fetch_xor_release +#endif + +#ifndef atomic_fetch_xor +static inline int +atomic_fetch_xor(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic_fetch_xor atomic_fetch_xor +#endif + +#endif /* atomic_fetch_xor_relaxed */ + +#ifndef 
atomic_xchg_relaxed +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg +#define atomic_xchg_relaxed atomic_xchg +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +static inline int +atomic_xchg_acquire(atomic_t *v, int i) +{ + int ret = atomic_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define atomic_xchg_acquire atomic_xchg_acquire +#endif + +#ifndef atomic_xchg_release +static inline int +atomic_xchg_release(atomic_t *v, int i) +{ + __atomic_release_fence(); + return atomic_xchg_relaxed(v, i); +} +#define atomic_xchg_release atomic_xchg_release +#endif + +#ifndef atomic_xchg +static inline int +atomic_xchg(atomic_t *v, int i) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define atomic_xchg atomic_xchg +#endif + +#endif /* atomic_xchg_relaxed */ + +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#else /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_cmpxchg_acquire +static inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + int ret = atomic_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#endif + +#ifndef atomic_cmpxchg_release +static inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + __atomic_release_fence(); + return atomic_cmpxchg_relaxed(v, old, new); +} +#define atomic_cmpxchg_release atomic_cmpxchg_release +#endif + +#ifndef atomic_cmpxchg +static inline int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + __atomic_pre_full_fence(); + ret = atomic_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic_cmpxchg atomic_cmpxchg +#endif + +#endif /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_try_cmpxchg_relaxed +#ifdef atomic_try_cmpxchg +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg +#define atomic_try_cmpxchg_release atomic_try_cmpxchg +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg +#endif /* atomic_try_cmpxchg */ + +#ifndef atomic_try_cmpxchg +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#ifndef atomic_try_cmpxchg_acquire +static inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#ifndef atomic_try_cmpxchg_release +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#ifndef atomic_try_cmpxchg_relaxed +static inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = atomic_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed +#endif + +#else /* atomic_try_cmpxchg_relaxed */ + +#ifndef atomic_try_cmpxchg_acquire +static inline bool 
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + bool ret = atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#endif + +#ifndef atomic_try_cmpxchg_release +static inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + __atomic_release_fence(); + return atomic_try_cmpxchg_relaxed(v, old, new); +} +#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#endif + +#ifndef atomic_try_cmpxchg +static inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic_try_cmpxchg atomic_try_cmpxchg +#endif + +#endif /* atomic_try_cmpxchg_relaxed */ + +#ifndef atomic_sub_and_test +/** + * atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + return atomic_sub_return(i, v) == 0; +} +#define atomic_sub_and_test atomic_sub_and_test +#endif + +#ifndef atomic_dec_and_test +/** + * atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline bool +atomic_dec_and_test(atomic_t *v) +{ + return atomic_dec_return(v) == 0; +} +#define atomic_dec_and_test atomic_dec_and_test +#endif + +#ifndef atomic_inc_and_test +/** + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic_inc_and_test(atomic_t *v) +{ + return atomic_inc_return(v) == 0; +} +#define atomic_inc_and_test atomic_inc_and_test +#endif + +#ifndef atomic_add_negative +/** + * atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline bool +atomic_add_negative(int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} +#define atomic_add_negative atomic_add_negative +#endif + +#ifndef atomic_fetch_add_unless +/** + * atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define atomic_fetch_add_unless atomic_fetch_add_unless +#endif + +#ifndef atomic_add_unless +/** + * atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. 
+ */ +static inline bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + return atomic_fetch_add_unless(v, a, u) != u; +} +#define atomic_add_unless atomic_add_unless +#endif + +#ifndef atomic_inc_not_zero +/** + * atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +static inline bool +atomic_inc_not_zero(atomic_t *v) +{ + return atomic_add_unless(v, 1, 0); +} +#define atomic_inc_not_zero atomic_inc_not_zero +#endif + +#ifndef atomic_inc_unless_negative +static inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define atomic_inc_unless_negative atomic_inc_unless_negative +#endif + +#ifndef atomic_dec_unless_positive +static inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + int c = atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define atomic_dec_unless_positive atomic_dec_unless_positive +#endif + +#ifndef atomic_dec_if_positive +static inline int +atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = atomic_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define atomic_dec_if_positive atomic_dec_if_positive +#endif + +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif + +#ifndef atomic64_read_acquire +static inline s64 +atomic64_read_acquire(const atomic64_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define atomic64_read_acquire atomic64_read_acquire +#endif + +#ifndef atomic64_set_release +static inline void +atomic64_set_release(atomic64_t *v, s64 i) +{ + smp_store_release(&(v)->counter, i); +} +#define atomic64_set_release atomic64_set_release +#endif + +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return +#define atomic64_add_return_relaxed atomic64_add_return +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +static inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_add_return_acquire atomic64_add_return_acquire +#endif + +#ifndef atomic64_add_return_release +static inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_add_return_relaxed(i, v); +} +#define atomic64_add_return_release atomic64_add_return_release +#endif + +#ifndef atomic64_add_return +static inline s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_add_return atomic64_add_return +#endif + +#endif /* atomic64_add_return_relaxed */ + +#ifndef atomic64_fetch_add_relaxed +#define atomic64_fetch_add_acquire atomic64_fetch_add +#define atomic64_fetch_add_release atomic64_fetch_add +#define atomic64_fetch_add_relaxed atomic64_fetch_add +#else /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_fetch_add_acquire 
+static inline s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#endif + +#ifndef atomic64_fetch_add_release +static inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_add_relaxed(i, v); +} +#define atomic64_fetch_add_release atomic64_fetch_add_release +#endif + +#ifndef atomic64_fetch_add +static inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_add atomic64_fetch_add +#endif + +#endif /* atomic64_fetch_add_relaxed */ + +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return +#define atomic64_sub_return_relaxed atomic64_sub_return +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +static inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_sub_return_acquire atomic64_sub_return_acquire +#endif + +#ifndef atomic64_sub_return_release +static inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_sub_return_relaxed(i, v); +} +#define atomic64_sub_return_release atomic64_sub_return_release +#endif + +#ifndef atomic64_sub_return +static inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_sub_return atomic64_sub_return +#endif + +#endif /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_fetch_sub_relaxed +#define atomic64_fetch_sub_acquire atomic64_fetch_sub +#define atomic64_fetch_sub_release atomic64_fetch_sub +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub +#else /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_fetch_sub_acquire +static inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#endif + +#ifndef atomic64_fetch_sub_release +static inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_sub_relaxed(i, v); +} +#define atomic64_fetch_sub_release atomic64_fetch_sub_release +#endif + +#ifndef atomic64_fetch_sub +static inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_sub atomic64_fetch_sub +#endif + +#endif /* atomic64_fetch_sub_relaxed */ + +#ifndef atomic64_inc +static inline void +atomic64_inc(atomic64_t *v) +{ + atomic64_add(1, v); +} +#define atomic64_inc atomic64_inc +#endif + +#ifndef atomic64_inc_return_relaxed +#ifdef atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return +#define atomic64_inc_return_relaxed atomic64_inc_return +#endif /* atomic64_inc_return */ + +#ifndef atomic64_inc_return +static inline s64 +atomic64_inc_return(atomic64_t *v) +{ + return atomic64_add_return(1, 
v); +} +#define atomic64_inc_return atomic64_inc_return +#endif + +#ifndef atomic64_inc_return_acquire +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + return atomic64_add_return_acquire(1, v); +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#ifndef atomic64_inc_return_release +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + return atomic64_add_return_release(1, v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#ifndef atomic64_inc_return_relaxed +static inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + return atomic64_add_return_relaxed(1, v); +} +#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed +#endif + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +static inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + s64 ret = atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_inc_return_acquire atomic64_inc_return_acquire +#endif + +#ifndef atomic64_inc_return_release +static inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_inc_return_relaxed(v); +} +#define atomic64_inc_return_release atomic64_inc_return_release +#endif + +#ifndef atomic64_inc_return +static inline s64 +atomic64_inc_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_inc_return atomic64_inc_return +#endif + +#endif /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_fetch_inc_relaxed +#ifdef atomic64_fetch_inc +#define atomic64_fetch_inc_acquire atomic64_fetch_inc +#define atomic64_fetch_inc_release atomic64_fetch_inc +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc +#endif /* atomic64_fetch_inc */ + +#ifndef atomic64_fetch_inc +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + return atomic64_fetch_add(1, v); +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#ifndef atomic64_fetch_inc_acquire +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return atomic64_fetch_add_acquire(1, v); +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#ifndef atomic64_fetch_inc_release +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + return atomic64_fetch_add_release(1, v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#ifndef atomic64_fetch_inc_relaxed +static inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return atomic64_fetch_add_relaxed(1, v); +} +#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed +#endif + +#else /* atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_fetch_inc_acquire +static inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + s64 ret = atomic64_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#endif + +#ifndef atomic64_fetch_inc_release +static inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_inc_relaxed(v); +} +#define atomic64_fetch_inc_release atomic64_fetch_inc_release +#endif + +#ifndef atomic64_fetch_inc +static inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_inc atomic64_fetch_inc +#endif + +#endif /* 
atomic64_fetch_inc_relaxed */ + +#ifndef atomic64_dec +static inline void +atomic64_dec(atomic64_t *v) +{ + atomic64_sub(1, v); +} +#define atomic64_dec atomic64_dec +#endif + +#ifndef atomic64_dec_return_relaxed +#ifdef atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return +#define atomic64_dec_return_relaxed atomic64_dec_return +#endif /* atomic64_dec_return */ + +#ifndef atomic64_dec_return +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + return atomic64_sub_return(1, v); +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#ifndef atomic64_dec_return_acquire +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + return atomic64_sub_return_acquire(1, v); +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#ifndef atomic64_dec_return_release +static inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + return atomic64_sub_return_release(1, v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#ifndef atomic64_dec_return_relaxed +static inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + return atomic64_sub_return_relaxed(1, v); +} +#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed +#endif + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +static inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + s64 ret = atomic64_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_dec_return_acquire atomic64_dec_return_acquire +#endif + +#ifndef atomic64_dec_return_release +static inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_dec_return_relaxed(v); +} +#define atomic64_dec_return_release atomic64_dec_return_release +#endif + +#ifndef atomic64_dec_return +static inline s64 +atomic64_dec_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_dec_return atomic64_dec_return +#endif + +#endif /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_fetch_dec_relaxed +#ifdef atomic64_fetch_dec +#define atomic64_fetch_dec_acquire atomic64_fetch_dec +#define atomic64_fetch_dec_release atomic64_fetch_dec +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec +#endif /* atomic64_fetch_dec */ + +#ifndef atomic64_fetch_dec +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + return atomic64_fetch_sub(1, v); +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#ifndef atomic64_fetch_dec_acquire +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return atomic64_fetch_sub_acquire(1, v); +} +#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#endif + +#ifndef atomic64_fetch_dec_release +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + return atomic64_fetch_sub_release(1, v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#ifndef atomic64_fetch_dec_relaxed +static inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return atomic64_fetch_sub_relaxed(1, v); +} +#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed +#endif + +#else /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_dec_acquire +static inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + s64 ret = atomic64_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_dec_acquire 
atomic64_fetch_dec_acquire +#endif + +#ifndef atomic64_fetch_dec_release +static inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_dec_relaxed(v); +} +#define atomic64_fetch_dec_release atomic64_fetch_dec_release +#endif + +#ifndef atomic64_fetch_dec +static inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_dec atomic64_fetch_dec +#endif + +#endif /* atomic64_fetch_dec_relaxed */ + +#ifndef atomic64_fetch_and_relaxed +#define atomic64_fetch_and_acquire atomic64_fetch_and +#define atomic64_fetch_and_release atomic64_fetch_and +#define atomic64_fetch_and_relaxed atomic64_fetch_and +#else /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_fetch_and_acquire +static inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#endif + +#ifndef atomic64_fetch_and_release +static inline s64 +atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_and_relaxed(i, v); +} +#define atomic64_fetch_and_release atomic64_fetch_and_release +#endif + +#ifndef atomic64_fetch_and +static inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_and atomic64_fetch_and +#endif + +#endif /* atomic64_fetch_and_relaxed */ + +#ifndef atomic64_andnot +static inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + atomic64_and(~i, v); +} +#define atomic64_andnot atomic64_andnot +#endif + +#ifndef atomic64_fetch_andnot_relaxed +#ifdef atomic64_fetch_andnot +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot +#define atomic64_fetch_andnot_release atomic64_fetch_andnot +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot +#endif /* atomic64_fetch_andnot */ + +#ifndef atomic64_fetch_andnot +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and(~i, v); +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#ifndef atomic64_fetch_andnot_acquire +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_acquire(~i, v); +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#ifndef atomic64_fetch_andnot_release +static inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_release(~i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#ifndef atomic64_fetch_andnot_relaxed +static inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return atomic64_fetch_and_relaxed(~i, v); +} +#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed +#endif + +#else /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_andnot_acquire +static inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#endif + +#ifndef atomic64_fetch_andnot_release +static inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return 
atomic64_fetch_andnot_relaxed(i, v); +} +#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#endif + +#ifndef atomic64_fetch_andnot +static inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_andnot atomic64_fetch_andnot +#endif + +#endif /* atomic64_fetch_andnot_relaxed */ + +#ifndef atomic64_fetch_or_relaxed +#define atomic64_fetch_or_acquire atomic64_fetch_or +#define atomic64_fetch_or_release atomic64_fetch_or +#define atomic64_fetch_or_relaxed atomic64_fetch_or +#else /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_or_acquire +static inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#endif + +#ifndef atomic64_fetch_or_release +static inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_or_relaxed(i, v); +} +#define atomic64_fetch_or_release atomic64_fetch_or_release +#endif + +#ifndef atomic64_fetch_or +static inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_or atomic64_fetch_or +#endif + +#endif /* atomic64_fetch_or_relaxed */ + +#ifndef atomic64_fetch_xor_relaxed +#define atomic64_fetch_xor_acquire atomic64_fetch_xor +#define atomic64_fetch_xor_release atomic64_fetch_xor +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor +#else /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_fetch_xor_acquire +static inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + s64 ret = atomic64_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire +#endif + +#ifndef atomic64_fetch_xor_release +static inline s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return atomic64_fetch_xor_relaxed(i, v); +} +#define atomic64_fetch_xor_release atomic64_fetch_xor_release +#endif + +#ifndef atomic64_fetch_xor +static inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_fetch_xor atomic64_fetch_xor +#endif + +#endif /* atomic64_fetch_xor_relaxed */ + +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg +#define atomic64_xchg_relaxed atomic64_xchg +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +static inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + s64 ret = atomic64_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_xchg_acquire atomic64_xchg_acquire +#endif + +#ifndef atomic64_xchg_release +static inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + __atomic_release_fence(); + return atomic64_xchg_relaxed(v, i); +} +#define atomic64_xchg_release atomic64_xchg_release +#endif + +#ifndef atomic64_xchg +static inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_xchg atomic64_xchg +#endif + 
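For context, the #else branch above shows the construction used throughout this fallback header: the full-ordered operation is synthesised from the _relaxed primitive bracketed by __atomic_pre_full_fence()/__atomic_post_full_fence(), while the _acquire/_release variants add a one-sided fence after or before the relaxed call. A minimal, illustrative usage sketch of the resulting API follows; it is not part of this patch, and the names owner, payload, producer and consumer are hypothetical, chosen only to show how a caller might pick the release/acquire variants for a simple ownership hand-off.

/*
 * Illustrative sketch only (not from this diff). The producer publishes a
 * payload and hands off ownership with release ordering; the consumer
 * claims it with acquire ordering, so the payload store is guaranteed to
 * be visible once the flag value is observed.
 */
static atomic64_t owner = ATOMIC64_INIT(0);
static u64 payload;

static void producer(u64 val)
{
	payload = val;				/* plain store */
	atomic64_xchg_release(&owner, 1);	/* orders the store above before the hand-off */
}

static bool consumer(u64 *out)
{
	if (atomic64_xchg_acquire(&owner, 0) != 1)
		return false;			/* nothing published yet */
	*out = payload;				/* safe: acquire pairs with the release above */
	return true;
}

The same pairing applies to every operation generated in this file; callers that need no ordering at all would use the _relaxed form instead.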
+#endif /* atomic64_xchg_relaxed */ + +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +static inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + s64 ret = atomic64_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#endif + +#ifndef atomic64_cmpxchg_release +static inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + __atomic_release_fence(); + return atomic64_cmpxchg_relaxed(v, old, new); +} +#define atomic64_cmpxchg_release atomic64_cmpxchg_release +#endif + +#ifndef atomic64_cmpxchg +static inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = atomic64_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define atomic64_cmpxchg atomic64_cmpxchg +#endif + +#endif /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_try_cmpxchg_relaxed +#ifdef atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg +#endif /* atomic64_try_cmpxchg */ + +#ifndef atomic64_try_cmpxchg +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#ifndef atomic64_try_cmpxchg_acquire +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#ifndef atomic64_try_cmpxchg_release +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#ifndef atomic64_try_cmpxchg_relaxed +static inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = atomic64_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed +#endif + +#else /* atomic64_try_cmpxchg_relaxed */ + +#ifndef atomic64_try_cmpxchg_acquire +static inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#endif + +#ifndef atomic64_try_cmpxchg_release +static inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + __atomic_release_fence(); + return atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#endif + +#ifndef atomic64_try_cmpxchg +static inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + 
return ret; +} +#define atomic64_try_cmpxchg atomic64_try_cmpxchg +#endif + +#endif /* atomic64_try_cmpxchg_relaxed */ + +#ifndef atomic64_sub_and_test +/** + * atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return atomic64_sub_return(i, v) == 0; +} +#define atomic64_sub_and_test atomic64_sub_and_test +#endif + +#ifndef atomic64_dec_and_test +/** + * atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static inline bool +atomic64_dec_and_test(atomic64_t *v) +{ + return atomic64_dec_return(v) == 0; +} +#define atomic64_dec_and_test atomic64_dec_and_test +#endif + +#ifndef atomic64_inc_and_test +/** + * atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + return atomic64_inc_return(v) == 0; +} +#define atomic64_inc_and_test atomic64_inc_and_test +#endif + +#ifndef atomic64_add_negative +/** + * atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static inline bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} +#define atomic64_add_negative atomic64_add_negative +#endif + +#ifndef atomic64_fetch_add_unless +/** + * atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define atomic64_fetch_add_unless atomic64_fetch_add_unless +#endif + +#ifndef atomic64_add_unless +/** + * atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return atomic64_fetch_add_unless(v, a, u) != u; +} +#define atomic64_add_unless atomic64_add_unless +#endif + +#ifndef atomic64_inc_not_zero +/** + * atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. 
+ */ +static inline bool +atomic64_inc_not_zero(atomic64_t *v) +{ + return atomic64_add_unless(v, 1, 0); +} +#define atomic64_inc_not_zero atomic64_inc_not_zero +#endif + +#ifndef atomic64_inc_unless_negative +static inline bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 c = atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define atomic64_inc_unless_negative atomic64_inc_unless_negative +#endif + +#ifndef atomic64_dec_unless_positive +static inline bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 c = atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define atomic64_dec_unless_positive atomic64_dec_unless_positive +#endif + +#ifndef atomic64_dec_if_positive +static inline s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + s64 dec, c = atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define atomic64_dec_if_positive atomic64_dec_if_positive +#endif + +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#endif /* _LINUX_ATOMIC_FALLBACK_H */ +// 25de4a2804d70f57e994fe3b419148658bb5378a diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 1e8e88bdaf09..4c0d009a46f0 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -25,14 +25,6 @@ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. */ -#ifndef atomic_read_acquire -#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) -#endif - -#ifndef atomic_set_release -#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) -#endif - /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. In the case where the relaxed @@ -79,1238 +71,7 @@ __ret; \ }) -/* atomic_add_return_relaxed */ -#ifndef atomic_add_return_relaxed -#define atomic_add_return_relaxed atomic_add_return -#define atomic_add_return_acquire atomic_add_return -#define atomic_add_return_release atomic_add_return - -#else /* atomic_add_return_relaxed */ - -#ifndef atomic_add_return_acquire -#define atomic_add_return_acquire(...) \ - __atomic_op_acquire(atomic_add_return, __VA_ARGS__) -#endif - -#ifndef atomic_add_return_release -#define atomic_add_return_release(...) \ - __atomic_op_release(atomic_add_return, __VA_ARGS__) -#endif - -#ifndef atomic_add_return -#define atomic_add_return(...) \ - __atomic_op_fence(atomic_add_return, __VA_ARGS__) -#endif -#endif /* atomic_add_return_relaxed */ - -#ifndef atomic_inc -#define atomic_inc(v) atomic_add(1, (v)) -#endif - -/* atomic_inc_return_relaxed */ -#ifndef atomic_inc_return_relaxed - -#ifndef atomic_inc_return -#define atomic_inc_return(v) atomic_add_return(1, (v)) -#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) -#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) -#define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) -#else /* atomic_inc_return */ -#define atomic_inc_return_relaxed atomic_inc_return -#define atomic_inc_return_acquire atomic_inc_return -#define atomic_inc_return_release atomic_inc_return -#endif /* atomic_inc_return */ - -#else /* atomic_inc_return_relaxed */ - -#ifndef atomic_inc_return_acquire -#define atomic_inc_return_acquire(...) 
\ - __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic_inc_return_release -#define atomic_inc_return_release(...) \ - __atomic_op_release(atomic_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic_inc_return -#define atomic_inc_return(...) \ - __atomic_op_fence(atomic_inc_return, __VA_ARGS__) -#endif -#endif /* atomic_inc_return_relaxed */ - -/* atomic_sub_return_relaxed */ -#ifndef atomic_sub_return_relaxed -#define atomic_sub_return_relaxed atomic_sub_return -#define atomic_sub_return_acquire atomic_sub_return -#define atomic_sub_return_release atomic_sub_return - -#else /* atomic_sub_return_relaxed */ - -#ifndef atomic_sub_return_acquire -#define atomic_sub_return_acquire(...) \ - __atomic_op_acquire(atomic_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic_sub_return_release -#define atomic_sub_return_release(...) \ - __atomic_op_release(atomic_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic_sub_return -#define atomic_sub_return(...) \ - __atomic_op_fence(atomic_sub_return, __VA_ARGS__) -#endif -#endif /* atomic_sub_return_relaxed */ - -#ifndef atomic_dec -#define atomic_dec(v) atomic_sub(1, (v)) -#endif - -/* atomic_dec_return_relaxed */ -#ifndef atomic_dec_return_relaxed - -#ifndef atomic_dec_return -#define atomic_dec_return(v) atomic_sub_return(1, (v)) -#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) -#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) -#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) -#else /* atomic_dec_return */ -#define atomic_dec_return_relaxed atomic_dec_return -#define atomic_dec_return_acquire atomic_dec_return -#define atomic_dec_return_release atomic_dec_return -#endif /* atomic_dec_return */ - -#else /* atomic_dec_return_relaxed */ - -#ifndef atomic_dec_return_acquire -#define atomic_dec_return_acquire(...) \ - __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic_dec_return_release -#define atomic_dec_return_release(...) \ - __atomic_op_release(atomic_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic_dec_return -#define atomic_dec_return(...) \ - __atomic_op_fence(atomic_dec_return, __VA_ARGS__) -#endif -#endif /* atomic_dec_return_relaxed */ - - -/* atomic_fetch_add_relaxed */ -#ifndef atomic_fetch_add_relaxed -#define atomic_fetch_add_relaxed atomic_fetch_add -#define atomic_fetch_add_acquire atomic_fetch_add -#define atomic_fetch_add_release atomic_fetch_add - -#else /* atomic_fetch_add_relaxed */ - -#ifndef atomic_fetch_add_acquire -#define atomic_fetch_add_acquire(...) \ - __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_add_release -#define atomic_fetch_add_release(...) \ - __atomic_op_release(atomic_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_add -#define atomic_fetch_add(...) 
\ - __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) -#endif -#endif /* atomic_fetch_add_relaxed */ - -/* atomic_fetch_inc_relaxed */ -#ifndef atomic_fetch_inc_relaxed - -#ifndef atomic_fetch_inc -#define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) -#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) -#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) -#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) -#else /* atomic_fetch_inc */ -#define atomic_fetch_inc_relaxed atomic_fetch_inc -#define atomic_fetch_inc_acquire atomic_fetch_inc -#define atomic_fetch_inc_release atomic_fetch_inc -#endif /* atomic_fetch_inc */ - -#else /* atomic_fetch_inc_relaxed */ - -#ifndef atomic_fetch_inc_acquire -#define atomic_fetch_inc_acquire(...) \ - __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_inc_release -#define atomic_fetch_inc_release(...) \ - __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_inc -#define atomic_fetch_inc(...) \ - __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) -#endif -#endif /* atomic_fetch_inc_relaxed */ - -/* atomic_fetch_sub_relaxed */ -#ifndef atomic_fetch_sub_relaxed -#define atomic_fetch_sub_relaxed atomic_fetch_sub -#define atomic_fetch_sub_acquire atomic_fetch_sub -#define atomic_fetch_sub_release atomic_fetch_sub - -#else /* atomic_fetch_sub_relaxed */ - -#ifndef atomic_fetch_sub_acquire -#define atomic_fetch_sub_acquire(...) \ - __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_sub_release -#define atomic_fetch_sub_release(...) \ - __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_sub -#define atomic_fetch_sub(...) \ - __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) -#endif -#endif /* atomic_fetch_sub_relaxed */ - -/* atomic_fetch_dec_relaxed */ -#ifndef atomic_fetch_dec_relaxed - -#ifndef atomic_fetch_dec -#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) -#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) -#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) -#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) -#else /* atomic_fetch_dec */ -#define atomic_fetch_dec_relaxed atomic_fetch_dec -#define atomic_fetch_dec_acquire atomic_fetch_dec -#define atomic_fetch_dec_release atomic_fetch_dec -#endif /* atomic_fetch_dec */ - -#else /* atomic_fetch_dec_relaxed */ - -#ifndef atomic_fetch_dec_acquire -#define atomic_fetch_dec_acquire(...) \ - __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_dec_release -#define atomic_fetch_dec_release(...) \ - __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_dec -#define atomic_fetch_dec(...) \ - __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) -#endif -#endif /* atomic_fetch_dec_relaxed */ - -/* atomic_fetch_or_relaxed */ -#ifndef atomic_fetch_or_relaxed -#define atomic_fetch_or_relaxed atomic_fetch_or -#define atomic_fetch_or_acquire atomic_fetch_or -#define atomic_fetch_or_release atomic_fetch_or - -#else /* atomic_fetch_or_relaxed */ - -#ifndef atomic_fetch_or_acquire -#define atomic_fetch_or_acquire(...) \ - __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_or_release -#define atomic_fetch_or_release(...) \ - __atomic_op_release(atomic_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_or -#define atomic_fetch_or(...) 
\ - __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) -#endif -#endif /* atomic_fetch_or_relaxed */ - -/* atomic_fetch_and_relaxed */ -#ifndef atomic_fetch_and_relaxed -#define atomic_fetch_and_relaxed atomic_fetch_and -#define atomic_fetch_and_acquire atomic_fetch_and -#define atomic_fetch_and_release atomic_fetch_and - -#else /* atomic_fetch_and_relaxed */ - -#ifndef atomic_fetch_and_acquire -#define atomic_fetch_and_acquire(...) \ - __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_and_release -#define atomic_fetch_and_release(...) \ - __atomic_op_release(atomic_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_and -#define atomic_fetch_and(...) \ - __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) -#endif -#endif /* atomic_fetch_and_relaxed */ - -#ifndef atomic_andnot -#define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) -#endif - -#ifndef atomic_fetch_andnot_relaxed - -#ifndef atomic_fetch_andnot -#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) -#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) -#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) -#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) -#else /* atomic_fetch_andnot */ -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot -#define atomic_fetch_andnot_acquire atomic_fetch_andnot -#define atomic_fetch_andnot_release atomic_fetch_andnot -#endif /* atomic_fetch_andnot */ - -#else /* atomic_fetch_andnot_relaxed */ - -#ifndef atomic_fetch_andnot_acquire -#define atomic_fetch_andnot_acquire(...) \ - __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_andnot_release -#define atomic_fetch_andnot_release(...) \ - __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_andnot -#define atomic_fetch_andnot(...) \ - __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) -#endif -#endif /* atomic_fetch_andnot_relaxed */ - -/* atomic_fetch_xor_relaxed */ -#ifndef atomic_fetch_xor_relaxed -#define atomic_fetch_xor_relaxed atomic_fetch_xor -#define atomic_fetch_xor_acquire atomic_fetch_xor -#define atomic_fetch_xor_release atomic_fetch_xor - -#else /* atomic_fetch_xor_relaxed */ - -#ifndef atomic_fetch_xor_acquire -#define atomic_fetch_xor_acquire(...) \ - __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_xor_release -#define atomic_fetch_xor_release(...) \ - __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic_fetch_xor -#define atomic_fetch_xor(...) \ - __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) -#endif -#endif /* atomic_fetch_xor_relaxed */ - - -/* atomic_xchg_relaxed */ -#ifndef atomic_xchg_relaxed -#define atomic_xchg_relaxed atomic_xchg -#define atomic_xchg_acquire atomic_xchg -#define atomic_xchg_release atomic_xchg - -#else /* atomic_xchg_relaxed */ - -#ifndef atomic_xchg_acquire -#define atomic_xchg_acquire(...) \ - __atomic_op_acquire(atomic_xchg, __VA_ARGS__) -#endif - -#ifndef atomic_xchg_release -#define atomic_xchg_release(...) \ - __atomic_op_release(atomic_xchg, __VA_ARGS__) -#endif - -#ifndef atomic_xchg -#define atomic_xchg(...) 
\ - __atomic_op_fence(atomic_xchg, __VA_ARGS__) -#endif -#endif /* atomic_xchg_relaxed */ - -/* atomic_cmpxchg_relaxed */ -#ifndef atomic_cmpxchg_relaxed -#define atomic_cmpxchg_relaxed atomic_cmpxchg -#define atomic_cmpxchg_acquire atomic_cmpxchg -#define atomic_cmpxchg_release atomic_cmpxchg - -#else /* atomic_cmpxchg_relaxed */ - -#ifndef atomic_cmpxchg_acquire -#define atomic_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic_cmpxchg_release -#define atomic_cmpxchg_release(...) \ - __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic_cmpxchg -#define atomic_cmpxchg(...) \ - __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) -#endif -#endif /* atomic_cmpxchg_relaxed */ - -#ifndef atomic_try_cmpxchg - -#define __atomic_try_cmpxchg(type, _p, _po, _n) \ -({ \ - typeof(_po) __po = (_po); \ - typeof(*(_po)) __r, __o = *__po; \ - __r = atomic_cmpxchg##type((_p), __o, (_n)); \ - if (unlikely(__r != __o)) \ - *__po = __r; \ - likely(__r == __o); \ -}) - -#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n) -#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n) -#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n) -#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n) - -#else /* atomic_try_cmpxchg */ -#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg -#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg -#define atomic_try_cmpxchg_release atomic_try_cmpxchg -#endif /* atomic_try_cmpxchg */ - -/* cmpxchg_relaxed */ -#ifndef cmpxchg_relaxed -#define cmpxchg_relaxed cmpxchg -#define cmpxchg_acquire cmpxchg -#define cmpxchg_release cmpxchg - -#else /* cmpxchg_relaxed */ - -#ifndef cmpxchg_acquire -#define cmpxchg_acquire(...) \ - __atomic_op_acquire(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg_release -#define cmpxchg_release(...) \ - __atomic_op_release(cmpxchg, __VA_ARGS__) -#endif - -#ifndef cmpxchg -#define cmpxchg(...) \ - __atomic_op_fence(cmpxchg, __VA_ARGS__) -#endif -#endif /* cmpxchg_relaxed */ - -/* cmpxchg64_relaxed */ -#ifndef cmpxchg64_relaxed -#define cmpxchg64_relaxed cmpxchg64 -#define cmpxchg64_acquire cmpxchg64 -#define cmpxchg64_release cmpxchg64 - -#else /* cmpxchg64_relaxed */ - -#ifndef cmpxchg64_acquire -#define cmpxchg64_acquire(...) \ - __atomic_op_acquire(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64_release -#define cmpxchg64_release(...) \ - __atomic_op_release(cmpxchg64, __VA_ARGS__) -#endif - -#ifndef cmpxchg64 -#define cmpxchg64(...) \ - __atomic_op_fence(cmpxchg64, __VA_ARGS__) -#endif -#endif /* cmpxchg64_relaxed */ - -/* xchg_relaxed */ -#ifndef xchg_relaxed -#define xchg_relaxed xchg -#define xchg_acquire xchg -#define xchg_release xchg - -#else /* xchg_relaxed */ - -#ifndef xchg_acquire -#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__) -#endif - -#ifndef xchg_release -#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__) -#endif - -#ifndef xchg -#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__) -#endif -#endif /* xchg_relaxed */ - -/** - * atomic_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns the original value of @v. 
- */ -#ifndef atomic_fetch_add_unless -static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) -{ - int c = atomic_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic_try_cmpxchg(v, &c, c + a)); - - return c; -} -#endif - -/** - * atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static inline bool atomic_add_unless(atomic_t *v, int a, int u) -{ - return atomic_fetch_add_unless(v, a, u) != u; -} - -/** - * atomic_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. - */ -#ifndef atomic_inc_not_zero -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#endif - -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#ifndef atomic_inc_and_test -static inline bool atomic_inc_and_test(atomic_t *v) -{ - return atomic_inc_return(v) == 0; -} -#endif - -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -#ifndef atomic_dec_and_test -static inline bool atomic_dec_and_test(atomic_t *v) -{ - return atomic_dec_return(v) == 0; -} -#endif - -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -#ifndef atomic_sub_and_test -static inline bool atomic_sub_and_test(int i, atomic_t *v) -{ - return atomic_sub_return(i, v) == 0; -} -#endif - -/** - * atomic_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -#ifndef atomic_add_negative -static inline bool atomic_add_negative(int i, atomic_t *v) -{ - return atomic_add_return(i, v) < 0; -} -#endif - -#ifndef atomic_inc_unless_negative -static inline bool atomic_inc_unless_negative(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#endif - -#ifndef atomic_dec_unless_positive -static inline bool atomic_dec_unless_positive(atomic_t *v) -{ - int c = atomic_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#endif - -/* - * atomic_dec_if_positive - decrement by 1 if old value positive - * @v: pointer of type atomic_t - * - * The function returns the old value of *v minus 1, even if - * the atomic variable, v, was not decremented. 
- */ -#ifndef atomic_dec_if_positive -static inline int atomic_dec_if_positive(atomic_t *v) -{ - int dec, c = atomic_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic_try_cmpxchg(v, &c, dec)); - - return dec; -} -#endif - -#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) -#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) - -#ifdef CONFIG_GENERIC_ATOMIC64 -#include <asm-generic/atomic64.h> -#endif - -#ifndef atomic64_read_acquire -#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) -#endif - -#ifndef atomic64_set_release -#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) -#endif - -/* atomic64_add_return_relaxed */ -#ifndef atomic64_add_return_relaxed -#define atomic64_add_return_relaxed atomic64_add_return -#define atomic64_add_return_acquire atomic64_add_return -#define atomic64_add_return_release atomic64_add_return - -#else /* atomic64_add_return_relaxed */ - -#ifndef atomic64_add_return_acquire -#define atomic64_add_return_acquire(...) \ - __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) -#endif - -#ifndef atomic64_add_return_release -#define atomic64_add_return_release(...) \ - __atomic_op_release(atomic64_add_return, __VA_ARGS__) -#endif - -#ifndef atomic64_add_return -#define atomic64_add_return(...) \ - __atomic_op_fence(atomic64_add_return, __VA_ARGS__) -#endif -#endif /* atomic64_add_return_relaxed */ - -#ifndef atomic64_inc -#define atomic64_inc(v) atomic64_add(1, (v)) -#endif - -/* atomic64_inc_return_relaxed */ -#ifndef atomic64_inc_return_relaxed - -#ifndef atomic64_inc_return -#define atomic64_inc_return(v) atomic64_add_return(1, (v)) -#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) -#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) -#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) -#else /* atomic64_inc_return */ -#define atomic64_inc_return_relaxed atomic64_inc_return -#define atomic64_inc_return_acquire atomic64_inc_return -#define atomic64_inc_return_release atomic64_inc_return -#endif /* atomic64_inc_return */ - -#else /* atomic64_inc_return_relaxed */ - -#ifndef atomic64_inc_return_acquire -#define atomic64_inc_return_acquire(...) \ - __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic64_inc_return_release -#define atomic64_inc_return_release(...) \ - __atomic_op_release(atomic64_inc_return, __VA_ARGS__) -#endif - -#ifndef atomic64_inc_return -#define atomic64_inc_return(...) \ - __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) -#endif -#endif /* atomic64_inc_return_relaxed */ - - -/* atomic64_sub_return_relaxed */ -#ifndef atomic64_sub_return_relaxed -#define atomic64_sub_return_relaxed atomic64_sub_return -#define atomic64_sub_return_acquire atomic64_sub_return -#define atomic64_sub_return_release atomic64_sub_return - -#else /* atomic64_sub_return_relaxed */ - -#ifndef atomic64_sub_return_acquire -#define atomic64_sub_return_acquire(...) \ - __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic64_sub_return_release -#define atomic64_sub_return_release(...) \ - __atomic_op_release(atomic64_sub_return, __VA_ARGS__) -#endif - -#ifndef atomic64_sub_return -#define atomic64_sub_return(...) 
\ - __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) -#endif -#endif /* atomic64_sub_return_relaxed */ - -#ifndef atomic64_dec -#define atomic64_dec(v) atomic64_sub(1, (v)) -#endif - -/* atomic64_dec_return_relaxed */ -#ifndef atomic64_dec_return_relaxed - -#ifndef atomic64_dec_return -#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) -#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) -#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) -#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) -#else /* atomic64_dec_return */ -#define atomic64_dec_return_relaxed atomic64_dec_return -#define atomic64_dec_return_acquire atomic64_dec_return -#define atomic64_dec_return_release atomic64_dec_return -#endif /* atomic64_dec_return */ - -#else /* atomic64_dec_return_relaxed */ - -#ifndef atomic64_dec_return_acquire -#define atomic64_dec_return_acquire(...) \ - __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic64_dec_return_release -#define atomic64_dec_return_release(...) \ - __atomic_op_release(atomic64_dec_return, __VA_ARGS__) -#endif - -#ifndef atomic64_dec_return -#define atomic64_dec_return(...) \ - __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) -#endif -#endif /* atomic64_dec_return_relaxed */ - - -/* atomic64_fetch_add_relaxed */ -#ifndef atomic64_fetch_add_relaxed -#define atomic64_fetch_add_relaxed atomic64_fetch_add -#define atomic64_fetch_add_acquire atomic64_fetch_add -#define atomic64_fetch_add_release atomic64_fetch_add - -#else /* atomic64_fetch_add_relaxed */ - -#ifndef atomic64_fetch_add_acquire -#define atomic64_fetch_add_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_add_release -#define atomic64_fetch_add_release(...) \ - __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_add -#define atomic64_fetch_add(...) \ - __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_add_relaxed */ - -/* atomic64_fetch_inc_relaxed */ -#ifndef atomic64_fetch_inc_relaxed - -#ifndef atomic64_fetch_inc -#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) -#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) -#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) -#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) -#else /* atomic64_fetch_inc */ -#define atomic64_fetch_inc_relaxed atomic64_fetch_inc -#define atomic64_fetch_inc_acquire atomic64_fetch_inc -#define atomic64_fetch_inc_release atomic64_fetch_inc -#endif /* atomic64_fetch_inc */ - -#else /* atomic64_fetch_inc_relaxed */ - -#ifndef atomic64_fetch_inc_acquire -#define atomic64_fetch_inc_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_inc_release -#define atomic64_fetch_inc_release(...) \ - __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_inc -#define atomic64_fetch_inc(...) \ - __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_inc_relaxed */ - -/* atomic64_fetch_sub_relaxed */ -#ifndef atomic64_fetch_sub_relaxed -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub -#define atomic64_fetch_sub_acquire atomic64_fetch_sub -#define atomic64_fetch_sub_release atomic64_fetch_sub - -#else /* atomic64_fetch_sub_relaxed */ - -#ifndef atomic64_fetch_sub_acquire -#define atomic64_fetch_sub_acquire(...) 
\ - __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_sub_release -#define atomic64_fetch_sub_release(...) \ - __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_sub -#define atomic64_fetch_sub(...) \ - __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_sub_relaxed */ - -/* atomic64_fetch_dec_relaxed */ -#ifndef atomic64_fetch_dec_relaxed - -#ifndef atomic64_fetch_dec -#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) -#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) -#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) -#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) -#else /* atomic64_fetch_dec */ -#define atomic64_fetch_dec_relaxed atomic64_fetch_dec -#define atomic64_fetch_dec_acquire atomic64_fetch_dec -#define atomic64_fetch_dec_release atomic64_fetch_dec -#endif /* atomic64_fetch_dec */ - -#else /* atomic64_fetch_dec_relaxed */ - -#ifndef atomic64_fetch_dec_acquire -#define atomic64_fetch_dec_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_dec_release -#define atomic64_fetch_dec_release(...) \ - __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_dec -#define atomic64_fetch_dec(...) \ - __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_dec_relaxed */ - -/* atomic64_fetch_or_relaxed */ -#ifndef atomic64_fetch_or_relaxed -#define atomic64_fetch_or_relaxed atomic64_fetch_or -#define atomic64_fetch_or_acquire atomic64_fetch_or -#define atomic64_fetch_or_release atomic64_fetch_or - -#else /* atomic64_fetch_or_relaxed */ - -#ifndef atomic64_fetch_or_acquire -#define atomic64_fetch_or_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_or_release -#define atomic64_fetch_or_release(...) \ - __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_or -#define atomic64_fetch_or(...) \ - __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_or_relaxed */ - -/* atomic64_fetch_and_relaxed */ -#ifndef atomic64_fetch_and_relaxed -#define atomic64_fetch_and_relaxed atomic64_fetch_and -#define atomic64_fetch_and_acquire atomic64_fetch_and -#define atomic64_fetch_and_release atomic64_fetch_and - -#else /* atomic64_fetch_and_relaxed */ - -#ifndef atomic64_fetch_and_acquire -#define atomic64_fetch_and_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_and_release -#define atomic64_fetch_and_release(...) \ - __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_and -#define atomic64_fetch_and(...) 
\ - __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_and_relaxed */ - -#ifndef atomic64_andnot -#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) -#endif - -#ifndef atomic64_fetch_andnot_relaxed - -#ifndef atomic64_fetch_andnot -#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) -#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) -#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) -#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) -#else /* atomic64_fetch_andnot */ -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot -#define atomic64_fetch_andnot_release atomic64_fetch_andnot -#endif /* atomic64_fetch_andnot */ - -#else /* atomic64_fetch_andnot_relaxed */ - -#ifndef atomic64_fetch_andnot_acquire -#define atomic64_fetch_andnot_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_andnot_release -#define atomic64_fetch_andnot_release(...) \ - __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_andnot -#define atomic64_fetch_andnot(...) \ - __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_andnot_relaxed */ - -/* atomic64_fetch_xor_relaxed */ -#ifndef atomic64_fetch_xor_relaxed -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor -#define atomic64_fetch_xor_acquire atomic64_fetch_xor -#define atomic64_fetch_xor_release atomic64_fetch_xor - -#else /* atomic64_fetch_xor_relaxed */ - -#ifndef atomic64_fetch_xor_acquire -#define atomic64_fetch_xor_acquire(...) \ - __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_xor_release -#define atomic64_fetch_xor_release(...) \ - __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) -#endif - -#ifndef atomic64_fetch_xor -#define atomic64_fetch_xor(...) \ - __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) -#endif -#endif /* atomic64_fetch_xor_relaxed */ - - -/* atomic64_xchg_relaxed */ -#ifndef atomic64_xchg_relaxed -#define atomic64_xchg_relaxed atomic64_xchg -#define atomic64_xchg_acquire atomic64_xchg -#define atomic64_xchg_release atomic64_xchg - -#else /* atomic64_xchg_relaxed */ - -#ifndef atomic64_xchg_acquire -#define atomic64_xchg_acquire(...) \ - __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) -#endif - -#ifndef atomic64_xchg_release -#define atomic64_xchg_release(...) \ - __atomic_op_release(atomic64_xchg, __VA_ARGS__) -#endif - -#ifndef atomic64_xchg -#define atomic64_xchg(...) \ - __atomic_op_fence(atomic64_xchg, __VA_ARGS__) -#endif -#endif /* atomic64_xchg_relaxed */ - -/* atomic64_cmpxchg_relaxed */ -#ifndef atomic64_cmpxchg_relaxed -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg -#define atomic64_cmpxchg_acquire atomic64_cmpxchg -#define atomic64_cmpxchg_release atomic64_cmpxchg - -#else /* atomic64_cmpxchg_relaxed */ - -#ifndef atomic64_cmpxchg_acquire -#define atomic64_cmpxchg_acquire(...) \ - __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic64_cmpxchg_release -#define atomic64_cmpxchg_release(...) \ - __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) -#endif - -#ifndef atomic64_cmpxchg -#define atomic64_cmpxchg(...) 
\ - __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) -#endif -#endif /* atomic64_cmpxchg_relaxed */ - -#ifndef atomic64_try_cmpxchg - -#define __atomic64_try_cmpxchg(type, _p, _po, _n) \ -({ \ - typeof(_po) __po = (_po); \ - typeof(*(_po)) __r, __o = *__po; \ - __r = atomic64_cmpxchg##type((_p), __o, (_n)); \ - if (unlikely(__r != __o)) \ - *__po = __r; \ - likely(__r == __o); \ -}) - -#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n) -#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n) -#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n) -#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n) - -#else /* atomic64_try_cmpxchg */ -#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg -#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg -#endif /* atomic64_try_cmpxchg */ - -/** - * atomic64_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns the original value of @v. - */ -#ifndef atomic64_fetch_add_unless -static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, - long long u) -{ - long long c = atomic64_read(v); - - do { - if (unlikely(c == u)) - break; - } while (!atomic64_try_cmpxchg(v, &c, c + a)); - - return c; -} -#endif - -/** - * atomic64_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. - */ -static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) -{ - return atomic64_fetch_add_unless(v, a, u) != u; -} - -/** - * atomic64_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. - */ -#ifndef atomic64_inc_not_zero -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -#endif - -/** - * atomic64_inc_and_test - increment and test - * @v: pointer of type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#ifndef atomic64_inc_and_test -static inline bool atomic64_inc_and_test(atomic64_t *v) -{ - return atomic64_inc_return(v) == 0; -} -#endif - -/** - * atomic64_dec_and_test - decrement and test - * @v: pointer of type atomic64_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -#ifndef atomic64_dec_and_test -static inline bool atomic64_dec_and_test(atomic64_t *v) -{ - return atomic64_dec_return(v) == 0; -} -#endif - -/** - * atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic64_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. 
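Throughout these removed blocks, the _acquire, _release and fully ordered forms of each operation are derived from the _relaxed primitive via __atomic_op_acquire(), __atomic_op_release() and __atomic_op_fence(); the generated linux/atomic-fallback.h included near the end of this hunk preserves the same layering. Roughly, those helpers behave like the sketch below (simplified, not the kernel's verbatim definitions, which use per-architecture fence hooks):

/*
 * Simplified sketch of the ordering helpers: an acquire variant runs the
 * relaxed op and then orders later accesses; a release variant orders
 * earlier accesses first; the fully ordered variant fences on both sides.
 */
#define __example_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __example_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __example_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})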
- */ -#ifndef atomic64_sub_and_test -static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) -{ - return atomic64_sub_return(i, v) == 0; -} -#endif - -/** - * atomic64_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -#ifndef atomic64_add_negative -static inline bool atomic64_add_negative(long long i, atomic64_t *v) -{ - return atomic64_add_return(i, v) < 0; -} -#endif - -#ifndef atomic64_inc_unless_negative -static inline bool atomic64_inc_unless_negative(atomic64_t *v) -{ - long long c = atomic64_read(v); - - do { - if (unlikely(c < 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c + 1)); - - return true; -} -#endif - -#ifndef atomic64_dec_unless_positive -static inline bool atomic64_dec_unless_positive(atomic64_t *v) -{ - long long c = atomic64_read(v); - - do { - if (unlikely(c > 0)) - return false; - } while (!atomic64_try_cmpxchg(v, &c, c - 1)); - - return true; -} -#endif - -/* - * atomic64_dec_if_positive - decrement by 1 if old value positive - * @v: pointer of type atomic64_t - * - * The function returns the old value of *v minus 1, even if - * the atomic64 variable, v, was not decremented. - */ -#ifndef atomic64_dec_if_positive -static inline long long atomic64_dec_if_positive(atomic64_t *v) -{ - long long dec, c = atomic64_read(v); - - do { - dec = c - 1; - if (unlikely(dec < 0)) - break; - } while (!atomic64_try_cmpxchg(v, &c, dec)); - - return dec; -} -#endif - -#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) -#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#include <linux/atomic-fallback.h> #include <asm-generic/atomic-long.h> diff --git a/include/linux/compat.h b/include/linux/compat.h index 056be0d03722..ebddcb6cfcf8 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -132,37 +132,6 @@ struct compat_tms { compat_clock_t tms_cstime; }; -struct compat_timex { - compat_uint_t modes; - compat_long_t offset; - compat_long_t freq; - compat_long_t maxerror; - compat_long_t esterror; - compat_int_t status; - compat_long_t constant; - compat_long_t precision; - compat_long_t tolerance; - struct old_timeval32 time; - compat_long_t tick; - compat_long_t ppsfreq; - compat_long_t jitter; - compat_int_t shift; - compat_long_t stabil; - compat_long_t jitcnt; - compat_long_t calcnt; - compat_long_t errcnt; - compat_long_t stbcnt; - compat_int_t tai; - - compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; - compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; - compat_int_t:32; compat_int_t:32; compat_int_t:32; -}; - -struct timex; -int compat_get_timex(struct timex *, const struct compat_timex __user *); -int compat_put_timex(struct compat_timex __user *, const struct timex *); - #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) typedef struct { @@ -551,11 +520,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, u32 __user *iocb); -asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, - compat_long_t min_nr, - compat_long_t nr, - struct io_event __user *events, - struct old_timespec32 __user *timeout); asmlinkage long 
compat_sys_io_pgetevents(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, @@ -648,7 +612,7 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, compat_size_t count); /* fs/select.c */ -asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, +asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct old_timespec32 __user *tsp, @@ -658,7 +622,7 @@ asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, compat_ulong_t __user *exp, struct __kernel_timespec __user *tsp, void __user *sig); -asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, +asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds, unsigned int nfds, struct old_timespec32 __user *tsp, const compat_sigset_t __user *sigmask, @@ -688,19 +652,6 @@ asmlinkage long compat_sys_newfstat(unsigned int fd, /* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ -/* fs/timerfd.c */ -asmlinkage long compat_sys_timerfd_gettime(int ufd, - struct old_itimerspec32 __user *otmr); -asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, - const struct old_itimerspec32 __user *utmr, - struct old_itimerspec32 __user *otmr); - -/* fs/utimes.c */ -asmlinkage long compat_sys_utimensat(unsigned int dfd, - const char __user *filename, - struct old_timespec32 __user *t, - int flags); - /* kernel/exit.c */ asmlinkage long compat_sys_waitid(int, compat_pid_t, struct compat_siginfo __user *, int, @@ -709,9 +660,6 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t, /* kernel/futex.c */ -asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, - struct old_timespec32 __user *utime, u32 __user *uaddr2, - u32 val3); asmlinkage long compat_sys_set_robust_list(struct compat_robust_list_head __user *head, compat_size_t len); @@ -719,10 +667,6 @@ asmlinkage long compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, compat_size_t __user *len_ptr); -/* kernel/hrtimer.c */ -asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp, - struct old_timespec32 __user *rmtp); - /* kernel/itimer.c */ asmlinkage long compat_sys_getitimer(int which, struct compat_itimerval __user *it); @@ -740,20 +684,6 @@ asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, asmlinkage long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id); -asmlinkage long compat_sys_timer_gettime(timer_t timer_id, - struct old_itimerspec32 __user *setting); -asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, - struct old_itimerspec32 __user *new, - struct old_itimerspec32 __user *old); -asmlinkage long compat_sys_clock_settime(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long compat_sys_clock_getres(clockid_t which_clock, - struct old_timespec32 __user *tp); -asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, - struct old_timespec32 __user *rqtp, - struct old_timespec32 __user *rmtp); /* kernel/ptrace.c */ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, @@ -766,8 +696,6 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); 
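The prototypes dropped or renamed in this hunk all take the 32-bit time types (struct old_timespec32 and friends); as part of the y2038 conversion they move behind _time32 names while the native entry points use struct __kernel_timespec. The core of each _time32 wrapper is a widening copy along these lines (illustrative sketch only, with a hypothetical helper name):

/*
 * Illustrative sketch, not the kernel's actual helper: widen a 32-bit
 * userspace timespec into the kernel's 64-bit representation before
 * handing it to the common syscall implementation.
 */
static int example_get_old_timespec32(struct timespec64 *ts64,
				      const struct old_timespec32 __user *uts)
{
	struct old_timespec32 ts32;

	if (copy_from_user(&ts32, uts, sizeof(ts32)))
		return -EFAULT;

	ts64->tv_sec  = ts32.tv_sec;	/* sign-extends past 2038 */
	ts64->tv_nsec = ts32.tv_nsec;

	return 0;
}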
-asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, - struct old_timespec32 __user *interval); /* kernel/signal.c */ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, @@ -785,7 +713,7 @@ asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, compat_size_t sigsetsize); -asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, +asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, struct old_timespec32 __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, @@ -808,7 +736,6 @@ asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); -asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); /* kernel/timer.c */ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); @@ -817,14 +744,6 @@ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); asmlinkage long compat_sys_mq_open(const char __user *u_name, int oflag, compat_mode_t mode, struct compat_mq_attr __user *u_attr); -asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, - const char __user *u_msg_ptr, - compat_size_t msg_len, unsigned int msg_prio, - const struct old_timespec32 __user *u_abs_timeout); -asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, - char __user *u_msg_ptr, - compat_size_t msg_len, unsigned int __user *u_msg_prio, - const struct old_timespec32 __user *u_abs_timeout); asmlinkage long compat_sys_mq_notify(mqd_t mqdes, const struct compat_sigevent __user *u_notification); asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, @@ -840,8 +759,6 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, /* ipc/sem.c */ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); -asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned nsems, const struct old_timespec32 __user *timeout); /* ipc/shm.c */ asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); @@ -899,7 +816,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, struct __kernel_timespec __user *timeout); -asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, +asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, struct old_timespec32 __user *timeout); asmlinkage long compat_sys_wait4(compat_pid_t pid, @@ -910,8 +827,6 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); -asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, - struct compat_timex __user *tp); asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags); asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, @@ -952,8 +867,6 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd, /* __ARCH_WANT_SYSCALL_NO_AT */ asmlinkage long compat_sys_open(const char __user *filename, int flags, 
umode_t mode); -asmlinkage long compat_sys_utimes(const char __user *filename, - struct old_timeval32 __user *t); /* __ARCH_WANT_SYSCALL_NO_FLAGS */ asmlinkage long compat_sys_signalfd(int ufd, @@ -967,12 +880,6 @@ asmlinkage long compat_sys_newlstat(const char __user *filename, struct compat_stat __user *statbuf); /* __ARCH_WANT_SYSCALL_DEPRECATED */ -asmlinkage long compat_sys_time(old_time32_t __user *tloc); -asmlinkage long compat_sys_utime(const char __user *filename, - struct old_utimbuf32 __user *t); -asmlinkage long compat_sys_futimesat(unsigned int dfd, - const char __user *filename, - struct old_timeval32 __user *t); asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct old_timeval32 __user *tvp); @@ -1007,9 +914,6 @@ asmlinkage long compat_sys_sigaction(int sig, struct compat_old_sigaction __user *oact); #endif -/* obsolete: kernel/time/time.c */ -asmlinkage long compat_sys_stime(old_time32_t __user *tptr); - /* obsolete: net/socket.c */ asmlinkage long compat_sys_socketcall(int call, u32 __user *args); diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 46c67a764877..7b87965f7a65 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -154,8 +154,9 @@ struct coresight_connection { * @orphan: true if the component has connections that haven't been linked. * @enable: 'true' if component is currently part of an active path. * @activated: 'true' only if a _sink_ has been activated. A sink can be - activated but not yet enabled. Enabling for a _sink_ - happens when a source has been selected for that it. + * activated but not yet enabled. Enabling for a _sink_ + * appens when a source has been selected for that it. + * @ea: Device attribute for sink representation under PMU directory. */ struct coresight_device { struct coresight_connection *conns; @@ -168,7 +169,9 @@ struct coresight_device { atomic_t *refcnt; bool orphan; bool enable; /* true only if configured as part of a path */ + /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ + struct dev_ext_attribute *ea; }; #define to_coresight_device(d) container_of(d, struct coresight_device, dev) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index fd586d0301e7..e78281d07b70 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -121,6 +121,7 @@ enum cpuhp_state { CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, + CPUHP_AP_TEGRA_TIMER_STARTING, CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MARCO_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 05b97144d342..28e6cf1356da 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h @@ -46,5 +46,4 @@ enum { EMAC_VERSION_2, /* DM646x */ }; -void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context); #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 28604a8d0aa9..ae96ea145ae3 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -48,7 +48,20 @@ typedef u16 efi_char16_t; /* UNICODE character */ typedef u64 efi_physical_addr_t; typedef void *efi_handle_t; -typedef guid_t efi_guid_t; +/* + * The UEFI spec and EDK2 reference implementation both define EFI_GUID as + * struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment + * is 32 bits not 8 bits like our guid_t. 
In some cases (i.e., on 32-bit ARM), + * this means that firmware services invoked by the kernel may assume that + * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that + * do not tolerate misalignment. So let's set the minimum alignment to 32 bits. + * + * Note that the UEFI spec as well as some comments in the EDK2 code base + * suggest that EFI_GUID should be 64-bit aligned, but this appears to be + * a mistake, given that no code seems to exist that actually enforces that + * or relies on it. + */ +typedef guid_t efi_guid_t __aligned(__alignof__(u32)); #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) @@ -1699,19 +1712,19 @@ extern int efi_tpm_eventlog_init(void); * fault happened while executing an efi runtime service. */ enum efi_rts_ids { - NONE, - GET_TIME, - SET_TIME, - GET_WAKEUP_TIME, - SET_WAKEUP_TIME, - GET_VARIABLE, - GET_NEXT_VARIABLE, - SET_VARIABLE, - QUERY_VARIABLE_INFO, - GET_NEXT_HIGH_MONO_COUNT, - RESET_SYSTEM, - UPDATE_CAPSULE, - QUERY_CAPSULE_CAPS, + EFI_NONE, + EFI_GET_TIME, + EFI_SET_TIME, + EFI_GET_WAKEUP_TIME, + EFI_SET_WAKEUP_TIME, + EFI_GET_VARIABLE, + EFI_GET_NEXT_VARIABLE, + EFI_SET_VARIABLE, + EFI_QUERY_VARIABLE_INFO, + EFI_GET_NEXT_HIGH_MONO_COUNT, + EFI_RESET_SYSTEM, + EFI_UPDATE_CAPSULE, + EFI_QUERY_CAPSULE_CAPS, }; /* diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 27dc7a60693e..d019df946cb2 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -12,7 +12,7 @@ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) #ifndef force_o_largefile -#define force_o_largefile() (BITS_PER_LONG != 32) +#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T)) #endif #if BITS_PER_LONG == 32 diff --git a/include/linux/filter.h b/include/linux/filter.h index 7e5e3db11106..6074aa064b54 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1007,6 +1007,7 @@ bpf_address_lookup(unsigned long addr, unsigned long *size, void bpf_prog_kallsyms_add(struct bpf_prog *fp); void bpf_prog_kallsyms_del(struct bpf_prog *fp); +void bpf_get_prog_name(const struct bpf_prog *prog, char *sym); #else /* CONFIG_BPF_JIT */ @@ -1062,6 +1063,12 @@ static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) { } + +static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) +{ + sym[0] = '\0'; +} + #endif /* CONFIG_BPF_JIT */ void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp); diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h index e21c49aba92f..031dd4d3c766 100644 --- a/include/linux/firmware/imx/svc/misc.h +++ b/include/linux/firmware/imx/svc/misc.h @@ -52,4 +52,7 @@ int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource, int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource, u8 ctrl, u32 *val); +int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource, + bool enable, u64 phys_addr); + #endif /* _SC_MISC_API_H */ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 3c3c28eff56a..642dab10f65d 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -28,12 +28,35 @@ /* SMC SIP service Call Function Identifier Prefix */ #define PM_SIP_SVC 0xC2000000 #define PM_GET_TRUSTZONE_VERSION 0xa03 +#define PM_SET_SUSPEND_MODE 0xa02 +#define GET_CALLBACK_DATA 0xa01 /* Number of 32bits values in payload */ #define PAYLOAD_ARG_CNT 4U +/* Number of arguments for a 
callback */ +#define CB_ARG_CNT 4 + +/* Payload size (consists of callback API ID + arguments) */ +#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1) + +#define ZYNQMP_PM_MAX_QOS 100U + +/* Node capabilities */ +#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U +#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U +#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U +#define ZYNQMP_PM_CAPABILITY_POWER 0x8U + enum pm_api_id { PM_GET_API_VERSION = 1, + PM_REQUEST_NODE = 13, + PM_RELEASE_NODE, + PM_SET_REQUIREMENT, + PM_RESET_ASSERT = 17, + PM_RESET_GET_STATUS, + PM_PM_INIT_FINALIZE = 21, + PM_GET_CHIPID = 24, PM_IOCTL = 34, PM_QUERY_DATA, PM_CLOCK_ENABLE, @@ -75,6 +98,149 @@ enum pm_query_id { PM_QID_CLOCK_GET_NUM_CLOCKS = 12, }; +enum zynqmp_pm_reset_action { + PM_RESET_ACTION_RELEASE, + PM_RESET_ACTION_ASSERT, + PM_RESET_ACTION_PULSE, +}; + +enum zynqmp_pm_reset { + ZYNQMP_PM_RESET_START = 1000, + ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START, + ZYNQMP_PM_RESET_PCIE_BRIDGE, + ZYNQMP_PM_RESET_PCIE_CTRL, + ZYNQMP_PM_RESET_DP, + ZYNQMP_PM_RESET_SWDT_CRF, + ZYNQMP_PM_RESET_AFI_FM5, + ZYNQMP_PM_RESET_AFI_FM4, + ZYNQMP_PM_RESET_AFI_FM3, + ZYNQMP_PM_RESET_AFI_FM2, + ZYNQMP_PM_RESET_AFI_FM1, + ZYNQMP_PM_RESET_AFI_FM0, + ZYNQMP_PM_RESET_GDMA, + ZYNQMP_PM_RESET_GPU_PP1, + ZYNQMP_PM_RESET_GPU_PP0, + ZYNQMP_PM_RESET_GPU, + ZYNQMP_PM_RESET_GT, + ZYNQMP_PM_RESET_SATA, + ZYNQMP_PM_RESET_ACPU3_PWRON, + ZYNQMP_PM_RESET_ACPU2_PWRON, + ZYNQMP_PM_RESET_ACPU1_PWRON, + ZYNQMP_PM_RESET_ACPU0_PWRON, + ZYNQMP_PM_RESET_APU_L2, + ZYNQMP_PM_RESET_ACPU3, + ZYNQMP_PM_RESET_ACPU2, + ZYNQMP_PM_RESET_ACPU1, + ZYNQMP_PM_RESET_ACPU0, + ZYNQMP_PM_RESET_DDR, + ZYNQMP_PM_RESET_APM_FPD, + ZYNQMP_PM_RESET_SOFT, + ZYNQMP_PM_RESET_GEM0, + ZYNQMP_PM_RESET_GEM1, + ZYNQMP_PM_RESET_GEM2, + ZYNQMP_PM_RESET_GEM3, + ZYNQMP_PM_RESET_QSPI, + ZYNQMP_PM_RESET_UART0, + ZYNQMP_PM_RESET_UART1, + ZYNQMP_PM_RESET_SPI0, + ZYNQMP_PM_RESET_SPI1, + ZYNQMP_PM_RESET_SDIO0, + ZYNQMP_PM_RESET_SDIO1, + ZYNQMP_PM_RESET_CAN0, + ZYNQMP_PM_RESET_CAN1, + ZYNQMP_PM_RESET_I2C0, + ZYNQMP_PM_RESET_I2C1, + ZYNQMP_PM_RESET_TTC0, + ZYNQMP_PM_RESET_TTC1, + ZYNQMP_PM_RESET_TTC2, + ZYNQMP_PM_RESET_TTC3, + ZYNQMP_PM_RESET_SWDT_CRL, + ZYNQMP_PM_RESET_NAND, + ZYNQMP_PM_RESET_ADMA, + ZYNQMP_PM_RESET_GPIO, + ZYNQMP_PM_RESET_IOU_CC, + ZYNQMP_PM_RESET_TIMESTAMP, + ZYNQMP_PM_RESET_RPU_R50, + ZYNQMP_PM_RESET_RPU_R51, + ZYNQMP_PM_RESET_RPU_AMBA, + ZYNQMP_PM_RESET_OCM, + ZYNQMP_PM_RESET_RPU_PGE, + ZYNQMP_PM_RESET_USB0_CORERESET, + ZYNQMP_PM_RESET_USB1_CORERESET, + ZYNQMP_PM_RESET_USB0_HIBERRESET, + ZYNQMP_PM_RESET_USB1_HIBERRESET, + ZYNQMP_PM_RESET_USB0_APB, + ZYNQMP_PM_RESET_USB1_APB, + ZYNQMP_PM_RESET_IPI, + ZYNQMP_PM_RESET_APM_LPD, + ZYNQMP_PM_RESET_RTC, + ZYNQMP_PM_RESET_SYSMON, + ZYNQMP_PM_RESET_AFI_FM6, + ZYNQMP_PM_RESET_LPD_SWDT, + ZYNQMP_PM_RESET_FPD, + ZYNQMP_PM_RESET_RPU_DBG1, + ZYNQMP_PM_RESET_RPU_DBG0, + ZYNQMP_PM_RESET_DBG_LPD, + ZYNQMP_PM_RESET_DBG_FPD, + ZYNQMP_PM_RESET_APLL, + ZYNQMP_PM_RESET_DPLL, + ZYNQMP_PM_RESET_VPLL, + ZYNQMP_PM_RESET_IOPLL, + ZYNQMP_PM_RESET_RPLL, + ZYNQMP_PM_RESET_GPO3_PL_0, + ZYNQMP_PM_RESET_GPO3_PL_1, + ZYNQMP_PM_RESET_GPO3_PL_2, + ZYNQMP_PM_RESET_GPO3_PL_3, + ZYNQMP_PM_RESET_GPO3_PL_4, + ZYNQMP_PM_RESET_GPO3_PL_5, + ZYNQMP_PM_RESET_GPO3_PL_6, + ZYNQMP_PM_RESET_GPO3_PL_7, + ZYNQMP_PM_RESET_GPO3_PL_8, + ZYNQMP_PM_RESET_GPO3_PL_9, + ZYNQMP_PM_RESET_GPO3_PL_10, + ZYNQMP_PM_RESET_GPO3_PL_11, + ZYNQMP_PM_RESET_GPO3_PL_12, + ZYNQMP_PM_RESET_GPO3_PL_13, + ZYNQMP_PM_RESET_GPO3_PL_14, + ZYNQMP_PM_RESET_GPO3_PL_15, + ZYNQMP_PM_RESET_GPO3_PL_16, + ZYNQMP_PM_RESET_GPO3_PL_17, + 
ZYNQMP_PM_RESET_GPO3_PL_18, + ZYNQMP_PM_RESET_GPO3_PL_19, + ZYNQMP_PM_RESET_GPO3_PL_20, + ZYNQMP_PM_RESET_GPO3_PL_21, + ZYNQMP_PM_RESET_GPO3_PL_22, + ZYNQMP_PM_RESET_GPO3_PL_23, + ZYNQMP_PM_RESET_GPO3_PL_24, + ZYNQMP_PM_RESET_GPO3_PL_25, + ZYNQMP_PM_RESET_GPO3_PL_26, + ZYNQMP_PM_RESET_GPO3_PL_27, + ZYNQMP_PM_RESET_GPO3_PL_28, + ZYNQMP_PM_RESET_GPO3_PL_29, + ZYNQMP_PM_RESET_GPO3_PL_30, + ZYNQMP_PM_RESET_GPO3_PL_31, + ZYNQMP_PM_RESET_RPU_LS, + ZYNQMP_PM_RESET_PS_ONLY, + ZYNQMP_PM_RESET_PL, + ZYNQMP_PM_RESET_PS_PL0, + ZYNQMP_PM_RESET_PS_PL1, + ZYNQMP_PM_RESET_PS_PL2, + ZYNQMP_PM_RESET_PS_PL3, + ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3 +}; + +enum zynqmp_pm_suspend_reason { + SUSPEND_POWER_REQUEST = 201, + SUSPEND_ALERT, + SUSPEND_SYSTEM_SHUTDOWN, +}; + +enum zynqmp_pm_request_ack { + ZYNQMP_PM_REQUEST_ACK_NO = 1, + ZYNQMP_PM_REQUEST_ACK_BLOCKING, + ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING, +}; + /** * struct zynqmp_pm_query_data - PM query data * @qid: query ID @@ -91,6 +257,7 @@ struct zynqmp_pm_query_data { struct zynqmp_eemi_ops { int (*get_api_version)(u32 *version); + int (*get_chipid)(u32 *idcode, u32 *version); int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); int (*clock_enable)(u32 clock_id); int (*clock_disable)(u32 clock_id); @@ -102,8 +269,25 @@ struct zynqmp_eemi_ops { int (*clock_setparent)(u32 clock_id, u32 parent_id); int (*clock_getparent)(u32 clock_id, u32 *parent_id); int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out); + int (*reset_assert)(const enum zynqmp_pm_reset reset, + const enum zynqmp_pm_reset_action assert_flag); + int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status); + int (*init_finalize)(void); + int (*set_suspend_mode)(u32 mode); + int (*request_node)(const u32 node, + const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack); + int (*release_node)(const u32 node); + int (*set_requirement)(const u32 node, + const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack); }; +int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, + u32 arg2, u32 arg3, u32 *ret_payload); + #if IS_REACHABLE(CONFIG_ARCH_ZYNQMP) const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); #else diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 941b11811f85..1fc0edd71c52 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -135,8 +135,6 @@ struct ccsr_guts { u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ } __attribute__ ((packed)); -u32 fsl_guts_get_svr(void); - /* Alternate function signal multiplex control */ #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index a7083a45a26c..6049baa5b8bc 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -13,6 +13,7 @@ #include <linux/securebits.h> #include <linux/seqlock.h> #include <linux/rbtree.h> +#include <linux/refcount.h> #include <linux/sched/autogroup.h> #include <net/net_namespace.h> #include <linux/sched/rt.h> diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4a728dba02e2..690b238a44d5 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -156,6 +156,10 @@ __request_percpu_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *devname, void __percpu *percpu_dev_id); +extern int __must_check +request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev); + static inline int __must_check 
request_percpu_irq(unsigned int irq, irq_handler_t handler, const char *devname, void __percpu *percpu_dev_id) @@ -164,9 +168,16 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler, devname, percpu_dev_id); } +extern int __must_check +request_percpu_nmi(unsigned int irq, irq_handler_t handler, + const char *devname, void __percpu *dev); + extern const void *free_irq(unsigned int, void *); extern void free_percpu_irq(unsigned int, void __percpu *); +extern const void *free_nmi(unsigned int irq, void *dev_id); +extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); + struct device; extern int __must_check @@ -217,6 +228,13 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type); extern bool irq_percpu_is_enabled(unsigned int irq); extern void irq_wake_thread(unsigned int irq, void *dev_id); +extern void disable_nmi_nosync(unsigned int irq); +extern void disable_percpu_nmi(unsigned int irq); +extern void enable_nmi(unsigned int irq); +extern void enable_percpu_nmi(unsigned int irq, unsigned int type); +extern int prepare_percpu_nmi(unsigned int irq); +extern void teardown_percpu_nmi(unsigned int irq); + /* The following three functions are for the core kernel use only. */ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); @@ -241,20 +259,29 @@ struct irq_affinity_notify { void (*release)(struct kref *ref); }; +#define IRQ_AFFINITY_MAX_SETS 4 + /** * struct irq_affinity - Description for automatic irq affinity assignements * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of * the MSI(-X) vector space * @post_vectors: Don't apply affinity to @post_vectors at end of * the MSI(-X) vector space - * @nr_sets: Length of passed in *sets array - * @sets: Number of affinitized sets + * @nr_sets: The number of interrupt sets for which affinity + * spreading is required + * @set_size: Array holding the size of each interrupt set + * @calc_sets: Callback for calculating the number and size + * of interrupt sets + * @priv: Private data for usage by @calc_sets, usually a + * pointer to driver/device specific data. 
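The reworked struct irq_affinity (defined just below) replaces the caller-supplied *sets array with a fixed set_size[] array plus a calc_sets() callback, so the core can re-run the set calculation for however many vectors it actually managed to allocate. A hypothetical multi-queue PCI driver might use it roughly as follows (the driver names and the two-set split are illustrative only):

/* Hypothetical driver: names and the two-set split are illustrative only. */
struct example_queues {
	unsigned int nr_read;
	unsigned int nr_write;
};

static void example_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	struct example_queues *q = affd->priv;

	/* Re-run for whatever number of spreadable vectors was granted. */
	q->nr_read  = nvecs / 2;
	q->nr_write = nvecs - q->nr_read;

	affd->nr_sets     = 2;
	affd->set_size[0] = q->nr_read;
	affd->set_size[1] = q->nr_write;
}

static int example_setup_vectors(struct pci_dev *pdev, struct example_queues *q)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* admin vector, not spread */
		.calc_sets	= example_calc_sets,
		.priv		= q,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 3, 65,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}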
*/ struct irq_affinity { - int pre_vectors; - int post_vectors; - int nr_sets; - int *sets; + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; + void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); + void *priv; }; /** @@ -314,9 +341,10 @@ extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); struct irq_affinity_desc * -irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); +irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); -int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); +unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, + const struct irq_affinity *affd); #else /* CONFIG_SMP */ @@ -350,13 +378,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) } static inline struct irq_affinity_desc * -irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) +irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd) { return NULL; } -static inline int -irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) +static inline unsigned int +irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, + const struct irq_affinity *affd) { return maxvec; } diff --git a/include/linux/irq.h b/include/linux/irq.h index def2b2aac8b1..5e91f6bcaacd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -442,6 +442,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine * @ipi_send_single: send a single IPI to destination cpus * @ipi_send_mask: send an IPI to destination cpus in cpumask + * @irq_nmi_setup: function called from core code before enabling an NMI + * @irq_nmi_teardown: function called from core code after disabling an NMI * @flags: chip specific flags */ struct irq_chip { @@ -490,6 +492,9 @@ struct irq_chip { void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); + int (*irq_nmi_setup)(struct irq_data *data); + void (*irq_nmi_teardown)(struct irq_data *data); + unsigned long flags; }; @@ -505,6 +510,7 @@ struct irq_chip { * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs + * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -515,6 +521,7 @@ enum { IRQCHIP_ONESHOT_SAFE = (1 << 5), IRQCHIP_EOI_THREADED = (1 << 6), IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), + IRQCHIP_SUPPORTS_NMI = (1 << 8), }; #include <linux/irqdesc.h> @@ -594,6 +601,9 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); +extern void handle_fasteoi_nmi(struct irq_desc *desc); +extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); + extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); extern int irq_chip_pm_get(struct irq_data *data); extern int irq_chip_pm_put(struct irq_data *data); diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h new file mode 100644 index 000000000000..ea4e087fac98 --- /dev/null +++ 
b/include/linux/irqchip/irq-davinci-aintc.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019 Texas Instruments + */ + +#ifndef _LINUX_IRQ_DAVINCI_AINTC_ +#define _LINUX_IRQ_DAVINCI_AINTC_ + +#include <linux/ioport.h> + +/** + * struct davinci_aintc_config - configuration data for davinci-aintc driver. + * + * @reg: register range to map + * @num_irqs: number of HW interrupts supported by the controller + * @prios: an array of size num_irqs containing priority settings for + * each interrupt + */ +struct davinci_aintc_config { + struct resource reg; + unsigned int num_irqs; + u8 *prios; +}; + +void davinci_aintc_init(const struct davinci_aintc_config *config); + +#endif /* _LINUX_IRQ_DAVINCI_AINTC_ */ diff --git a/include/linux/irqchip/irq-davinci-cp-intc.h b/include/linux/irqchip/irq-davinci-cp-intc.h new file mode 100644 index 000000000000..8d71ed5b5a61 --- /dev/null +++ b/include/linux/irqchip/irq-davinci-cp-intc.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2019 Texas Instruments + */ + +#ifndef _LINUX_IRQ_DAVINCI_CP_INTC_ +#define _LINUX_IRQ_DAVINCI_CP_INTC_ + +#include <linux/ioport.h> + +/** + * struct davinci_cp_intc_config - configuration data for davinci-cp-intc + * driver. + * + * @reg: register range to map + * @num_irqs: number of HW interrupts supported by the controller + */ +struct davinci_cp_intc_config { + struct resource reg; + unsigned int num_irqs; +}; + +int davinci_cp_intc_init(const struct davinci_cp_intc_config *config); + +#endif /* _LINUX_IRQ_DAVINCI_CP_INTC_ */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index dd1e40ddac7d..d6e2ab538ef2 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -28,6 +28,7 @@ struct pt_regs; * @core_internal_state__do_not_mess_with_it: core internal status information * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers + * @tot_count: stats field for non-percpu irqs * @irq_count: stats field to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts @@ -65,6 +66,7 @@ struct irq_desc { unsigned int core_internal_state__do_not_mess_with_it; unsigned int depth; /* nested irq disables */ unsigned int wake_depth; /* nested wake enables */ + unsigned int tot_count; unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; @@ -171,6 +173,11 @@ static inline int handle_domain_irq(struct irq_domain *domain, { return __handle_domain_irq(domain, hwirq, true, regs); } + +#ifdef CONFIG_IRQ_DOMAIN +int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, + struct pt_regs *regs); +#endif #endif /* Test to see if a driver has successfully requested an irq */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 35965f41d7be..d2130dc7c0e6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -265,6 +265,7 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern bool irq_domain_check_msi_remap(void); extern void irq_set_default_host(struct irq_domain *host); +extern struct irq_domain *irq_get_default_host(void); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity); diff --git 
a/include/linux/kthread.h b/include/linux/kthread.h index c1961761311d..2c89e60bc752 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -56,6 +56,7 @@ void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); int kthread_stop(struct task_struct *k); bool kthread_should_stop(void); bool kthread_should_park(void); +bool __kthread_should_park(struct task_struct *k); bool kthread_freezable_should_stop(bool *was_frozen); void *kthread_data(struct task_struct *k); void *kthread_probe_data(struct task_struct *k); @@ -85,7 +86,7 @@ enum { struct kthread_worker { unsigned int flags; - spinlock_t lock; + raw_spinlock_t lock; struct list_head work_list; struct list_head delayed_work_list; struct task_struct *task; @@ -106,7 +107,7 @@ struct kthread_delayed_work { }; #define KTHREAD_WORKER_INIT(worker) { \ - .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ + .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ .work_list = LIST_HEAD_INIT((worker).work_list), \ .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ } @@ -164,9 +165,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker, #define kthread_init_delayed_work(dwork, fn) \ do { \ kthread_init_work(&(dwork)->work, (fn)); \ - __init_timer(&(dwork)->timer, \ - kthread_delayed_work_timer_fn, \ - TIMER_IRQSAFE); \ + timer_setup(&(dwork)->timer, \ + kthread_delayed_work_timer_fn, 0); \ } while (0) int kthread_worker_fn(void *worker_ptr); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index c5335df2372f..79c3873d58ac 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -46,16 +46,22 @@ extern int lock_stat; #define NR_LOCKDEP_CACHING_CLASSES 2 /* - * Lock-classes are keyed via unique addresses, by embedding the - * lockclass-key into the kernel (or module) .data section. (For - * static locks we use the lock address itself as the key.) + * A lockdep key is associated with each lock object. For static locks we use + * the lock address itself as the key. Dynamically allocated lock objects can + * have a statically or dynamically allocated key. Dynamically allocated lock + * keys must be registered before being used and must be unregistered before + * the key memory is freed. */ struct lockdep_subclass_key { char __one_byte; } __attribute__ ((__packed__)); +/* hash_entry is used to keep track of dynamically allocated keys. */ struct lock_class_key { - struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; + union { + struct hlist_node hash_entry; + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; + }; }; extern struct lock_class_key __lockdep_no_validate__; @@ -63,7 +69,8 @@ extern struct lock_class_key __lockdep_no_validate__; #define LOCKSTAT_POINTS 4 /* - * The lock-class itself: + * The lock-class itself. The order of the structure members matters. + * reinit_class() zeroes the key member and all subsequent members. */ struct lock_class { /* @@ -72,10 +79,19 @@ struct lock_class { struct hlist_node hash_entry; /* - * global list of all lock-classes: + * Entry in all_lock_classes when in use. Entry in free_lock_classes + * when not in use. Instances that are being freed are on one of the + * zapped_classes lists. */ struct list_head lock_entry; + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. 
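Besides reordering struct lock_class, this lockdep rework adds dynamically allocated keys via lockdep_register_key() and lockdep_unregister_key(), declared further down in this hunk. As the updated comment states, a key living in dynamically allocated memory must be registered before first use and unregistered before that memory is freed; a minimal sketch of the expected usage (hypothetical object and function names):

/*
 * Minimal sketch: a dynamically allocated lock with a dynamically
 * allocated lockdep key.  Register the key before the lock is used,
 * unregister it before the backing memory is freed.
 */
struct example_object {
	spinlock_t lock;
	struct lock_class_key key;
};

static struct example_object *example_object_create(void)
{
	struct example_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);

	return obj;
}

static void example_object_destroy(struct example_object *obj)
{
	lockdep_unregister_key(&obj->key);
	kfree(obj);
}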
+ */ + struct list_head locks_after, locks_before; + struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; @@ -87,13 +103,6 @@ struct lock_class { struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; /* - * These fields represent a directed graph of lock dependencies, - * to every node we attach a list of "forward" and a list of - * "backward" graph nodes. - */ - struct list_head locks_after, locks_before; - - /* * Generation counter, when doing certain classes of graph walking, * to ensure that we check one node only once: */ @@ -104,7 +113,7 @@ struct lock_class { unsigned long contention_point[LOCKSTAT_POINTS]; unsigned long contending_point[LOCKSTAT_POINTS]; #endif -}; +} __no_randomize_layout; #ifdef CONFIG_LOCK_STAT struct lock_time { @@ -178,6 +187,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to, struct lock_list { struct list_head entry; struct lock_class *class; + struct lock_class *links_to; struct stack_trace trace; int distance; @@ -264,10 +274,14 @@ extern void lockdep_reset(void); extern void lockdep_reset_lock(struct lockdep_map *lock); extern void lockdep_free_key_range(void *start, unsigned long size); extern asmlinkage void lockdep_sys_exit(void); +extern void lockdep_set_selftest_task(struct task_struct *task); extern void lockdep_off(void); extern void lockdep_on(void); +extern void lockdep_register_key(struct lock_class_key *key); +extern void lockdep_unregister_key(struct lock_class_key *key); + /* * These methods are used by specific locking variants (spinlocks, * rwlocks, mutexes and rwsems) to pass init/acquire/release events @@ -394,6 +408,10 @@ static inline void lockdep_on(void) { } +static inline void lockdep_set_selftest_task(struct task_struct *task) +{ +} + # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) # define lock_downgrade(l, i) do { } while (0) @@ -425,6 +443,14 @@ static inline void lockdep_on(void) */ struct lock_class_key { }; +static inline void lockdep_register_key(struct lock_class_key *key) +{ +} + +static inline void lockdep_unregister_key(struct lock_class_key *key) +{ +} + /* * The lockdep_map takes no space if lockdep is disabled: */ diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h new file mode 100644 index 000000000000..ed37dc40e82a --- /dev/null +++ b/include/linux/mfd/bcm2835-pm.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef BCM2835_MFD_PM_H +#define BCM2835_MFD_PM_H + +#include <linux/regmap.h> + +struct bcm2835_pm { + struct device *dev; + void __iomem *base; + void __iomem *asb; +}; + +#endif /* BCM2835_MFD_PM_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index f9bd2f34b99f..14eaeeb46f41 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -779,4 +779,13 @@ struct typec_device_id { kernel_ulong_t driver_data; }; +/** + * struct tee_client_device_id - tee based device identifier + * @uuid: For TEE based client devices we use the device uuid as + * the identifier. 
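For the struct tee_client_device_id defined immediately below, a TEE client driver would typically declare a UUID match table; the sketch uses a placeholder UUID and driver name, and assumes the accompanying TEE bus support that consumes these ids:

/*
 * Hypothetical match table for a TEE client driver.  The UUID is a
 * placeholder; real drivers match on the UUID of the trusted application
 * they talk to, and the table is consumed by the TEE bus match code.
 */
static const struct tee_client_device_id example_tee_id_table[] = {
	{ UUID_INIT(0x01234567, 0x89ab, 0xcdef,
		    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef) },
	{ }
};
MODULE_DEVICE_TABLE(tee, example_tee_id_table);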
+ */ +struct tee_client_device_id { + uuid_t uuid; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 65f1d8c2f082..e7c51b00cdfe 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1393,7 +1393,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, } int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags, - const struct irq_affinity *affd); + struct irq_affinity *affd); void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); @@ -1419,7 +1419,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, static inline int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags, - const struct irq_affinity *aff_desc) + struct irq_affinity *aff_desc) { if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) return 1; diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 71b75643c432..03cb4b6f842e 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \ extern int __percpu_down_read(struct percpu_rw_semaphore *, int); extern void __percpu_up_read(struct percpu_rw_semaphore *); -static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) +static inline void percpu_down_read(struct percpu_rw_semaphore *sem) { might_sleep(); @@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * __this_cpu_inc(*sem->read_count); if (unlikely(!rcu_sync_is_idle(&sem->rss))) __percpu_down_read(sem, false); /* Unconditional memory barrier */ - barrier(); /* - * The barrier() prevents the compiler from + * The preempt_enable() prevents the compiler from * bleeding the critical section out. */ -} - -static inline void percpu_down_read(struct percpu_rw_semaphore *sem) -{ - percpu_down_read_preempt_disable(sem); preempt_enable(); } @@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) return ret; } -static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) +static inline void percpu_up_read(struct percpu_rw_semaphore *sem) { - /* - * The barrier() prevents the compiler from - * bleeding the critical section out. - */ - barrier(); + preempt_disable(); /* * Same as in percpu_down_read(). 
*/ @@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); } -static inline void percpu_up_read(struct percpu_rw_semaphore *sem) -{ - preempt_disable(); - percpu_up_read_preempt_enable(sem); -} - extern void percpu_down_write(struct percpu_rw_semaphore *); extern void percpu_up_write(struct percpu_rw_semaphore *); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e1a051724f7e..e47ef764f613 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -53,8 +53,8 @@ struct perf_guest_info_callbacks { #include <linux/atomic.h> #include <linux/sysfs.h> #include <linux/perf_regs.h> -#include <linux/workqueue.h> #include <linux/cgroup.h> +#include <linux/refcount.h> #include <asm/local.h> struct perf_callchain_entry { @@ -244,6 +244,7 @@ struct perf_event; #define PERF_PMU_CAP_EXCLUSIVE 0x10 #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 +#define PERF_PMU_CAP_NO_EXCLUDE 0x80 /** * struct pmu - generic performance monitoring unit @@ -409,7 +410,7 @@ struct pmu { /* * Set up pmu-private data structures for an AUX area */ - void *(*setup_aux) (int cpu, void **pages, + void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */ @@ -494,6 +495,11 @@ struct perf_addr_filters_head { unsigned int nr_file_filters; }; +struct perf_addr_filter_range { + unsigned long start; + unsigned long size; +}; + /** * enum perf_event_state - the states of an event: */ @@ -670,7 +676,7 @@ struct perf_event { /* address range filters */ struct perf_addr_filters_head addr_filters; /* vma address array for file-based filders */ - unsigned long *addr_filters_offs; + struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; void (*destroy)(struct perf_event *); @@ -742,7 +748,7 @@ struct perf_event_context { int nr_stat; int nr_freq; int rotate_disable; - atomic_t refcount; + refcount_t refcount; struct task_struct *task; /* @@ -983,9 +989,9 @@ extern void perf_event_output_forward(struct perf_event *event, extern void perf_event_output_backward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); -extern void perf_event_output(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs); +extern int perf_event_output(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); static inline bool is_default_overflow_handler(struct perf_event *event) @@ -1009,6 +1015,15 @@ perf_event__output_id_sample(struct perf_event *event, extern void perf_log_lost_samples(struct perf_event *event, u64 lost); +static inline bool event_has_any_exclude_flag(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + + return attr->exclude_idle || attr->exclude_user || + attr->exclude_kernel || attr->exclude_hv || + attr->exclude_guest || attr->exclude_host; +} + static inline bool is_sampling_event(struct perf_event *event) { return event->attr.sample_period != 0; @@ -1118,6 +1133,13 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, } extern void perf_event_mmap(struct vm_area_struct *vma); + +extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, + bool unregister, const char *sym); +extern void perf_event_bpf_event(struct bpf_prog *prog, + enum perf_bpf_event_type type, + u16 flags); + extern struct perf_guest_info_callbacks *perf_guest_cbs; extern int 
perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); @@ -1338,6 +1360,13 @@ static inline int perf_unregister_guest_info_callbacks (struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } + +typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); +static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, + bool unregister, const char *sym) { } +static inline void perf_event_bpf_event(struct bpf_prog *prog, + enum perf_bpf_event_type type, + u16 flags) { } static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h index 0926e99f2e8f..879f5c78b91a 100644 --- a/include/linux/platform_data/usb-davinci.h +++ b/include/linux/platform_data/usb-davinci.h @@ -11,22 +11,8 @@ #ifndef __ASM_ARCH_USB_H #define __ASM_ARCH_USB_H -struct da8xx_ohci_root_hub; - -typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub, - unsigned port); - /* Passed as the platform data to the OHCI driver */ struct da8xx_ohci_root_hub { - /* Switch the port power on/off */ - int (*set_power)(unsigned port, int on); - /* Read the port power status */ - int (*get_power)(unsigned port); - /* Read the port over-current indicator */ - int (*get_oci)(unsigned port); - /* Over-current indicator change notification (pass NULL to disable) */ - int (*ocic_notify)(da8xx_ocic_handler_t handler); - /* Time from power on to power good (in 2 ms units) */ u8 potpgt; }; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index b895f4e79868..900359342965 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -86,6 +86,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); +unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); + bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); @@ -158,6 +160,11 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) return 0; } +static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) +{ + return 0; +} + static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) { return false; diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 3a3bc71017d5..18674d7d5b1c 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -51,7 +51,7 @@ struct posix_clock; struct posix_clock_operations { struct module *owner; - int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); + int (*clock_adjtime)(struct posix_clock *pc, struct __kernel_timex *tx); int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index e96581ca7c9d..b20798fc5191 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -12,7 +12,7 @@ struct siginfo; struct cpu_timer_list { struct list_head entry; - u64 expires, incr; + u64 expires; struct task_struct *task; int firing; }; diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h index 426cee67f0e2..b8e094b125ee 100644 --- a/include/linux/rcu_node_tree.h +++ 
b/include/linux/rcu_node_tree.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU node combining tree definitions. These are used to compute * global attributes while avoiding common-case global contention. A key @@ -11,23 +12,9 @@ * because the size of the TREE SRCU srcu_struct structure depends * on these definitions. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2017 * - * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + * Author: Paul E. McKenney <paulmck@linux.ibm.com> */ #ifndef __LINUX_RCU_NODE_TREE_H diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index c3ad00e63556..87404cb015f1 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU segmented callback lists * @@ -5,23 +6,9 @@ * because the size of the TREE SRCU srcu_struct structure depends * on these definitions. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2017 * - * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + * Authors: Paul E. McKenney <paulmck@linux.ibm.com> */ #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index ece7ed9a4a70..6fc53a1345b3 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -1,20 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * RCU-based infrastructure for lightweight reader-writer locking * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (c) 2015, Red Hat, Inc.
* * Author: Oleg Nesterov <oleg@redhat.com> diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4db8bcacc51a..6cdb1db776cf 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1,25 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * - * Based on the original work by Paul McKenney <paulmck@us.ibm.com> + * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf @@ -89,7 +76,7 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active __read_mostly; -void rcu_check_callbacks(int user); +void rcu_sched_clock_irq(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); @@ -309,16 +296,16 @@ static inline void rcu_preempt_sleep_check(void) { } */ #ifdef __CHECKER__ -#define rcu_dereference_sparse(p, space) \ +#define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ -#define rcu_dereference_sparse(p, space) +#define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __rcu_access_pointer(p, space) \ ({ \ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) #define __rcu_dereference_check(p, c, space) \ @@ -326,13 +313,13 @@ static inline void rcu_preempt_sleep_check(void) { } /* Dependency order vs. p above.
*/ \ typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ - rcu_dereference_sparse(p, space); \ + rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define rcu_dereference_raw(p) \ @@ -382,6 +369,7 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_assign_pointer(p, v) \ ({ \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ + rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ @@ -785,7 +773,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define RCU_INIT_POINTER(p, v) \ do { \ - rcu_dereference_sparse(p, __rcu); \ + rcu_check_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) @@ -859,7 +847,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /* Has the specified rcu_head structure been handed to call_rcu()? */ -/* +/** * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() * @rhp: The rcu_head structure to initialize. * @@ -874,10 +862,10 @@ static inline void rcu_head_init(struct rcu_head *rhp) rhp->func = (rcu_callback_t)~0L; } -/* +/** * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. - * @func: The function passed to call_rcu() along with @rhp. + * @f: The function passed to call_rcu() along with @rhp. * * Returns @true if the @rhp has been passed to call_rcu() with @func, * and @false otherwise. Emits a warning in any other case, including @@ -896,57 +884,4 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) return false; } - -/* Transitional pre-consolidation compatibility definitions. */ - -static inline void synchronize_rcu_bh(void) -{ - synchronize_rcu(); -} - -static inline void synchronize_rcu_bh_expedited(void) -{ - synchronize_rcu_expedited(); -} - -static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} - -static inline void rcu_barrier_bh(void) -{ - rcu_barrier(); -} - -static inline void synchronize_sched(void) -{ - synchronize_rcu(); -} - -static inline void synchronize_sched_expedited(void) -{ - synchronize_rcu_expedited(); -} - -static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) -{ - call_rcu(head, func); -} - -static inline void rcu_barrier_sched(void) -{ - rcu_barrier(); -} - -static inline unsigned long get_state_synchronize_sched(void) -{ - return get_state_synchronize_rcu(); -} - -static inline void cond_synchronize_sched(unsigned long oldstate) -{ - cond_synchronize_rcu(oldstate); -} - #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index af65d1f36ddb..8e727f57d814 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2008 * - * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + * Author: Paul E. McKenney <paulmck@linux.ibm.com> * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 7f83179177d1..735601ac27d3 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -1,26 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2008 * * Author: Dipankar Sarma <dipankar@in.ibm.com> - * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm + * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm * - * Based on the original work by Paul McKenney <paulmck@us.ibm.com> + * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - diff --git a/include/linux/reset/socfpga.h b/include/linux/reset/socfpga.h new file mode 100644 index 000000000000..b11a2047c342 --- /dev/null +++ b/include/linux/reset/socfpga.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_RESET_SOCFPGA_H__ +#define __LINUX_RESET_SOCFPGA_H__ + +void __init socfpga_reset_init(void); + +#endif /* __LINUX_RESET_SOCFPGA_H__ */ diff --git a/include/linux/reset/sunxi.h b/include/linux/reset/sunxi.h new file mode 100644 index 000000000000..1ad7fffb413e --- /dev/null +++ b/include/linux/reset/sunxi.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_RESET_SUNXI_H__ +#define __LINUX_RESET_SUNXI_H__ + +void __init sun6i_reset_init(void); + +#endif /* __LINUX_RESET_SUNXI_H__ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 36ec6e7e8291..f073bd59df32 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -21,6 +21,7 @@ #include <linux/seccomp.h> #include <linux/nodemask.h> #include <linux/rcupdate.h> +#include <linux/refcount.h> #include <linux/resource.h> #include <linux/latencytop.h> #include <linux/sched/prio.h> @@ -357,12 +358,6 @@ struct util_est { * For cfs_rq, it is the aggregated load_avg of all runnable and * blocked sched_entities. 
* - * load_avg may also take frequency scaling into account: - * - * load_avg = runnable% * scale_load_down(load) * freq% - * - * where freq% is the CPU frequency normalized to the highest frequency. - * * [util_avg definition] * * util_avg = running% * SCHED_CAPACITY_SCALE @@ -371,17 +366,14 @@ struct util_est { * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable * and blocked sched_entities. * - * util_avg may also factor frequency scaling and CPU capacity scaling: + * load_avg and util_avg don't direcly factor frequency scaling and CPU + * capacity scaling. The scaling is done through the rq_clock_pelt that + * is used for computing those signals (see update_rq_clock_pelt()) * - * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% - * - * where freq% is the same as above, and capacity% is the CPU capacity - * normalized to the greatest capacity (due to uarch differences, etc). - * - * N.B., the above ratios (runnable%, running%, freq%, and capacity%) - * themselves are in the range of [0, 1]. To do fixed point arithmetics, - * we therefore scale them to as large a range as necessary. This is for - * example reflected by util_avg's SCHED_CAPACITY_SCALE. + * N.B., the above ratios (runnable% and running%) themselves are in the + * range of [0, 1]. To do fixed point arithmetics, we therefore scale them + * to as large a range as necessary. This is for example reflected by + * util_avg's SCHED_CAPACITY_SCALE. * * [Overflow issue] * @@ -608,7 +600,7 @@ struct task_struct { randomized_struct_fields_start void *stack; - atomic_t usage; + refcount_t usage; /* Per task flags (PF_*), defined further below: */ unsigned int flags; unsigned int ptrace; @@ -1191,7 +1183,7 @@ struct task_struct { #endif #ifdef CONFIG_THREAD_INFO_IN_TASK /* A live task holds one reference: */ - atomic_t stack_refcount; + refcount_t stack_refcount; #endif #ifdef CONFIG_LIVEPATCH int patch_state; @@ -1408,7 +1400,6 @@ extern struct pid *cad_pid; #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ -#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ @@ -1458,6 +1449,7 @@ static inline bool is_percpu_thread(void) #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ +#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ @@ -1486,6 +1478,10 @@ TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) +TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) +TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) + TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) @@ -1753,9 +1749,9 @@ static __always_inline bool need_resched(void) static inline unsigned int task_cpu(const struct 
task_struct *p) { #ifdef CONFIG_THREAD_INFO_IN_TASK - return p->cpu; + return READ_ONCE(p->cpu); #else - return task_thread_info(p)->cpu; + return READ_ONCE(task_thread_info(p)->cpu); #endif } diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 13789d10a50e..ae5655197698 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -8,13 +8,14 @@ #include <linux/sched/jobctl.h> #include <linux/sched/task.h> #include <linux/cred.h> +#include <linux/refcount.h> /* * Types defining task->signal and task->sighand and APIs using them: */ struct sighand_struct { - atomic_t count; + refcount_t count; struct k_sigaction action[_NSIG]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; @@ -82,7 +83,7 @@ struct multiprocess_signals { * the locking of signal_struct. */ struct signal_struct { - atomic_t sigcnt; + refcount_t sigcnt; atomic_t live; int nr_threads; struct list_head thread_head; diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index a9c32daeb9d8..99ce6d728df7 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -83,4 +83,11 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) +extern unsigned int sysctl_sched_energy_aware; +extern int sched_energy_aware_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + #endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 44c6f15800ff..2e97a2227045 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -88,13 +88,13 @@ extern void sched_exec(void); #define sched_exec() {} #endif -#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) +#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0) extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) { - if (atomic_dec_and_test(&t->usage)) + if (refcount_dec_and_test(&t->usage)) __put_task_struct(t); } diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index 6a841929073f..2413427e439c 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -61,7 +61,7 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #ifdef CONFIG_THREAD_INFO_IN_TASK static inline void *try_get_task_stack(struct task_struct *tsk) { - return atomic_inc_not_zero(&tsk->stack_refcount) ? + return refcount_inc_not_zero(&tsk->stack_refcount) ? 
task_stack_page(tsk) : NULL; } diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index c31d3a47a47c..57c7ed3fe465 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -176,10 +176,10 @@ typedef int (*sched_domain_flags_f)(void); #define SDTL_OVERLAP 0x01 struct sd_data { - struct sched_domain **__percpu sd; - struct sched_domain_shared **__percpu sds; - struct sched_group **__percpu sg; - struct sched_group_capacity **__percpu sgc; + struct sched_domain *__percpu *sd; + struct sched_domain_shared *__percpu *sds; + struct sched_group *__percpu *sg; + struct sched_group_capacity *__percpu *sgc; }; struct sched_domain_topology_level { diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 545f37138057..ad826d2a4557 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -51,8 +51,8 @@ static inline void wake_q_init(struct wake_q_head *head) head->lastp = &head->first; } -extern void wake_q_add(struct wake_q_head *head, - struct task_struct *task); +extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); +extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); extern void wake_up_q(struct wake_q_head *head); #endif /* _LINUX_SCHED_WAKE_Q_H */ diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 69c285b1c990..eb71a50b8afc 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -162,6 +162,12 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc); */ int qcom_llcc_probe(struct platform_device *pdev, const struct llcc_slice_config *table, u32 sz); + +/** + * qcom_llcc_remove - remove the sct table + * @pdev: Platform device pointer + */ +int qcom_llcc_remove(struct platform_device *pdev); #else static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid) { diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c614375cd264..c495b2d51569 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -1,24 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. 
- * * Copyright (C) IBM Corporation, 2006 * Copyright (C) Fujitsu, 2012 * - * Author: Paul McKenney <paulmck@us.ibm.com> + * Author: Paul McKenney <paulmck@linux.ibm.com> * Lai Jiangshan <laijs@cn.fujitsu.com> * * For detailed explanation of Read-Copy Update mechanism see - @@ -223,6 +210,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp) { + WARN_ON_ONCE(idx & ~0x1); rcu_lock_release(&(ssp)->dep_map); __srcu_read_unlock(ssp, idx); } diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index b19216aaaef2..5a5a1941ca15 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -1,24 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion, * tiny variant. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2017 * - * Author: Paul McKenney <paulmck@us.ibm.com> + * Author: Paul McKenney <paulmck@linux.ibm.com> */ #ifndef _LINUX_SRCU_TINY_H diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 6f292bd3e7db..7f7c8c050f63 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -1,24 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion, * tree variant. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright (C) IBM Corporation, 2017 * - * Author: Paul McKenney <paulmck@us.ibm.com> + * Author: Paul McKenney <paulmck@linux.ibm.com> */ #ifndef _LINUX_SRCU_TREE_H @@ -45,7 +32,8 @@ struct srcu_data { unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ bool srcu_cblist_invoking; /* Invoking these CBs? */ - struct delayed_work work; /* Context for CB invoking. */ + struct timer_list delay_work; /* Delay for CB invoking */ + struct work_struct work; /* Context for CB invoking. */ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ struct srcu_node *mynode; /* Leaf srcu_node. 
*/ unsigned long grpmask; /* Mask for leaf srcu_node */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index fb63045a0fb6..94369f5bd8e5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -54,7 +54,7 @@ struct __sysctl_args; struct sysinfo; struct timespec; struct timeval; -struct timex; +struct __kernel_timex; struct timezone; struct tms; struct utimbuf; @@ -297,6 +297,11 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id, long nr, struct io_event __user *events, struct __kernel_timespec __user *timeout); +asmlinkage long sys_io_getevents_time32(__u32 ctx_id, + __s32 min_nr, + __s32 nr, + struct io_event __user *events, + struct old_timespec32 __user *timeout); asmlinkage long sys_io_pgetevents(aio_context_t ctx_id, long min_nr, long nr, @@ -522,11 +527,19 @@ asmlinkage long sys_timerfd_settime(int ufd, int flags, const struct __kernel_itimerspec __user *utmr, struct __kernel_itimerspec __user *otmr); asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr); +asmlinkage long sys_timerfd_gettime32(int ufd, + struct old_itimerspec32 __user *otmr); +asmlinkage long sys_timerfd_settime32(int ufd, int flags, + const struct old_itimerspec32 __user *utmr, + struct old_itimerspec32 __user *otmr); /* fs/utimes.c */ asmlinkage long sys_utimensat(int dfd, const char __user *filename, struct __kernel_timespec __user *utimes, int flags); +asmlinkage long sys_utimensat_time32(unsigned int dfd, + const char __user *filename, + struct old_timespec32 __user *t, int flags); /* kernel/acct.c */ asmlinkage long sys_acct(const char __user *name); @@ -555,6 +568,9 @@ asmlinkage long sys_unshare(unsigned long unshare_flags); asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, struct __kernel_timespec __user *utime, u32 __user *uaddr2, u32 val3); +asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val, + struct old_timespec32 __user *utime, u32 __user *uaddr2, + u32 val3); asmlinkage long sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, size_t __user *len_ptr); @@ -564,6 +580,8 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, /* kernel/hrtimer.c */ asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp, struct __kernel_timespec __user *rmtp); +asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/itimer.c */ asmlinkage long sys_getitimer(int which, struct itimerval __user *value); @@ -591,7 +609,7 @@ asmlinkage long sys_timer_gettime(timer_t timer_id, asmlinkage long sys_timer_getoverrun(timer_t timer_id); asmlinkage long sys_timer_settime(timer_t timer_id, int flags, const struct __kernel_itimerspec __user *new_setting, - struct itimerspec __user *old_setting); + struct __kernel_itimerspec __user *old_setting); asmlinkage long sys_timer_delete(timer_t timer_id); asmlinkage long sys_clock_settime(clockid_t which_clock, const struct __kernel_timespec __user *tp); @@ -602,6 +620,20 @@ asmlinkage long sys_clock_getres(clockid_t which_clock, asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, const struct __kernel_timespec __user *rqtp, struct __kernel_timespec __user *rmtp); +asmlinkage long sys_timer_gettime32(timer_t timer_id, + struct old_itimerspec32 __user *setting); +asmlinkage long sys_timer_settime32(timer_t timer_id, int flags, + struct old_itimerspec32 __user *new, + struct old_itimerspec32 __user *old); +asmlinkage long sys_clock_settime32(clockid_t 
which_clock, + struct old_timespec32 __user *tp); +asmlinkage long sys_clock_gettime32(clockid_t which_clock, + struct old_timespec32 __user *tp); +asmlinkage long sys_clock_getres_time32(clockid_t which_clock, + struct old_timespec32 __user *tp); +asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags, + struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/printk.c */ asmlinkage long sys_syslog(int type, char __user *buf, int len); @@ -627,6 +659,8 @@ asmlinkage long sys_sched_get_priority_max(int policy); asmlinkage long sys_sched_get_priority_min(int policy); asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct __kernel_timespec __user *interval); +asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid, + struct old_timespec32 __user *interval); /* kernel/signal.c */ asmlinkage long sys_restart_syscall(void); @@ -695,7 +729,8 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __user *tz); asmlinkage long sys_settimeofday(struct timeval __user *tv, struct timezone __user *tz); -asmlinkage long sys_adjtimex(struct timex __user *txc_p); +asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p); +asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p); /* kernel/timer.c */ asmlinkage long sys_getpid(void); @@ -714,9 +749,18 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct __kernel_timespec __user *abs_timeout); asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); +asmlinkage long sys_mq_timedreceive_time32(mqd_t mqdes, + char __user *u_msg_ptr, + unsigned int msg_len, unsigned int __user *u_msg_prio, + const struct old_timespec32 __user *u_abs_timeout); +asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes, + const char __user *u_msg_ptr, + unsigned int msg_len, unsigned int msg_prio, + const struct old_timespec32 __user *u_abs_timeout); /* ipc/msg.c */ asmlinkage long sys_msgget(key_t key, int msgflg); +asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, long msgtyp, int msgflg); @@ -726,14 +770,19 @@ asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, /* ipc/sem.c */ asmlinkage long sys_semget(key_t key, int nsems, int semflg); asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg); +asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, unsigned nsops, const struct __kernel_timespec __user *timeout); +asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops, + unsigned nsops, + const struct old_timespec32 __user *timeout); asmlinkage long sys_semop(int semid, struct sembuf __user *sops, unsigned nsops); /* ipc/shm.c */ asmlinkage long sys_shmget(key_t key, size_t size, int flag); +asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); asmlinkage long 
sys_shmdt(char __user *shmaddr); @@ -867,7 +916,9 @@ asmlinkage long sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); asmlinkage long sys_clock_adjtime(clockid_t which_clock, - struct timex __user *tx); + struct __kernel_timex __user *tx); +asmlinkage long sys_clock_adjtime32(clockid_t which_clock, + struct old_timex32 __user *tx); asmlinkage long sys_syncfs(int fd); asmlinkage long sys_setns(int fd, int nstype); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, @@ -1003,6 +1054,7 @@ asmlinkage long sys_alarm(unsigned int seconds); asmlinkage long sys_getpgrp(void); asmlinkage long sys_pause(void); asmlinkage long sys_time(time_t __user *tloc); +asmlinkage long sys_time32(old_time32_t __user *tloc); #ifdef __ARCH_WANT_SYS_UTIME asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times); @@ -1011,6 +1063,13 @@ asmlinkage long sys_utimes(char __user *filename, asmlinkage long sys_futimesat(int dfd, const char __user *filename, struct timeval __user *utimes); #endif +asmlinkage long sys_futimesat_time32(unsigned int dfd, + const char __user *filename, + struct old_timeval32 __user *t); +asmlinkage long sys_utime32(const char __user *filename, + struct old_utimbuf32 __user *t); +asmlinkage long sys_utimes_time32(const char __user *filename, + struct old_timeval32 __user *t); asmlinkage long sys_creat(const char __user *pathname, umode_t mode); asmlinkage long sys_getdents(unsigned int fd, struct linux_dirent __user *dirent, @@ -1035,6 +1094,7 @@ asmlinkage long sys_fork(void); /* obsolete: kernel/time/time.c */ asmlinkage long sys_stime(time_t __user *tptr); +asmlinkage long sys_stime32(old_time32_t __user *tptr); /* obsolete: kernel/signal.c */ asmlinkage long sys_sigpending(old_sigset_t __user *uset); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 6cfe05893a76..4a49f80e7f71 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -15,11 +15,14 @@ #ifndef __TEE_DRV_H #define __TEE_DRV_H -#include <linux/types.h> +#include <linux/device.h> #include <linux/idr.h> #include <linux/kref.h> #include <linux/list.h> +#include <linux/mod_devicetable.h> #include <linux/tee.h> +#include <linux/types.h> +#include <linux/uuid.h> /* * The file describes the API provided by the generic TEE driver to the @@ -47,6 +50,11 @@ struct tee_shm_pool; * @releasing: flag that indicates if context is being released right now. * It is needed to break circular dependency on context during * shared memory release. + * @supp_nowait: flag that indicates that requests in this context should not + * wait for tee-supplicant daemon to be started if not present + * and just return with an error code. It is needed for requests + * that arises from TEE based kernel drivers that should be + * non-blocking in nature. */ struct tee_context { struct tee_device *teedev; @@ -54,6 +62,7 @@ struct tee_context { void *data; struct kref refcount; bool releasing; + bool supp_nowait; }; struct tee_param_memref { @@ -526,6 +535,18 @@ int tee_client_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg, struct tee_param *param); +/** + * tee_client_cancel_req() - Request cancellation of the previous open-session + * or invoke-command operations in a Trusted Application + * @ctx: TEE Context + * @arg: Cancellation arguments, see description of + * struct tee_ioctl_cancel_arg + * + * Returns < 0 on error else 0 if the cancellation was successfully requested. 
+ */ +int tee_client_cancel_req(struct tee_context *ctx, + struct tee_ioctl_cancel_arg *arg); + static inline bool tee_param_is_memref(struct tee_param *param) { switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { @@ -538,4 +559,31 @@ static inline bool tee_param_is_memref(struct tee_param *param) } } +extern struct bus_type tee_bus_type; + +/** + * struct tee_client_device - tee based device + * @id: device identifier + * @dev: device structure + */ +struct tee_client_device { + struct tee_client_device_id id; + struct device dev; +}; + +#define to_tee_client_device(d) container_of(d, struct tee_client_device, dev) + +/** + * struct tee_client_driver - tee client driver + * @id_table: device id table supported by this driver + * @driver: driver structure + */ +struct tee_client_driver { + const struct tee_client_device_id *id_table; + struct device_driver driver; +}; + +#define to_tee_client_driver(d) \ + container_of(d, struct tee_client_driver, driver) + #endif /*__TEE_DRV_H*/ diff --git a/include/linux/time32.h b/include/linux/time32.h index 118b9977080c..0a1f302a1753 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -10,6 +10,7 @@ */ #include <linux/time64.h> +#include <linux/timex.h> #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) @@ -35,13 +36,42 @@ struct old_utimbuf32 { old_time32_t modtime; }; +struct old_timex32 { + u32 modes; + s32 offset; + s32 freq; + s32 maxerror; + s32 esterror; + s32 status; + s32 constant; + s32 precision; + s32 tolerance; + struct old_timeval32 time; + s32 tick; + s32 ppsfreq; + s32 jitter; + s32 shift; + s32 stabil; + s32 jitcnt; + s32 calcnt; + s32 errcnt; + s32 stbcnt; + s32 tai; + + s32:32; s32:32; s32:32; s32:32; + s32:32; s32:32; s32:32; s32:32; + s32:32; s32:32; s32:32; +}; + extern int get_old_timespec32(struct timespec64 *, const void __user *); extern int put_old_timespec32(const struct timespec64 *, void __user *); extern int get_old_itimerspec32(struct itimerspec64 *its, const struct old_itimerspec32 __user *uits); extern int put_old_itimerspec32(const struct itimerspec64 *its, struct old_itimerspec32 __user *uits); - +struct __kernel_timex; +int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *); +int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *); #if __BITS_PER_LONG == 64 diff --git a/include/linux/time64.h b/include/linux/time64.h index 05634afba0db..f38d382ffec1 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -7,14 +7,6 @@ typedef __s64 time64_t; typedef __u64 timeu64_t; -/* CONFIG_64BIT_TIME enables new 64 bit time_t syscalls in the compat path - * and 32-bit emulation. 
- */ -#ifndef CONFIG_64BIT_TIME -#define __kernel_timespec timespec -#define __kernel_itimerspec itimerspec -#endif - #include <uapi/linux/time.h> struct timespec64 { diff --git a/include/linux/timex.h b/include/linux/timex.h index 39c25dbebfe8..ce0859763670 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -151,7 +151,9 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ #define NTP_INTERVAL_FREQ (HZ) #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) -extern int do_adjtimex(struct timex *); +extern int do_adjtimex(struct __kernel_timex *); +extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx); + extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); diff --git a/include/linux/torture.h b/include/linux/torture.h index 48fad21109fc..23d80db426d7 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Common functions for in-kernel torture tests. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can access it online at - * http://www.gnu.org/licenses/gpl-2.0.html. - * * Copyright IBM Corporation, 2014 * - * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + * Author: Paul E. McKenney <paulmck@linux.ibm.com> */ #ifndef __LINUX_TORTURE_H @@ -50,11 +37,12 @@ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) /* Definitions for online/offline exerciser. 
*/ +typedef void torture_ofl_func(void); bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_offl, int *min_onl, int *max_onl); bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_onl, int *min_onl, int *max_onl); -int torture_onoff_init(long ooholdoff, long oointerval); +int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f); void torture_onoff_stats(void); bool torture_onoff_failures(void); diff --git a/include/linux/wait.h b/include/linux/wait.h index ed7c122cb31f..5f3efabc36f4 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -308,7 +308,7 @@ do { \ #define __wait_event_freezable(wq_head, condition) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ - schedule(); try_to_freeze()) + freezable_schedule()) /** * wait_event_freezable - sleep (or freeze) until a condition gets true @@ -367,7 +367,7 @@ do { \ #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_INTERRUPTIBLE, 0, timeout, \ - __ret = schedule_timeout(__ret); try_to_freeze()) + __ret = freezable_schedule_timeout(__ret)) /* * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid @@ -588,7 +588,7 @@ do { \ #define __wait_event_freezable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ - schedule(); try_to_freeze()) + freezable_schedule()) #define wait_event_freezable_exclusive(wq, condition) \ ({ \ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 60d673e15632..d9a1a480e920 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -390,43 +390,23 @@ extern struct workqueue_struct *system_freezable_wq; extern struct workqueue_struct *system_power_efficient_wq; extern struct workqueue_struct *system_freezable_power_efficient_wq; -extern struct workqueue_struct * -__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, - struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6); - /** * alloc_workqueue - allocate a workqueue * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags * @max_active: max in-flight work items, 0 for default - * @args...: args for @fmt + * remaining args: args for @fmt * * Allocate a workqueue with the specified parameters. For detailed * information on WQ_* flags, please refer to * Documentation/core-api/workqueue.rst. * - * The __lock_name macro dance is to guarantee that single lock_class_key - * doesn't end up with different namesm, which isn't allowed by lockdep. - * * RETURNS: * Pointer to the allocated workqueue on success, %NULL on failure. */ -#ifdef CONFIG_LOCKDEP -#define alloc_workqueue(fmt, flags, max_active, args...) \ -({ \ - static struct lock_class_key __key; \ - const char *__lock_name; \ - \ - __lock_name = "(wq_completion)"#fmt#args; \ - \ - __alloc_workqueue_key((fmt), (flags), (max_active), \ - &__key, __lock_name, ##args); \ -}) -#else -#define alloc_workqueue(fmt, flags, max_active, args...) 
\ - __alloc_workqueue_key((fmt), (flags), (max_active), \ - NULL, NULL, ##args) -#endif +struct workqueue_struct *alloc_workqueue(const char *fmt, + unsigned int flags, + int max_active, ...); /** * alloc_ordered_workqueue - allocate an ordered workqueue diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index 4be1aa4435ae..7800e12ee042 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -73,6 +73,8 @@ enum rpi_firmware_property_tag { RPI_FIRMWARE_GET_CUSTOMER_OTP = 0x00030021, RPI_FIRMWARE_GET_DOMAIN_STATE = 0x00030030, RPI_FIRMWARE_GET_THROTTLED = 0x00030046, + RPI_FIRMWARE_GET_CLOCK_MEASURED = 0x00030047, + RPI_FIRMWARE_NOTIFY_REBOOT = 0x00030048, RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001, RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002, RPI_FIRMWARE_SET_VOLTAGE = 0x00038003, @@ -86,6 +88,8 @@ enum rpi_firmware_property_tag { RPI_FIRMWARE_SET_GPIO_CONFIG = 0x00038043, RPI_FIRMWARE_GET_PERIPH_REG = 0x00030045, RPI_FIRMWARE_SET_PERIPH_REG = 0x00038045, + RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049, + RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050, /* Dispmanx TAGS */ diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h index 3447fd10a3e6..672cfb58046f 100644 --- a/include/soc/fsl/dpaa2-io.h +++ b/include/soc/fsl/dpaa2-io.h @@ -111,9 +111,9 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid, const struct dpaa2_fd *fd); int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio, u16 qdbin, const struct dpaa2_fd *fd); -int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid, +int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid, const u64 *buffers, unsigned int num_buffers); -int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid, +int dpaa2_io_service_acquire(struct dpaa2_io *d, u16 bpid, u64 *buffers, unsigned int num_buffers); struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h index b02f926a0216..45960aa08f4a 100644 --- a/include/soc/tegra/bpmp.h +++ b/include/soc/tegra/bpmp.h @@ -23,6 +23,7 @@ #include <soc/tegra/bpmp-abi.h> struct tegra_bpmp_clk; +struct tegra_bpmp_ops; struct tegra_bpmp_soc { struct { @@ -32,6 +33,8 @@ struct tegra_bpmp_soc { unsigned int timeout; } cpu_tx, thread, cpu_rx; } channels; + + const struct tegra_bpmp_ops *ops; unsigned int num_resets; }; @@ -47,6 +50,7 @@ struct tegra_bpmp_channel { struct tegra_bpmp_mb_data *ob; struct completion completion; struct tegra_ivc *ivc; + unsigned int index; }; typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int mrq, @@ -63,12 +67,7 @@ struct tegra_bpmp_mrq { struct tegra_bpmp { const struct tegra_bpmp_soc *soc; struct device *dev; - - struct { - struct gen_pool *pool; - dma_addr_t phys; - void *virt; - } tx, rx; + void *priv; struct { struct mbox_client client; @@ -173,6 +172,8 @@ static inline bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, } #endif +void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp); + #if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP) int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp); #else diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index a9db1b501de1..b32ee5d82dd4 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -161,7 +161,6 @@ enum tegra_io_pad { #define TEGRA_IO_RAIL_LVDS TEGRA_IO_PAD_LVDS #ifdef CONFIG_SOC_TEGRA_PMC -int tegra_powergate_is_powered(unsigned int id); int tegra_powergate_power_on(unsigned int id); int 
tegra_powergate_power_off(unsigned int id); int tegra_powergate_remove_clamping(unsigned int id); @@ -182,11 +181,6 @@ void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode); void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode); #else -static inline int tegra_powergate_is_powered(unsigned int id) -{ - return -ENOSYS; -} - static inline int tegra_powergate_power_on(unsigned int id) { return -ENOSYS; diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index e7ee32861d51..abd238d0f7a4 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -15,9 +15,7 @@ #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ +/* 0x01 - 0x03 are defined in linux/mman.h */ #define MAP_TYPE 0x0f /* Mask for type of mapping */ #define MAP_FIXED 0x10 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x20 /* don't use a file */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index d90127298f12..12cdf611d217 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -38,8 +38,10 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy) __SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit) #define __NR_io_cancel 3 __SYSCALL(__NR_io_cancel, sys_io_cancel) +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_io_getevents 4 -__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents) +__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents) +#endif /* fs/xattr.c */ #define __NR_setxattr 5 @@ -179,7 +181,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat) #define __NR_fchown 55 __SYSCALL(__NR_fchown, sys_fchown) #define __NR_openat 56 -__SC_COMP(__NR_openat, sys_openat, compat_sys_openat) +__SYSCALL(__NR_openat, sys_openat) #define __NR_close 57 __SYSCALL(__NR_close, sys_close) #define __NR_vhangup 58 @@ -222,10 +224,12 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev) __SYSCALL(__NR3264_sendfile, sys_sendfile64) /* fs/select.c */ +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_pselect6 72 -__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6) +__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32) #define __NR_ppoll 73 -__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll) +__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32) +#endif /* fs/signalfd.c */ #define __NR_signalfd4 74 @@ -269,16 +273,20 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \ /* fs/timerfd.c */ #define __NR_timerfd_create 85 __SYSCALL(__NR_timerfd_create, sys_timerfd_create) +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_timerfd_settime 86 -__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \ - compat_sys_timerfd_settime) +__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \ + sys_timerfd_settime) #define __NR_timerfd_gettime 87 -__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \ - compat_sys_timerfd_gettime) +__SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \ + sys_timerfd_gettime) +#endif /* fs/utimes.c */ +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define 
__NR_utimensat 88 -__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat) +__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat) +#endif /* kernel/acct.c */ #define __NR_acct 89 @@ -309,8 +317,10 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address) __SYSCALL(__NR_unshare, sys_unshare) /* kernel/futex.c */ +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_futex 98 -__SC_COMP(__NR_futex, sys_futex, compat_sys_futex) +__SC_3264(__NR_futex, sys_futex_time32, sys_futex) +#endif #define __NR_set_robust_list 99 __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \ compat_sys_set_robust_list) @@ -319,8 +329,10 @@ __SC_COMP(__NR_get_robust_list, sys_get_robust_list, \ compat_sys_get_robust_list) /* kernel/hrtimer.c */ +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_nanosleep 101 -__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep) +__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep) +#endif /* kernel/itimer.c */ #define __NR_getitimer 102 @@ -341,23 +353,29 @@ __SYSCALL(__NR_delete_module, sys_delete_module) /* kernel/posix-timers.c */ #define __NR_timer_create 107 __SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create) +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_timer_gettime 108 -__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime) +__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime) +#endif #define __NR_timer_getoverrun 109 __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_timer_settime 110 -__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime) +__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime) +#endif #define __NR_timer_delete 111 __SYSCALL(__NR_timer_delete, sys_timer_delete) +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_clock_settime 112 -__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime) +__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime) #define __NR_clock_gettime 113 -__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime) +__SC_3264(__NR_clock_gettime, sys_clock_gettime32, sys_clock_gettime) #define __NR_clock_getres 114 -__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres) +__SC_3264(__NR_clock_getres, sys_clock_getres_time32, sys_clock_getres) #define __NR_clock_nanosleep 115 -__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \ - compat_sys_clock_nanosleep) +__SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \ + sys_clock_nanosleep) +#endif /* kernel/printk.c */ #define __NR_syslog 116 @@ -388,9 +406,11 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield) __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) #define __NR_sched_get_priority_min 126 __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) +#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_sched_rr_get_interval 127 -__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \ - compat_sys_sched_rr_get_interval) +__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \ + sys_sched_rr_get_interval) +#endif /* kernel/signal.c */ #define __NR_restart_syscall 128 @@ -411,9 +431,11 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction) __SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, 
	  compat_sys_rt_sigprocmask)
 #define __NR_rt_sigpending 136
 __SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_rt_sigtimedwait 137
-__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
-	  compat_sys_rt_sigtimedwait)
+__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
+	  sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
+#endif
 #define __NR_rt_sigqueueinfo 138
 __SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
 	  compat_sys_rt_sigqueueinfo)
@@ -467,10 +489,15 @@ __SYSCALL(__NR_uname, sys_newuname)
 __SYSCALL(__NR_sethostname, sys_sethostname)
 #define __NR_setdomainname 162
 __SYSCALL(__NR_setdomainname, sys_setdomainname)
+
+#ifdef __ARCH_WANT_SET_GET_RLIMIT
+/* getrlimit and setrlimit are superseded with prlimit64 */
 #define __NR_getrlimit 163
 __SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
 #define __NR_setrlimit 164
 __SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#endif
+
 #define __NR_getrusage 165
 __SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
 #define __NR_umask 166
@@ -481,12 +508,14 @@ __SYSCALL(__NR_prctl, sys_prctl)
 __SYSCALL(__NR_getcpu, sys_getcpu)
 
 /* kernel/time.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_gettimeofday 169
 __SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
 #define __NR_settimeofday 170
 __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
 #define __NR_adjtimex 171
-__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
+#endif
 
 /* kernel/timer.c */
 #define __NR_getpid 172
@@ -511,11 +540,13 @@ __SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
 __SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
 #define __NR_mq_unlink 181
 __SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_mq_timedsend 182
-__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
 #define __NR_mq_timedreceive 183
-__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
-	  compat_sys_mq_timedreceive)
+__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
+	  sys_mq_timedreceive)
+#endif
 #define __NR_mq_notify 184
 __SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
 #define __NR_mq_getsetattr 185
@@ -536,8 +567,10 @@ __SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
 __SYSCALL(__NR_semget, sys_semget)
 #define __NR_semctl 191
 __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+#endif
 #define __NR_semop 193
 __SYSCALL(__NR_semop, sys_semop)
@@ -658,8 +691,10 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
 __SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 #define __NR_accept4 242
 __SYSCALL(__NR_accept4, sys_accept4)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_recvmmsg 243
-__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
+#endif
 
 /*
  * Architectures may provide up to 16 syscalls of their own
@@ -667,8 +702,10 @@ __SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
  */
 #define __NR_arch_specific_syscall 244
 
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_wait4 260
 __SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#endif
 #define __NR_prlimit64 261
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
 #define __NR_fanotify_init 262
@@ -678,10 +715,11 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 #define __NR_name_to_handle_at 264
 __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
 #define __NR_open_by_handle_at 265
-__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
-	  compat_sys_open_by_handle_at)
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_clock_adjtime 266
-__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
+#endif
 #define __NR_syncfs 267
 __SYSCALL(__NR_syncfs, sys_syncfs)
 #define __NR_setns 268
@@ -734,15 +772,60 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
 __SYSCALL(__NR_pkey_free, sys_pkey_free)
 #define __NR_statx 291
 __SYSCALL(__NR_statx, sys_statx)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_io_pgetevents 292
-__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
+#endif
 #define __NR_rseq 293
 __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_kexec_file_load 294
 __SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+#if __BITS_PER_LONG == 32
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SC_COMP(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#endif
 
 #undef __NR_syscalls
-#define __NR_syscalls 295
+#define __NR_syscalls 424
 
 /*
  * 32 bit systems traditionally used different
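With this change, a 32-bit kernel exposes the legacy 32-bit time syscalls only when the architecture opts in via __ARCH_WANT_TIME32_SYSCALLS, while the new *_time64 entries (numbers 403 and up) always take the 64-bit struct __kernel_timespec. A hedged sketch of how 32-bit user space might prefer the new number and fall back to the legacy libc call on older kernels; the wrapper name and the local struct copy are illustrative, not part of this patch:

#include <stdint.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>

/* user-space mirror of struct __kernel_timespec (64-bit seconds) */
struct kernel_timespec64 {
	int64_t   tv_sec;
	long long tv_nsec;
};

static int clock_gettime64_example(clockid_t clk, struct kernel_timespec64 *ts)
{
#ifdef __NR_clock_gettime64
	/* Only defined for 32-bit targets with new enough headers. */
	long ret = syscall(__NR_clock_gettime64, clk, ts);
	if (ret == 0 || errno != ENOSYS)
		return (int)ret;
#endif
	/* Fall back to the native (possibly y2038-unsafe) call. */
	struct timespec legacy;
	int ret2 = clock_gettime(clk, &legacy);
	if (ret2 == 0) {
		ts->tv_sec = legacy.tv_sec;
		ts->tv_nsec = legacy.tv_nsec;
	}
	return ret2;
}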
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index d0f515d53299..fc1a64c3447b 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -12,6 +12,10 @@
 #define OVERCOMMIT_ALWAYS 1
 #define OVERCOMMIT_NEVER 2
 
+#define MAP_SHARED	0x01		/* Share changes */
+#define MAP_PRIVATE	0x02		/* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03	/* share + validate extension flags */
+
 /*
  * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
  * size other than the default is desired.  See hugetlb_encode.h.
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9de8780ac8d9..7198ddd0c6b1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -372,7 +372,9 @@ struct perf_event_attr {
 				context_switch :  1, /* context switch data */
 				write_backward :  1, /* Write ring buffer from end to beginning */
 				namespaces     :  1, /* include namespaces data */
-				__reserved_1   : 35;
+				ksymbol        :  1, /* include ksymbol events */
+				bpf_event      :  1, /* include bpf events */
+				__reserved_1   : 33;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -445,8 +447,6 @@ struct perf_event_query_bpf {
 	__u32	ids[0];
 };
 
-#define perf_flags(attr)	(*(&(attr)->read_format + 1))
-
 /*
  * Ioctls that can be done on a perf event fd:
  */
@@ -965,9 +965,58 @@ enum perf_event_type {
 	 */
 	PERF_RECORD_NAMESPACES			= 16,
 
+	/*
+	 * Record ksymbol register/unregister events:
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				addr;
+	 *	u32				len;
+	 *	u16				ksym_type;
+	 *	u16				flags;
+	 *	char				name[];
+	 *	struct sample_id		sample_id;
+	 * };
+	 */
+	PERF_RECORD_KSYMBOL			= 17,
+
+	/*
+	 * Record bpf events:
+	 *  enum perf_bpf_event_type {
+	 *	PERF_BPF_EVENT_UNKNOWN		= 0,
+	 *	PERF_BPF_EVENT_PROG_LOAD	= 1,
+	 *	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
+	 *  };
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u16				type;
+	 *	u16				flags;
+	 *	u32				id;
+	 *	u8				tag[BPF_TAG_SIZE];
+	 *	struct sample_id		sample_id;
+	 * };
+	 */
+	PERF_RECORD_BPF_EVENT			= 18,
+
 	PERF_RECORD_MAX,			/* non-ABI */
 };
 
+enum perf_record_ksymbol_type {
+	PERF_RECORD_KSYMBOL_TYPE_UNKNOWN	= 0,
+	PERF_RECORD_KSYMBOL_TYPE_BPF		= 1,
+	PERF_RECORD_KSYMBOL_TYPE_MAX		/* non-ABI */
+};
+
+#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER	(1 << 0)
+
+enum perf_bpf_event_type {
+	PERF_BPF_EVENT_UNKNOWN		= 0,
+	PERF_BPF_EVENT_PROG_LOAD	= 1,
+	PERF_BPF_EVENT_PROG_UNLOAD	= 2,
+	PERF_BPF_EVENT_MAX,		/* non-ABI */
+};
+
 #define PERF_MAX_STACK_DEPTH		127
 #define PERF_MAX_CONTEXTS_PER_STACK	8
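The two new record types follow the layouts documented in the comments above. A rough user-space sketch, assuming headers new enough to carry these definitions; the struct names and the literal tag size are illustrative (the perf tool keeps its own copies), and the optional struct sample_id trailer is omitted:

#include <linux/perf_event.h>
#include <linux/types.h>

/* mirrors the PERF_RECORD_KSYMBOL layout documented above */
struct ksymbol_event_example {
	struct perf_event_header header;
	__u64 addr;
	__u32 len;
	__u16 ksym_type;	/* enum perf_record_ksymbol_type */
	__u16 flags;		/* e.g. PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER */
	char  name[];		/* NUL-terminated; sample_id may follow */
};

/* mirrors the PERF_RECORD_BPF_EVENT layout documented above */
struct bpf_event_example {
	struct perf_event_header header;
	__u16 type;		/* enum perf_bpf_event_type */
	__u16 flags;
	__u32 id;		/* bpf program id */
	__u8  tag[8];		/* BPF_TAG_SIZE; sample_id may follow */
};

static void handle_record_example(const struct perf_event_header *hdr)
{
	switch (hdr->type) {
	case PERF_RECORD_KSYMBOL:
		/* a kernel symbol (e.g. a JITed BPF image) appeared or vanished */
		break;
	case PERF_RECORD_BPF_EVENT:
		/* a BPF program was loaded or unloaded */
		break;
	default:
		break;
	}
}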
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index b4875a93363a..094bb03b9cc2 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -219,6 +219,7 @@ struct prctl_mm_map {
 # define PR_SPEC_ENABLE			(1UL << 1)
 # define PR_SPEC_DISABLE		(1UL << 2)
 # define PR_SPEC_FORCE_DISABLE		(1UL << 3)
+# define PR_SPEC_DISABLE_NOEXEC		(1UL << 4)
 
 /* Reset arm64 pointer authentication keys */
 #define PR_PAC_RESET_KEYS		54
diff --git a/include/uapi/linux/time_types.h b/include/uapi/linux/time_types.h
index 459070c61d47..27bfc8fc6904 100644
--- a/include/uapi/linux/time_types.h
+++ b/include/uapi/linux/time_types.h
@@ -4,19 +4,15 @@
 
 #include <linux/types.h>
 
-#ifndef __kernel_timespec
 struct __kernel_timespec {
 	__kernel_time64_t	tv_sec;		/* seconds */
 	long long		tv_nsec;	/* nanoseconds */
 };
-#endif
 
-#ifndef __kernel_itimerspec
 struct __kernel_itimerspec {
 	struct __kernel_timespec it_interval;	/* timer period */
 	struct __kernel_timespec it_value;	/* timer expiration */
 };
-#endif
 
 /*
  * legacy timeval structure, only embedded in structures that
diff --git a/include/uapi/linux/timex.h b/include/uapi/linux/timex.h
index 92685d826444..9f517f9010bb 100644
--- a/include/uapi/linux/timex.h
+++ b/include/uapi/linux/timex.h
@@ -92,6 +92,45 @@ struct timex {
 	int  :32; int  :32; int  :32;
 };
 
+struct __kernel_timex_timeval {
+	__kernel_time64_t	tv_sec;
+	long long		tv_usec;
+};
+
+struct __kernel_timex {
+	unsigned int modes;	/* mode selector */
+	int :32;		/* pad */
+	long long offset;	/* time offset (usec) */
+	long long freq;		/* frequency offset (scaled ppm) */
+	long long maxerror;	/* maximum error (usec) */
+	long long esterror;	/* estimated error (usec) */
+	int status;		/* clock command/status */
+	int :32;		/* pad */
+	long long constant;	/* pll time constant */
+	long long precision;	/* clock precision (usec) (read only) */
+	long long tolerance;	/* clock frequency tolerance (ppm)
+				 * (read only)
+				 */
+	struct __kernel_timex_timeval time;	/* (read only, except for ADJ_SETOFFSET) */
+	long long tick;		/* (modified) usecs between clock ticks */
+
+	long long ppsfreq;	/* pps frequency (scaled ppm) (ro) */
+	long long jitter;	/* pps jitter (us) (ro) */
+	int shift;		/* interval duration (s) (shift) (ro) */
+	int :32;		/* pad */
+	long long stabil;	/* pps stability (scaled ppm) (ro) */
+	long long jitcnt;	/* jitter limit exceeded (ro) */
+	long long calcnt;	/* calibration intervals (ro) */
+	long long errcnt;	/* calibration errors (ro) */
+	long long stbcnt;	/* stability limit exceeded (ro) */
+
+	int tai;		/* TAI offset (ro) */
+
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32;
+};
+
 /*
  * Mode codes (timex.mode)
  */
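struct __kernel_timex is the y2038-safe layout consumed by the new 64-bit adjtimex/clock_adjtime paths. A small sketch, assuming a 32-bit target whose headers already define both struct __kernel_timex and __NR_clock_adjtime64; querying with modes set to 0 performs a pure read, and the syscall's return value is the clock state:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/timex.h>

int main(void)
{
#ifdef __NR_clock_adjtime64
	struct __kernel_timex tx;

	memset(&tx, 0, sizeof(tx));
	tx.modes = 0;	/* no adjustment requested, just query */

	/* clockid 0 is CLOCK_REALTIME */
	long state = syscall(__NR_clock_adjtime64, 0, &tx);
	if (state < 0) {
		perror("clock_adjtime64");
		return 1;
	}
	printf("offset=%lld usec, freq=%lld, state=%ld\n",
	       (long long)tx.offset, (long long)tx.freq, state);
#endif
	return 0;
}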