| author    | Linus Torvalds <torvalds@linux-foundation.org>        | 2023-06-28 00:14:30 +0300 |
|-----------|--------------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org>        | 2023-06-28 00:14:30 +0300 |
| commit    | bc6cb4d5bc3a44197de30784eae71d8ba28483eb (patch)      |                           |
| tree      | fdd00391c6068c217eeb8a4a06afc40cc1fc6853              |                           |
| parent    | ed3b7923a816ded62dccef377c9ee346c7d3b1b4 (diff)       |                           |
| parent    | b33eb50a92b0a298fa8a6ac350e741c3ec100f6d (diff)       |                           |
| download  | linux-bc6cb4d5bc3a44197de30784eae71d8ba28483eb.tar.xz |                           |
Merge tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Introduce cmpxchg128() -- a.k.a. the demise of cmpxchg_double()
The cmpxchg128() family of functions is functionally equivalent to
cmpxchg_double(), but with a saner interface.
Instead of a 6-parameter horror that forced u128-as-two-u64-halves
layout details onto the interface and exposed users to complexity,
fragility & bugs, it uses a natural 3-parameter interface with u128
types (a sketch of the old and new interface shapes follows this list).
- Restructure the generated atomic headers, and add kerneldoc comments
for all of the generic atomic{,64,_long}_t operations.
The generated definitions are much cleaner now, and come with
documentation.
- Implement lock_set_cmp_fn() on lockdep, for defining an ordering when
taking multiple locks of the same type (see the lock-ordering sketch
after this list).
This gets rid of one use of lockdep_set_novalidate_class() in the
bcache code.
- Fix a raw_cpu_generic_try_cmpxchg() bug where unintended variable
shadowing generated garbage code with Clang on certain ARM builds
(a distilled illustration of the pattern follows this list).
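The sketch below illustrates only the shape of the interface change described in the first item: the old 6-parameter, two-u64-halves call versus the new 3-parameter u128 call. It is a hypothetical userspace analogue built on the compiler's unsigned __int128 and __atomic builtins, not the kernel implementation; the names sketch_cmpxchg128() and sketch_cmpxchg_double() are invented for illustration and the layout math assumes a little-endian 64-bit target.

```c
/*
 * Userspace analogue of the interface change only -- not kernel code.
 * Assumes a little-endian 64-bit target; build with something like
 * "gcc -mcx16 sketch.c -latomic" (16-byte builtins may need libatomic).
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned __int128 u128;

/* New style: one pointer, old and new u128 values, returns the previous value. */
static u128 sketch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
{
	__atomic_compare_exchange_n((u128 *)ptr, &old, new, false,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;		/* on failure, "old" now holds the observed value */
}

/* Old style: two adjacent u64 slots and six parameters, returns success/failure. */
static bool sketch_cmpxchg_double(unsigned long *p1, unsigned long *p2,
				  unsigned long o1, unsigned long o2,
				  unsigned long n1, unsigned long n2)
{
	/* p1/p2 must be the low/high halves of one aligned 16-byte object. */
	u128 old = ((u128)o2 << 64) | o1;
	u128 new = ((u128)n2 << 64) | n1;

	return sketch_cmpxchg128((volatile u128 *)(void *)p1, old, new) == old;
}

int main(void)
{
	struct { unsigned long ptr; unsigned long seq; }
		__attribute__((aligned(16))) slot = { 1, 2 };

	/* 6-parameter form: the caller juggles both halves explicitly. */
	sketch_cmpxchg_double(&slot.ptr, &slot.seq, 1, 2, 3, 4);

	/* 3-parameter form: one u128 value in, previous u128 value out. */
	u128 old = ((u128)4 << 64) | 3;
	u128 new = ((u128)6 << 64) | 5;
	sketch_cmpxchg128((volatile u128 *)(void *)&slot, old, new);

	printf("slot = { %lu, %lu }\n", slot.ptr, slot.seq);	/* { 5, 6 } */
	return 0;
}
```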
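lock_set_cmp_fn() itself is an in-kernel lockdep hook, so it cannot be exercised outside the kernel; the sketch below only shows the idea the third item is about -- when several locks of the same class must be held together, a comparison function defines the acquisition order that lockdep can then validate. The names here (struct node, node_cmp, lock_pair) are invented for the illustration and are not the kernel API.

```c
/*
 * Userspace sketch of ordered acquisition of same-class locks
 * (build with -pthread).  All names are illustrative.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	int level;			/* e.g. a btree node's depth */
	pthread_mutex_t lock;
};

/* Total order over nodes: higher level first, then by address. */
static int node_cmp(const struct node *a, const struct node *b)
{
	if (a->level != b->level)
		return b->level - a->level;
	if (a == b)
		return 0;
	return (uintptr_t)a < (uintptr_t)b ? -1 : 1;
}

/* Lock two distinct nodes of the same class in cmp-defined order. */
static void lock_pair(struct node *a, struct node *b)
{
	if (node_cmp(a, b) > 0) {	/* always take the "smaller" node first */
		struct node *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

int main(void)
{
	struct node parent = { .level = 1, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node child  = { .level = 0, .lock = PTHREAD_MUTEX_INITIALIZER };

	/* Same acquisition order regardless of argument order. */
	lock_pair(&child, &parent);
	printf("locked parent (level 1) before child (level 0)\n");

	pthread_mutex_unlock(&child.lock);
	pthread_mutex_unlock(&parent.lock);
	return 0;
}
```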
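The last item concerns a classic macro pitfall: a local declared inside a statement-expression macro shadowed a caller-side variable of the same name, turning its initialization into a self-assignment of an uninitialized value. The snippet below is a distilled, hypothetical reproduction of that pattern -- TRY_CMPXCHG_BUGGY and TRY_CMPXCHG_FIXED are made-up names, and the "fix" shown (renaming the macro locals) is just one obvious remedy, not necessarily the one the patch applies.

```c
/* Distilled illustration of the macro-shadowing bug class -- not the
 * kernel's percpu code. */
#include <stdio.h>

/* Buggy: the macro's local __old shadows the caller's __old, so when the
 * caller passes "&__old", the initializer reads the macro's own
 * uninitialized local (a self-assignment). */
#define TRY_CMPXCHG_BUGGY(var, ovalp, nval)		\
({							\
	int __old = *(ovalp);				\
	int __ok = ((var) == __old);			\
	if (__ok)					\
		(var) = (nval);				\
	else						\
		*(ovalp) = (var);			\
	__ok;						\
})

/* Renamed locals cannot collide with the caller's variable. */
#define TRY_CMPXCHG_FIXED(var, ovalp, nval)		\
({							\
	int ___o = *(ovalp);				\
	int ___ok = ((var) == ___o);			\
	if (___ok)					\
		(var) = (nval);				\
	else						\
		*(ovalp) = (var);			\
	___ok;						\
})

int main(void)
{
	int slot = 5;
	int __old = 5;		/* same name as the buggy macro's local */

	/* Expands to "int __old = *(&__old);" -- comparison against garbage. */
	int buggy = TRY_CMPXCHG_BUGGY(slot, &__old, 6);

	slot = 5;
	__old = 5;
	int fixed = TRY_CMPXCHG_FIXED(slot, &__old, 6);	/* reliably 1 */

	printf("buggy=%d (unreliable), fixed=%d\n", buggy, fixed);
	return 0;
}
```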
* tag 'locking-core-2023-06-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
locking/atomic: scripts: fix ${atomic}_dec_if_positive() kerneldoc
percpu: Fix self-assignment of __old in raw_cpu_generic_try_cmpxchg()
locking/atomic: treewide: delete arch_atomic_*() kerneldoc
locking/atomic: docs: Add atomic operations to the driver basic API documentation
locking/atomic: scripts: generate kerneldoc comments
docs: scripts: kernel-doc: accept bitwise negation like ~@var
locking/atomic: scripts: simplify raw_atomic*() definitions
locking/atomic: scripts: simplify raw_atomic_long*() definitions
locking/atomic: scripts: split pfx/name/sfx/order
locking/atomic: scripts: restructure fallback ifdeffery
locking/atomic: scripts: build raw_atomic_long*() directly
locking/atomic: treewide: use raw_atomic*_<op>()
locking/atomic: scripts: add trivial raw_atomic*_<op>()
locking/atomic: scripts: factor out order template generation
locking/atomic: scripts: remove leftover "${mult}"
locking/atomic: scripts: remove bogus order parameter
locking/atomic: xtensa: add preprocessor symbols
locking/atomic: x86: add preprocessor symbols
locking/atomic: sparc: add preprocessor symbols
locking/atomic: sh: add preprocessor symbols
...
136 files changed, 9917 insertions, 4156 deletions
Diffstat (files whose changes appear in the captured portion of the patch):
 Documentation/core-api/this_cpu_ops.rst
 Documentation/driver-api/basics.rst
 arch/alpha/include/asm/atomic.h
 arch/arc/include/asm/atomic-spinlock.h
 arch/arc/include/asm/atomic.h
 arch/arc/include/asm/atomic64-arcv2.h
 arch/arm/include/asm/assembler.h
 arch/arm/include/asm/atomic.h
 arch/arm/include/asm/sync_bitops.h
 arch/arm/lib/bitops.h
 arch/arm/lib/testchangebit.S
 arch/arm/lib/testclearbit.S
 arch/arm/lib/testsetbit.S
 arch/arm64/include/asm/atomic.h
 arch/arm64/include/asm/atomic_ll_sc.h
 arch/arm64/include/asm/atomic_lse.h
 arch/arm64/include/asm/cmpxchg.h
 arch/arm64/include/asm/percpu.h
 arch/csky/include/asm/atomic.h
 arch/hexagon/include/asm/atomic.h
 arch/ia64/include/asm/atomic.h
 arch/loongarch/include/asm/atomic.h
 arch/m68k/include/asm/atomic.h
 arch/mips/include/asm/atomic.h
 arch/openrisc/include/asm/atomic.h
 arch/parisc/include/asm/atomic.h
 arch/powerpc/include/asm/atomic.h
 arch/powerpc/kernel/smp.c
 arch/riscv/include/asm/atomic.h
 arch/s390/include/asm/cmpxchg.h
 arch/s390/include/asm/cpu_mf.h
 arch/s390/include/asm/percpu.h
 arch/s390/kernel/perf_cpum_sf.c
 arch/sh/include/asm/atomic-grb.h
 arch/sh/include/asm/atomic-irq.h
 arch/sh/include/asm/atomic-llsc.h
 arch/sh/include/asm/atomic.h
 arch/sparc/include/asm/atomic_32.h
 arch/sparc/include/asm/atomic_64.h
 arch/x86/include/asm/atomic.h
 arch/x86/include/asm/atomic64_32.h
 arch/x86/include/asm/atomic64_64.h
 ...
- */ static __always_inline s64 arch_atomic64_read(const atomic64_t *v) { return __READ_ONCE((v)->counter); } -/** - * arch_atomic64_set - set atomic64 variable - * @v: pointer to type atomic64_t - * @i: required value - * - * Atomically sets the value of @v to @i. - */ static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i) { __WRITE_ONCE(v->counter, i); } -/** - * arch_atomic64_add - add integer to atomic64 variable - * @i: integer value to add - * @v: pointer to type atomic64_t - * - * Atomically adds @i to @v. - */ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v) { asm volatile(LOCK_PREFIX "addq %1,%0" @@ -48,13 +27,6 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v) : "er" (i), "m" (v->counter) : "memory"); } -/** - * arch_atomic64_sub - subtract the atomic64 variable - * @i: integer value to subtract - * @v: pointer to type atomic64_t - * - * Atomically subtracts @i from @v. - */ static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v) { asm volatile(LOCK_PREFIX "subq %1,%0" @@ -62,27 +34,12 @@ static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v) : "er" (i), "m" (v->counter) : "memory"); } -/** - * arch_atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer to type atomic64_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v) { return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i); } #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test -/** - * arch_atomic64_inc - increment atomic64 variable - * @v: pointer to type atomic64_t - * - * Atomically increments @v by 1. - */ static __always_inline void arch_atomic64_inc(atomic64_t *v) { asm volatile(LOCK_PREFIX "incq %0" @@ -91,12 +48,6 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v) } #define arch_atomic64_inc arch_atomic64_inc -/** - * arch_atomic64_dec - decrement atomic64 variable - * @v: pointer to type atomic64_t - * - * Atomically decrements @v by 1. - */ static __always_inline void arch_atomic64_dec(atomic64_t *v) { asm volatile(LOCK_PREFIX "decq %0" @@ -105,56 +56,24 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v) } #define arch_atomic64_dec arch_atomic64_dec -/** - * arch_atomic64_dec_and_test - decrement and test - * @v: pointer to type atomic64_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v) { return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e); } #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test -/** - * arch_atomic64_inc_and_test - increment and test - * @v: pointer to type atomic64_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v) { return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e); } #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test -/** - * arch_atomic64_add_negative - add and test if negative - * @i: integer value to add - * @v: pointer to type atomic64_t - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. 
- */ static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v) { return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i); } #define arch_atomic64_add_negative arch_atomic64_add_negative -/** - * arch_atomic64_add_return - add and return - * @i: integer value to add - * @v: pointer to type atomic64_t - * - * Atomically adds @i to @v and returns @i + @v - */ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v) { return i + xadd(&v->counter, i); diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index 540573f515b7..d53636506134 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -239,29 +239,4 @@ extern void __add_wrong_size(void) #define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock) #define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX) -#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \ -({ \ - bool __ret; \ - __typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \ - __typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \ - BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ - BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ - VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \ - VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \ - asm volatile(pfx "cmpxchg%c5b %1" \ - CC_SET(e) \ - : CC_OUT(e) (__ret), \ - "+m" (*(p1)), "+m" (*(p2)), \ - "+a" (__old1), "+d" (__old2) \ - : "i" (2 * sizeof(long)), \ - "b" (__new1), "c" (__new2)); \ - __ret; \ -}) - -#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \ - __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2) - -#define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ - __cmpxchg_double(, p1, p2, o1, o2, n1, n2) - #endif /* ASM_X86_CMPXCHG_H */ diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index 6ba80ce9438d..b5731c51f0f4 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h @@ -103,6 +103,6 @@ static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new) #endif -#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8) +#define system_has_cmpxchg64() boot_cpu_has(X86_FEATURE_CX8) #endif /* _ASM_X86_CMPXCHG_32_H */ diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 0d3beb27b7fe..44b08b53ab32 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h @@ -20,6 +20,71 @@ arch_try_cmpxchg((ptr), (po), (n)); \ }) -#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16) +union __u128_halves { + u128 full; + struct { + u64 low, high; + }; +}; + +#define __arch_cmpxchg128(_ptr, _old, _new, _lock) \ +({ \ + union __u128_halves o = { .full = (_old), }, \ + n = { .full = (_new), }; \ + \ + asm volatile(_lock "cmpxchg16b %[ptr]" \ + : [ptr] "+m" (*(_ptr)), \ + "+a" (o.low), "+d" (o.high) \ + : "b" (n.low), "c" (n.high) \ + : "memory"); \ + \ + o.full; \ +}) + +static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new) +{ + return __arch_cmpxchg128(ptr, old, new, LOCK_PREFIX); +} +#define arch_cmpxchg128 arch_cmpxchg128 + +static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old, u128 new) +{ + return __arch_cmpxchg128(ptr, old, new,); +} +#define arch_cmpxchg128_local arch_cmpxchg128_local + +#define __arch_try_cmpxchg128(_ptr, _oldp, _new, _lock) \ +({ \ + union __u128_halves o = { .full = *(_oldp), }, \ + n = { .full = (_new), }; \ + bool ret; \ + \ + asm volatile(_lock "cmpxchg16b %[ptr]" \ + CC_SET(e) \ + : 
CC_OUT(e) (ret), \ + [ptr] "+m" (*ptr), \ + "+a" (o.low), "+d" (o.high) \ + : "b" (n.low), "c" (n.high) \ + : "memory"); \ + \ + if (unlikely(!ret)) \ + *(_oldp) = o.full; \ + \ + likely(ret); \ +}) + +static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new) +{ + return __arch_try_cmpxchg128(ptr, oldp, new, LOCK_PREFIX); +} +#define arch_try_cmpxchg128 arch_try_cmpxchg128 + +static __always_inline bool arch_try_cmpxchg128_local(volatile u128 *ptr, u128 *oldp, u128 new) +{ + return __arch_try_cmpxchg128(ptr, oldp, new,); +} +#define arch_try_cmpxchg128_local arch_try_cmpxchg128_local + +#define system_has_cmpxchg128() boot_cpu_has(X86_FEATURE_CX16) #endif /* _ASM_X86_CMPXCHG_64_H */ diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 13c0d63ed55e..34734d730463 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -210,6 +210,67 @@ do { \ (typeof(_var))(unsigned long) pco_old__; \ }) +#if defined(CONFIG_X86_32) && !defined(CONFIG_UML) +#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \ +({ \ + union { \ + u64 var; \ + struct { \ + u32 low, high; \ + }; \ + } old__, new__; \ + \ + old__.var = _oval; \ + new__.var = _nval; \ + \ + asm qual (ALTERNATIVE("leal %P[var], %%esi; call this_cpu_cmpxchg8b_emu", \ + "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ + : [var] "+m" (_var), \ + "+a" (old__.low), \ + "+d" (old__.high) \ + : "b" (new__.low), \ + "c" (new__.high) \ + : "memory", "esi"); \ + \ + old__.var; \ +}) + +#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval) +#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval) +#endif + +#ifdef CONFIG_X86_64 +#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval); +#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval); + +#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval) \ +({ \ + union { \ + u128 var; \ + struct { \ + u64 low, high; \ + }; \ + } old__, new__; \ + \ + old__.var = _oval; \ + new__.var = _nval; \ + \ + asm qual (ALTERNATIVE("leaq %P[var], %%rsi; call this_cpu_cmpxchg16b_emu", \ + "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ + : [var] "+m" (_var), \ + "+a" (old__.low), \ + "+d" (old__.high) \ + : "b" (new__.low), \ + "c" (new__.high) \ + : "memory", "rsi"); \ + \ + old__.var; \ +}) + +#define raw_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval) +#define this_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval) +#endif + /* * this_cpu_read() makes gcc load the percpu variable every time it is * accessed while this_cpu_read_stable() allows the value to be cached. 
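The percpu.h hunk above replaces the old paired cmpxchg_double()-style primitives with plain this_cpu_cmpxchg64()/this_cpu_cmpxchg128() operations on a single wide word. As a rough illustration of how a caller might use the new 64-bit form — this sketch is not part of the patch; demo_stat and demo_stat_add() are made-up names, and it assumes this_cpu_cmpxchg64() is wired up for the configuration at hand — a lock-free per-CPU update loop could look like:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(u64, demo_stat);	/* hypothetical per-CPU counter */

	static void demo_stat_add(u64 delta)
	{
		u64 old, new;

		do {
			old = this_cpu_read(demo_stat);
			new = old + delta;
			/* retry if the cmpxchg raced with an update on this CPU
			   (e.g. from IRQ context) or the task migrated meanwhile */
		} while (this_cpu_cmpxchg64(demo_stat, old, new) != old);
	}

The this_cpu_cmpxchg128() form follows the same shape with a u128 value, which is what lets former cmpxchg_double() users treat two adjacent words as one value.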
@@ -290,23 +351,6 @@ do { \ #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval) #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval) -#ifdef CONFIG_X86_CMPXCHG64 -#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \ -({ \ - bool __ret; \ - typeof(pcp1) __o1 = (o1), __n1 = (n1); \ - typeof(pcp2) __o2 = (o2), __n2 = (n2); \ - asm volatile("cmpxchg8b "__percpu_arg(1) \ - CC_SET(z) \ - : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \ - : "b" (__n1), "c" (__n2)); \ - __ret; \ -}) - -#define raw_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double -#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double -#endif /* CONFIG_X86_CMPXCHG64 */ - /* * Per cpu atomic 64 bit operations are only available under 64 bit. * 32 bit must fall back to generic operations. @@ -329,30 +373,6 @@ do { \ #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval) #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval) - -/* - * Pretty complex macro to generate cmpxchg16 instruction. The instruction - * is not supported on early AMD64 processors so we must be able to emulate - * it in software. The address used in the cmpxchg16 instruction must be - * aligned to a 16 byte boundary. - */ -#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \ -({ \ - bool __ret; \ - typeof(pcp1) __o1 = (o1), __n1 = (n1); \ - typeof(pcp2) __o2 = (o2), __n2 = (n2); \ - alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \ - "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \ - X86_FEATURE_CX16, \ - ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \ - "+m" (pcp2), "+d" (__o2)), \ - "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \ - __ret; \ -}) - -#define raw_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double -#define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double - #endif static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr, diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index a7e1ec50ad29..72646d75b6ff 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -1967,7 +1967,7 @@ struct bp_patching_desc *try_get_desc(void) { struct bp_patching_desc *desc = &bp_desc; - if (!arch_atomic_inc_not_zero(&desc->refs)) + if (!raw_atomic_inc_not_zero(&desc->refs)) return NULL; return desc; @@ -1978,7 +1978,7 @@ static __always_inline void put_desc(void) struct bp_patching_desc *desc = &bp_desc; smp_mb__before_atomic(); - arch_atomic_dec(&desc->refs); + raw_atomic_dec(&desc->refs); } static __always_inline void *text_poke_addr(struct text_poke_loc *tp) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 22dfcb2adcd7..89e2aab5d34d 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1022,12 +1022,12 @@ static noinstr int mce_start(int *no_way_out) if (!timeout) return ret; - arch_atomic_add(*no_way_out, &global_nwo); + raw_atomic_add(*no_way_out, &global_nwo); /* * Rely on the implied barrier below, such that global_nwo * is updated before mce_callin. */ - order = arch_atomic_inc_return(&mce_callin); + order = raw_atomic_inc_return(&mce_callin); arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus); /* Enable instrumentation around calls to external facilities */ @@ -1036,10 +1036,10 @@ static noinstr int mce_start(int *no_way_out) /* * Wait for everyone. 
*/ - while (arch_atomic_read(&mce_callin) != num_online_cpus()) { + while (raw_atomic_read(&mce_callin) != num_online_cpus()) { if (mce_timed_out(&timeout, "Timeout: Not all CPUs entered broadcast exception handler")) { - arch_atomic_set(&global_nwo, 0); + raw_atomic_set(&global_nwo, 0); goto out; } ndelay(SPINUNIT); @@ -1054,7 +1054,7 @@ static noinstr int mce_start(int *no_way_out) /* * Monarch: Starts executing now, the others wait. */ - arch_atomic_set(&mce_executing, 1); + raw_atomic_set(&mce_executing, 1); } else { /* * Subject: Now start the scanning loop one by one in @@ -1062,10 +1062,10 @@ static noinstr int mce_start(int *no_way_out) * This way when there are any shared banks it will be * only seen by one CPU before cleared, avoiding duplicates. */ - while (arch_atomic_read(&mce_executing) < order) { + while (raw_atomic_read(&mce_executing) < order) { if (mce_timed_out(&timeout, "Timeout: Subject CPUs unable to finish machine check processing")) { - arch_atomic_set(&global_nwo, 0); + raw_atomic_set(&global_nwo, 0); goto out; } ndelay(SPINUNIT); @@ -1075,7 +1075,7 @@ static noinstr int mce_start(int *no_way_out) /* * Cache the global no_way_out state. */ - *no_way_out = arch_atomic_read(&global_nwo); + *no_way_out = raw_atomic_read(&global_nwo); ret = order; diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 776f4b1e395b..a0c551846b35 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -496,7 +496,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi) */ sev_es_nmi_complete(); if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) - arch_atomic_long_inc(&nsp->idt_calls); + raw_atomic_long_inc(&nsp->idt_calls); if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) return; diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 56acf53a782a..b3f81379c2fc 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -101,11 +101,11 @@ u64 __pvclock_clocksource_read(struct pvclock_vcpu_time_info *src, bool dowd) * updating at the same time, and one of them could be slightly behind, * making the assumption that last_value always go forward fail to hold. 
*/ - last = arch_atomic64_read(&last_value); + last = raw_atomic64_read(&last_value); do { if (ret <= last) return last; - } while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret)); + } while (!raw_atomic64_try_cmpxchg(&last_value, &last, ret)); return ret; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bc68a39efd70..7f70207e8689 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13161,7 +13161,7 @@ EXPORT_SYMBOL_GPL(kvm_arch_end_assignment); bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) { - return arch_atomic_read(&kvm->arch.assigned_device_count); + return raw_atomic_read(&kvm->arch.assigned_device_count); } EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 01932af64193..ea3a28e7b613 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -61,8 +61,9 @@ ifeq ($(CONFIG_X86_32),y) lib-y += strstr_32.o lib-y += string_32.o lib-y += memmove_32.o + lib-y += cmpxchg8b_emu.o ifneq ($(CONFIG_X86_CMPXCHG64),y) - lib-y += cmpxchg8b_emu.o atomic64_386_32.o + lib-y += atomic64_386_32.o endif else obj-y += iomap_copy_64.o diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S index 33c70c0160ea..6962df315793 100644 --- a/arch/x86/lib/cmpxchg16b_emu.S +++ b/arch/x86/lib/cmpxchg16b_emu.S @@ -1,47 +1,54 @@ /* SPDX-License-Identifier: GPL-2.0-only */ #include <linux/linkage.h> #include <asm/percpu.h> +#include <asm/processor-flags.h> .text /* + * Emulate 'cmpxchg16b %gs:(%rsi)' + * * Inputs: * %rsi : memory location to compare * %rax : low 64 bits of old value * %rdx : high 64 bits of old value * %rbx : low 64 bits of new value * %rcx : high 64 bits of new value - * %al : Operation successful + * + * Notably this is not LOCK prefixed and is not safe against NMIs */ SYM_FUNC_START(this_cpu_cmpxchg16b_emu) -# -# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not -# via the ZF. Caller will access %al to get result. -# -# Note that this is only useful for a cpuops operation. Meaning that we -# do *not* have a fully atomic operation but just an operation that is -# *atomic* on a single cpu (as provided by the this_cpu_xx class of -# macros). 
-# pushfq cli - cmpq PER_CPU_VAR((%rsi)), %rax - jne .Lnot_same - cmpq PER_CPU_VAR(8(%rsi)), %rdx - jne .Lnot_same + /* if (*ptr == old) */ + cmpq PER_CPU_VAR(0(%rsi)), %rax + jne .Lnot_same + cmpq PER_CPU_VAR(8(%rsi)), %rdx + jne .Lnot_same - movq %rbx, PER_CPU_VAR((%rsi)) - movq %rcx, PER_CPU_VAR(8(%rsi)) + /* *ptr = new */ + movq %rbx, PER_CPU_VAR(0(%rsi)) + movq %rcx, PER_CPU_VAR(8(%rsi)) + + /* set ZF in EFLAGS to indicate success */ + orl $X86_EFLAGS_ZF, (%rsp) popfq - mov $1, %al RET .Lnot_same: + /* *ptr != old */ + + /* old = *ptr */ + movq PER_CPU_VAR(0(%rsi)), %rax + movq PER_CPU_VAR(8(%rsi)), %rdx + + /* clear ZF in EFLAGS to indicate failure */ + andl $(~X86_EFLAGS_ZF), (%rsp) + popfq - xor %al,%al RET SYM_FUNC_END(this_cpu_cmpxchg16b_emu) diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S index 6a912d58fecc..49805257b125 100644 --- a/arch/x86/lib/cmpxchg8b_emu.S +++ b/arch/x86/lib/cmpxchg8b_emu.S @@ -2,10 +2,16 @@ #include <linux/linkage.h> #include <asm/export.h> +#include <asm/percpu.h> +#include <asm/processor-flags.h> .text +#ifndef CONFIG_X86_CMPXCHG64 + /* + * Emulate 'cmpxchg8b (%esi)' on UP + * * Inputs: * %esi : memory location to compare * %eax : low 32 bits of old value @@ -15,32 +21,65 @@ */ SYM_FUNC_START(cmpxchg8b_emu) -# -# Emulate 'cmpxchg8b (%esi)' on UP except we don't -# set the whole ZF thing (caller will just compare -# eax:edx with the expected value) -# pushfl cli - cmpl (%esi), %eax - jne .Lnot_same - cmpl 4(%esi), %edx - jne .Lhalf_same + cmpl 0(%esi), %eax + jne .Lnot_same + cmpl 4(%esi), %edx + jne .Lnot_same + + movl %ebx, 0(%esi) + movl %ecx, 4(%esi) - movl %ebx, (%esi) - movl %ecx, 4(%esi) + orl $X86_EFLAGS_ZF, (%esp) popfl RET .Lnot_same: - movl (%esi), %eax -.Lhalf_same: - movl 4(%esi), %edx + movl 0(%esi), %eax + movl 4(%esi), %edx + + andl $(~X86_EFLAGS_ZF), (%esp) popfl RET SYM_FUNC_END(cmpxchg8b_emu) EXPORT_SYMBOL(cmpxchg8b_emu) + +#endif + +#ifndef CONFIG_UML + +SYM_FUNC_START(this_cpu_cmpxchg8b_emu) + + pushfl + cli + + cmpl PER_CPU_VAR(0(%esi)), %eax + jne .Lnot_same2 + cmpl PER_CPU_VAR(4(%esi)), %edx + jne .Lnot_same2 + + movl %ebx, PER_CPU_VAR(0(%esi)) + movl %ecx, PER_CPU_VAR(4(%esi)) + + orl $X86_EFLAGS_ZF, (%esp) + + popfl + RET + +.Lnot_same2: + movl PER_CPU_VAR(0(%esi)), %eax + movl PER_CPU_VAR(4(%esi)), %edx + + andl $(~X86_EFLAGS_ZF), (%esp) + + popfl + RET + +SYM_FUNC_END(this_cpu_cmpxchg8b_emu) + +#endif diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 52da614f953c..7308b7f777d7 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -245,6 +245,11 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \ ATOMIC_OPS(add) ATOMIC_OPS(sub) +#define arch_atomic_add_return arch_atomic_add_return +#define arch_atomic_sub_return arch_atomic_sub_return +#define arch_atomic_fetch_add arch_atomic_fetch_add +#define arch_atomic_fetch_sub arch_atomic_fetch_sub + #undef ATOMIC_OPS #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) @@ -252,12 +257,13 @@ ATOMIC_OPS(and) ATOMIC_OPS(or) ATOMIC_OPS(xor) +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_xor arch_atomic_fetch_xor + #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n))) -#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) - #endif /* _XTENSA_ATOMIC_H */ diff --git 
a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 2ddbda3a4374..ab8aa8f77cc4 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -986,8 +986,13 @@ union irte_ga_hi { }; struct irte_ga { - union irte_ga_lo lo; - union irte_ga_hi hi; + union { + struct { + union irte_ga_lo lo; + union irte_ga_hi hi; + }; + u128 irte; + }; }; struct irq_2_irte { diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index e8a2e5984acb..9ea40960978b 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3023,10 +3023,10 @@ out: static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, struct irte_ga *irte, struct amd_ir_data *data) { - bool ret; struct irq_remap_table *table; - unsigned long flags; struct irte_ga *entry; + unsigned long flags; + u128 old; table = get_irq_table(iommu, devid); if (!table) @@ -3037,16 +3037,14 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, entry = (struct irte_ga *)table->table; entry = &entry[index]; - ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, - entry->lo.val, entry->hi.val, - irte->lo.val, irte->hi.val); /* * We use cmpxchg16 to atomically update the 128-bit IRTE, * and it cannot be updated by the hardware or other processors * behind us, so the return value of cmpxchg16 should be the * same as the old value. */ - WARN_ON(!ret); + old = entry->irte; + WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte)); if (data) data->ref = entry; diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index a1b987335b31..08f56326e2f8 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -175,18 +175,14 @@ static int modify_irte(struct irq_2_iommu *irq_iommu, irte = &iommu->ir_table->base[index]; if ((irte->pst == 1) || (irte_modified->pst == 1)) { - bool ret; - - ret = cmpxchg_double(&irte->low, &irte->high, - irte->low, irte->high, - irte_modified->low, irte_modified->high); /* * We use cmpxchg16 to atomically update the 128-bit IRTE, * and it cannot be updated by the hardware or other processors * behind us, so the return value of cmpxchg16 should be the * same as the old value. 
*/ - WARN_ON(!ret); + u128 old = irte->irte; + WARN_ON(!try_cmpxchg128(&irte->irte, &old, irte_modified->irte)); } else { WRITE_ONCE(irte->low, irte_modified->low); WRITE_ONCE(irte->high, irte_modified->high); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 68b9d7ca864e..fd121a61f17c 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) } } +#define cmp_int(l, r) ((l > r) - (l < r)) + +#ifdef CONFIG_PROVE_LOCKING +static int btree_lock_cmp_fn(const struct lockdep_map *_a, + const struct lockdep_map *_b) +{ + const struct btree *a = container_of(_a, struct btree, lock.dep_map); + const struct btree *b = container_of(_b, struct btree, lock.dep_map); + + return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key); +} + +static void btree_lock_print_fn(const struct lockdep_map *map) +{ + const struct btree *b = container_of(map, struct btree, lock.dep_map); + + printk(KERN_CONT " l=%u %llu:%llu", b->level, + KEY_INODE(&b->key), KEY_OFFSET(&b->key)); +} +#endif + static struct btree *mca_bucket_alloc(struct cache_set *c, struct bkey *k, gfp_t gfp) { @@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, return NULL; init_rwsem(&b->lock); - lockdep_set_novalidate_class(&b->lock); + lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn); mutex_init(&b->write_lock); lockdep_set_novalidate_class(&b->write_lock); INIT_LIST_HEAD(&b->list); diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index a2920bbfcad5..45d64b54115a 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) static inline void rw_lock(bool w, struct btree *b, int level) { - w ? down_write_nested(&b->lock, level + 1) - : down_read_nested(&b->lock, level + 1); + w ? 
down_write(&b->lock) + : down_read(&b->lock); if (w) b->seq++; } diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index e271d6708c87..22142c71d35a 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -130,7 +130,4 @@ ATOMIC_OP(xor, ^) #define arch_atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) -#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (u32)(v))) -#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new))) - #endif /* __ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 71ab4ba9c25d..e076e079f6b2 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -15,21 +15,21 @@ static __always_inline void arch_set_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); + raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); } static __always_inline void arch_clear_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); + raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); } static __always_inline void arch_change_bit(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); + raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); } static __always_inline int @@ -39,7 +39,7 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p) unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p); return !!(old & mask); } @@ -50,7 +50,7 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p) unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p); return !!(old & mask); } @@ -61,7 +61,7 @@ arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p) unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p); return !!(old & mask); } diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 630f2f6b9595..40913516e654 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -25,7 +25,7 @@ arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p) if (READ_ONCE(*p) & mask) return 1; - old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); return !!(old & mask); } @@ -41,7 +41,7 @@ static __always_inline void arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p) { p += BIT_WORD(nr); - arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); + raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); } /** @@ -63,7 +63,7 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) p += BIT_WORD(nr); old = READ_ONCE(*p); old &= ~BIT_MASK(nr); - arch_atomic_long_set_release((atomic_long_t *)p, old); + raw_atomic_long_set_release((atomic_long_t *)p, old); } /** @@ -83,7 +83,7 @@ static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, 
unsigned long mask = BIT_MASK(nr); p += BIT_WORD(nr); - old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } #define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 6432a7fade91..94cbd50cc870 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -89,27 +89,35 @@ do { \ __ret; \ }) -#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ +#define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \ +({ \ + typeof(pcp) __val, __old = *(ovalp); \ + __val = _cmpxchg(pcp, __old, nval); \ + if (__val != __old) \ + *(ovalp) = __val; \ + __val == __old; \ +}) + +#define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \ ({ \ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ - typeof(pcp) __ret; \ - __ret = *__p; \ - if (__ret == (oval)) \ + typeof(pcp) __val = *__p, ___old = *(ovalp); \ + bool __ret; \ + if (__val == ___old) { \ *__p = nval; \ + __ret = true; \ + } else { \ + *(ovalp) = __val; \ + __ret = false; \ + } \ __ret; \ }) -#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \ - typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \ - int __ret = 0; \ - if (*__p1 == (oval1) && *__p2 == (oval2)) { \ - *__p1 = nval1; \ - *__p2 = nval2; \ - __ret = 1; \ - } \ - (__ret); \ + typeof(pcp) __old = (oval); \ + raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \ + __old; \ }) #define __this_cpu_generic_read_nopreempt(pcp) \ @@ -170,23 +178,22 @@ do { \ __ret; \ }) -#define this_cpu_generic_cmpxchg(pcp, oval, nval) \ +#define this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \ ({ \ - typeof(pcp) __ret; \ + bool __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ - __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ + __ret = raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval); \ raw_local_irq_restore(__flags); \ __ret; \ }) -#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +#define this_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - int __ret; \ + typeof(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ - __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ - oval1, oval2, nval1, nval2); \ + __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ raw_local_irq_restore(__flags); \ __ret; \ }) @@ -282,6 +289,62 @@ do { \ #define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) #endif +#ifndef raw_cpu_try_cmpxchg_1 +#ifdef raw_cpu_cmpxchg_1 +#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_1) +#else +#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \ + raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef raw_cpu_try_cmpxchg_2 +#ifdef raw_cpu_cmpxchg_2 +#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_2) +#else +#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \ + raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef raw_cpu_try_cmpxchg_4 +#ifdef raw_cpu_cmpxchg_4 +#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_4) +#else +#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \ + raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef raw_cpu_try_cmpxchg_8 
+#ifdef raw_cpu_cmpxchg_8 +#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_8) +#else +#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \ + raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif + +#ifndef raw_cpu_try_cmpxchg64 +#ifdef raw_cpu_cmpxchg64 +#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg64) +#else +#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \ + raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef raw_cpu_try_cmpxchg128 +#ifdef raw_cpu_cmpxchg128 +#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg128) +#else +#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \ + raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif + #ifndef raw_cpu_cmpxchg_1 #define raw_cpu_cmpxchg_1(pcp, oval, nval) \ raw_cpu_generic_cmpxchg(pcp, oval, nval) @@ -299,21 +362,13 @@ do { \ raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif -#ifndef raw_cpu_cmpxchg_double_1 -#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -#endif -#ifndef raw_cpu_cmpxchg_double_2 -#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -#endif -#ifndef raw_cpu_cmpxchg_double_4 -#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#ifndef raw_cpu_cmpxchg64 +#define raw_cpu_cmpxchg64(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif -#ifndef raw_cpu_cmpxchg_double_8 -#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#ifndef raw_cpu_cmpxchg128 +#define raw_cpu_cmpxchg128(pcp, oval, nval) \ + raw_cpu_generic_cmpxchg(pcp, oval, nval) #endif #ifndef this_cpu_read_1 @@ -407,6 +462,62 @@ do { \ #define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval) #endif +#ifndef this_cpu_try_cmpxchg_1 +#ifdef this_cpu_cmpxchg_1 +#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_1) +#else +#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \ + this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef this_cpu_try_cmpxchg_2 +#ifdef this_cpu_cmpxchg_2 +#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_2) +#else +#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \ + this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef this_cpu_try_cmpxchg_4 +#ifdef this_cpu_cmpxchg_4 +#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_4) +#else +#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \ + this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef this_cpu_try_cmpxchg_8 +#ifdef this_cpu_cmpxchg_8 +#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_8) +#else +#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \ + this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif + +#ifndef this_cpu_try_cmpxchg64 +#ifdef this_cpu_cmpxchg64 +#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, 
this_cpu_cmpxchg64) +#else +#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \ + this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif +#ifndef this_cpu_try_cmpxchg128 +#ifdef this_cpu_cmpxchg128 +#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \ + __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg128) +#else +#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \ + this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) +#endif +#endif + #ifndef this_cpu_cmpxchg_1 #define this_cpu_cmpxchg_1(pcp, oval, nval) \ this_cpu_generic_cmpxchg(pcp, oval, nval) @@ -424,21 +535,13 @@ do { \ this_cpu_generic_cmpxchg(pcp, oval, nval) #endif -#ifndef this_cpu_cmpxchg_double_1 -#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -#endif -#ifndef this_cpu_cmpxchg_double_2 -#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -#endif -#ifndef this_cpu_cmpxchg_double_4 -#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#ifndef this_cpu_cmpxchg64 +#define this_cpu_cmpxchg64(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) #endif -#ifndef this_cpu_cmpxchg_double_8 -#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) +#ifndef this_cpu_cmpxchg128 +#define this_cpu_cmpxchg128(pcp, oval, nval) \ + this_cpu_generic_cmpxchg(pcp, oval, nval) #endif #endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h index 0b8e6bc55301..f3b37cbb3131 100644 --- a/include/crypto/b128ops.h +++ b/include/crypto/b128ops.h @@ -50,10 +50,6 @@ #include <linux/types.h> typedef struct { - u64 a, b; -} u128; - -typedef struct { __be64 a, b; } be128; @@ -61,20 +57,16 @@ typedef struct { __le64 b, a; } le128; -static inline void u128_xor(u128 *r, const u128 *p, const u128 *q) +static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) { r->a = p->a ^ q->a; r->b = p->b ^ q->b; } -static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) -{ - u128_xor((u128 *)r, (u128 *)p, (u128 *)q); -} - static inline void le128_xor(le128 *r, const le128 *p, const le128 *q) { - u128_xor((u128 *)r, (u128 *)p, (u128 *)q); + r->a = p->a ^ q->a; + r->b = p->b ^ q->b; } #endif /* _CRYPTO_B128OPS_H */ diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index a6e4437c5f36..18f5744dfb5d 100644 --- a/include/linux/atomic/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h @@ -8,2664 +8,4653 @@ #include <linux/compiler.h> -#ifndef arch_xchg_relaxed -#define arch_xchg_acquire arch_xchg -#define arch_xchg_release arch_xchg -#define arch_xchg_relaxed arch_xchg -#else /* arch_xchg_relaxed */ - -#ifndef arch_xchg_acquire -#define arch_xchg_acquire(...) \ - __atomic_op_acquire(arch_xchg, __VA_ARGS__) +#if defined(arch_xchg) +#define raw_xchg arch_xchg +#elif defined(arch_xchg_relaxed) +#define raw_xchg(...) \ + __atomic_op_fence(arch_xchg, __VA_ARGS__) +#else +extern void raw_xchg_not_implemented(void); +#define raw_xchg(...) raw_xchg_not_implemented() #endif -#ifndef arch_xchg_release -#define arch_xchg_release(...) 
\ - __atomic_op_release(arch_xchg, __VA_ARGS__) +#if defined(arch_xchg_acquire) +#define raw_xchg_acquire arch_xchg_acquire +#elif defined(arch_xchg_relaxed) +#define raw_xchg_acquire(...) \ + __atomic_op_acquire(arch_xchg, __VA_ARGS__) +#elif defined(arch_xchg) +#define raw_xchg_acquire arch_xchg +#else +extern void raw_xchg_acquire_not_implemented(void); +#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented() #endif -#ifndef arch_xchg -#define arch_xchg(...) \ - __atomic_op_fence(arch_xchg, __VA_ARGS__) +#if defined(arch_xchg_release) +#define raw_xchg_release arch_xchg_release +#elif defined(arch_xchg_relaxed) +#define raw_xchg_release(...) \ + __atomic_op_release(arch_xchg, __VA_ARGS__) +#elif defined(arch_xchg) +#define raw_xchg_release arch_xchg +#else +extern void raw_xchg_release_not_implemented(void); +#define raw_xchg_release(...) raw_xchg_release_not_implemented() +#endif + +#if defined(arch_xchg_relaxed) +#define raw_xchg_relaxed arch_xchg_relaxed +#elif defined(arch_xchg) +#define raw_xchg_relaxed arch_xchg +#else +extern void raw_xchg_relaxed_not_implemented(void); +#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented() +#endif + +#if defined(arch_cmpxchg) +#define raw_cmpxchg arch_cmpxchg +#elif defined(arch_cmpxchg_relaxed) +#define raw_cmpxchg(...) \ + __atomic_op_fence(arch_cmpxchg, __VA_ARGS__) +#else +extern void raw_cmpxchg_not_implemented(void); +#define raw_cmpxchg(...) raw_cmpxchg_not_implemented() #endif -#endif /* arch_xchg_relaxed */ - -#ifndef arch_cmpxchg_relaxed -#define arch_cmpxchg_acquire arch_cmpxchg -#define arch_cmpxchg_release arch_cmpxchg -#define arch_cmpxchg_relaxed arch_cmpxchg -#else /* arch_cmpxchg_relaxed */ - -#ifndef arch_cmpxchg_acquire -#define arch_cmpxchg_acquire(...) \ +#if defined(arch_cmpxchg_acquire) +#define raw_cmpxchg_acquire arch_cmpxchg_acquire +#elif defined(arch_cmpxchg_relaxed) +#define raw_cmpxchg_acquire(...) \ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__) +#elif defined(arch_cmpxchg) +#define raw_cmpxchg_acquire arch_cmpxchg +#else +extern void raw_cmpxchg_acquire_not_implemented(void); +#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented() #endif -#ifndef arch_cmpxchg_release -#define arch_cmpxchg_release(...) \ +#if defined(arch_cmpxchg_release) +#define raw_cmpxchg_release arch_cmpxchg_release +#elif defined(arch_cmpxchg_relaxed) +#define raw_cmpxchg_release(...) \ __atomic_op_release(arch_cmpxchg, __VA_ARGS__) +#elif defined(arch_cmpxchg) +#define raw_cmpxchg_release arch_cmpxchg +#else +extern void raw_cmpxchg_release_not_implemented(void); +#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented() +#endif + +#if defined(arch_cmpxchg_relaxed) +#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed +#elif defined(arch_cmpxchg) +#define raw_cmpxchg_relaxed arch_cmpxchg +#else +extern void raw_cmpxchg_relaxed_not_implemented(void); +#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented() +#endif + +#if defined(arch_cmpxchg64) +#define raw_cmpxchg64 arch_cmpxchg64 +#elif defined(arch_cmpxchg64_relaxed) +#define raw_cmpxchg64(...) \ + __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__) +#else +extern void raw_cmpxchg64_not_implemented(void); +#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented() #endif -#ifndef arch_cmpxchg -#define arch_cmpxchg(...) 
\ - __atomic_op_fence(arch_cmpxchg, __VA_ARGS__) -#endif - -#endif /* arch_cmpxchg_relaxed */ - -#ifndef arch_cmpxchg64_relaxed -#define arch_cmpxchg64_acquire arch_cmpxchg64 -#define arch_cmpxchg64_release arch_cmpxchg64 -#define arch_cmpxchg64_relaxed arch_cmpxchg64 -#else /* arch_cmpxchg64_relaxed */ - -#ifndef arch_cmpxchg64_acquire -#define arch_cmpxchg64_acquire(...) \ +#if defined(arch_cmpxchg64_acquire) +#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire +#elif defined(arch_cmpxchg64_relaxed) +#define raw_cmpxchg64_acquire(...) \ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__) +#elif defined(arch_cmpxchg64) +#define raw_cmpxchg64_acquire arch_cmpxchg64 +#else +extern void raw_cmpxchg64_acquire_not_implemented(void); +#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented() #endif -#ifndef arch_cmpxchg64_release -#define arch_cmpxchg64_release(...) \ +#if defined(arch_cmpxchg64_release) +#define raw_cmpxchg64_release arch_cmpxchg64_release +#elif defined(arch_cmpxchg64_relaxed) +#define raw_cmpxchg64_release(...) \ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__) +#elif defined(arch_cmpxchg64) +#define raw_cmpxchg64_release arch_cmpxchg64 +#else +extern void raw_cmpxchg64_release_not_implemented(void); +#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented() +#endif + +#if defined(arch_cmpxchg64_relaxed) +#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed +#elif defined(arch_cmpxchg64) +#define raw_cmpxchg64_relaxed arch_cmpxchg64 +#else +extern void raw_cmpxchg64_relaxed_not_implemented(void); +#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented() +#endif + +#if defined(arch_cmpxchg128) +#define raw_cmpxchg128 arch_cmpxchg128 +#elif defined(arch_cmpxchg128_relaxed) +#define raw_cmpxchg128(...) \ + __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__) +#else +extern void raw_cmpxchg128_not_implemented(void); +#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented() +#endif + +#if defined(arch_cmpxchg128_acquire) +#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire +#elif defined(arch_cmpxchg128_relaxed) +#define raw_cmpxchg128_acquire(...) \ + __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__) +#elif defined(arch_cmpxchg128) +#define raw_cmpxchg128_acquire arch_cmpxchg128 +#else +extern void raw_cmpxchg128_acquire_not_implemented(void); +#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented() +#endif + +#if defined(arch_cmpxchg128_release) +#define raw_cmpxchg128_release arch_cmpxchg128_release +#elif defined(arch_cmpxchg128_relaxed) +#define raw_cmpxchg128_release(...) \ + __atomic_op_release(arch_cmpxchg128, __VA_ARGS__) +#elif defined(arch_cmpxchg128) +#define raw_cmpxchg128_release arch_cmpxchg128 +#else +extern void raw_cmpxchg128_release_not_implemented(void); +#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented() +#endif + +#if defined(arch_cmpxchg128_relaxed) +#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed +#elif defined(arch_cmpxchg128) +#define raw_cmpxchg128_relaxed arch_cmpxchg128 +#else +extern void raw_cmpxchg128_relaxed_not_implemented(void); +#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented() +#endif + +#if defined(arch_try_cmpxchg) +#define raw_try_cmpxchg arch_try_cmpxchg +#elif defined(arch_try_cmpxchg_relaxed) +#define raw_try_cmpxchg(...) 
\ + __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__) +#else +#define raw_try_cmpxchg(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = raw_cmpxchg((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) #endif -#ifndef arch_cmpxchg64 -#define arch_cmpxchg64(...) \ - __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__) +#if defined(arch_try_cmpxchg_acquire) +#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire +#elif defined(arch_try_cmpxchg_relaxed) +#define raw_try_cmpxchg_acquire(...) \ + __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__) +#elif defined(arch_try_cmpxchg) +#define raw_try_cmpxchg_acquire arch_try_cmpxchg +#else +#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) #endif -#endif /* arch_cmpxchg64_relaxed */ - -#ifndef arch_try_cmpxchg_relaxed -#ifdef arch_try_cmpxchg -#define arch_try_cmpxchg_acquire arch_try_cmpxchg -#define arch_try_cmpxchg_release arch_try_cmpxchg -#define arch_try_cmpxchg_relaxed arch_try_cmpxchg -#endif /* arch_try_cmpxchg */ - -#ifndef arch_try_cmpxchg -#define arch_try_cmpxchg(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg_release) +#define raw_try_cmpxchg_release arch_try_cmpxchg_release +#elif defined(arch_try_cmpxchg_relaxed) +#define raw_try_cmpxchg_release(...) \ + __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__) +#elif defined(arch_try_cmpxchg) +#define raw_try_cmpxchg_release arch_try_cmpxchg +#else +#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg */ +#endif -#ifndef arch_try_cmpxchg_acquire -#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg_relaxed) +#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed +#elif defined(arch_try_cmpxchg) +#define raw_try_cmpxchg_relaxed arch_try_cmpxchg +#else +#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg_acquire */ +#endif -#ifndef arch_try_cmpxchg_release -#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg64) +#define raw_try_cmpxchg64 arch_try_cmpxchg64 +#elif defined(arch_try_cmpxchg64_relaxed) +#define raw_try_cmpxchg64(...) \ + __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__) +#else +#define raw_try_cmpxchg64(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg_release */ +#endif -#ifndef arch_try_cmpxchg_relaxed -#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg64_acquire) +#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire +#elif defined(arch_try_cmpxchg64_relaxed) +#define raw_try_cmpxchg64_acquire(...) 
\ + __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__) +#elif defined(arch_try_cmpxchg64) +#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64 +#else +#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg_relaxed */ - -#else /* arch_try_cmpxchg_relaxed */ - -#ifndef arch_try_cmpxchg_acquire -#define arch_try_cmpxchg_acquire(...) \ - __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__) #endif -#ifndef arch_try_cmpxchg_release -#define arch_try_cmpxchg_release(...) \ - __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__) +#if defined(arch_try_cmpxchg64_release) +#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release +#elif defined(arch_try_cmpxchg64_relaxed) +#define raw_try_cmpxchg64_release(...) \ + __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__) +#elif defined(arch_try_cmpxchg64) +#define raw_try_cmpxchg64_release arch_try_cmpxchg64 +#else +#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) #endif -#ifndef arch_try_cmpxchg -#define arch_try_cmpxchg(...) \ - __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__) +#if defined(arch_try_cmpxchg64_relaxed) +#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed +#elif defined(arch_try_cmpxchg64) +#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64 +#else +#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) #endif -#endif /* arch_try_cmpxchg_relaxed */ - -#ifndef arch_try_cmpxchg64_relaxed -#ifdef arch_try_cmpxchg64 -#define arch_try_cmpxchg64_acquire arch_try_cmpxchg64 -#define arch_try_cmpxchg64_release arch_try_cmpxchg64 -#define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64 -#endif /* arch_try_cmpxchg64 */ - -#ifndef arch_try_cmpxchg64 -#define arch_try_cmpxchg64(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg128) +#define raw_try_cmpxchg128 arch_try_cmpxchg128 +#elif defined(arch_try_cmpxchg128_relaxed) +#define raw_try_cmpxchg128(...) \ + __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__) +#else +#define raw_try_cmpxchg128(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg64((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg64 */ +#endif -#ifndef arch_try_cmpxchg64_acquire -#define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg128_acquire) +#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire +#elif defined(arch_try_cmpxchg128_relaxed) +#define raw_try_cmpxchg128_acquire(...) 
\ + __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__) +#elif defined(arch_try_cmpxchg128) +#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128 +#else +#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg64_acquire */ +#endif -#ifndef arch_try_cmpxchg64_release -#define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg128_release) +#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release +#elif defined(arch_try_cmpxchg128_relaxed) +#define raw_try_cmpxchg128_release(...) \ + __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__) +#elif defined(arch_try_cmpxchg128) +#define raw_try_cmpxchg128_release arch_try_cmpxchg128 +#else +#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg64_release */ +#endif -#ifndef arch_try_cmpxchg64_relaxed -#define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \ +#if defined(arch_try_cmpxchg128_relaxed) +#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed +#elif defined(arch_try_cmpxchg128) +#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128 +#else +#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg64_relaxed */ - -#else /* arch_try_cmpxchg64_relaxed */ - -#ifndef arch_try_cmpxchg64_acquire -#define arch_try_cmpxchg64_acquire(...) \ - __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__) #endif -#ifndef arch_try_cmpxchg64_release -#define arch_try_cmpxchg64_release(...) \ - __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__) -#endif +#define raw_cmpxchg_local arch_cmpxchg_local -#ifndef arch_try_cmpxchg64 -#define arch_try_cmpxchg64(...) 
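[Editor's aside] The raw_try_cmpxchg128*() wrappers above repeat the same shape at 128-bit width. User space has no direct equivalent of the kernel's u128 cmpxchg helpers, but gcc/clang expose comparable functionality through unsigned __int128 and the __atomic builtins; depending on the target this may be inlined (e.g. x86-64 with -mcx16) or routed through libatomic (-latomic). A hedged sketch under those assumptions; the u128 typedef and helper name are local to the example:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned __int128 u128;

/* try_cmpxchg-style helper on a 128-bit value; 16-byte alignment matters. */
static bool try_cmpxchg128(u128 *ptr, u128 *oldp, u128 new)
{
	return __atomic_compare_exchange_n(ptr, oldp, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	_Alignas(16) u128 v = ((u128)1 << 64) | 2;	/* hi=1, lo=2 */
	u128 old = v;

	if (try_cmpxchg128(&v, &old, ((u128)3 << 64) | 4))
		printf("hi=%llu lo=%llu\n",
		       (unsigned long long)(v >> 64),	/* 3 */
		       (unsigned long long)v);		/* 4 (low 64 bits) */
	return 0;
}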
\ - __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__) +#ifdef arch_try_cmpxchg_local +#define raw_try_cmpxchg_local arch_try_cmpxchg_local +#else +#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \ +({ \ + typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ + ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \ + if (unlikely(___r != ___o)) \ + *___op = ___r; \ + likely(___r == ___o); \ +}) #endif -#endif /* arch_try_cmpxchg64_relaxed */ +#define raw_cmpxchg64_local arch_cmpxchg64_local -#ifndef arch_try_cmpxchg_local -#define arch_try_cmpxchg_local(_ptr, _oldp, _new) \ +#ifdef arch_try_cmpxchg64_local +#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local +#else +#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg_local((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg_local */ +#endif + +#define raw_cmpxchg128_local arch_cmpxchg128_local -#ifndef arch_try_cmpxchg64_local -#define arch_try_cmpxchg64_local(_ptr, _oldp, _new) \ +#ifdef arch_try_cmpxchg128_local +#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local +#else +#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \ ({ \ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \ - ___r = arch_cmpxchg64_local((_ptr), ___o, (_new)); \ + ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \ if (unlikely(___r != ___o)) \ *___op = ___r; \ likely(___r == ___o); \ }) -#endif /* arch_try_cmpxchg64_local */ +#endif + +#define raw_sync_cmpxchg arch_sync_cmpxchg + +/** + * raw_atomic_read() - atomic load with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically loads the value of @v with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_read() elsewhere. + * + * Return: The value loaded from @v. + */ +static __always_inline int +raw_atomic_read(const atomic_t *v) +{ + return arch_atomic_read(v); +} -#ifndef arch_atomic_read_acquire +/** + * raw_atomic_read_acquire() - atomic load with acquire ordering + * @v: pointer to atomic_t + * + * Atomically loads the value of @v with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere. + * + * Return: The value loaded from @v. + */ static __always_inline int -arch_atomic_read_acquire(const atomic_t *v) +raw_atomic_read_acquire(const atomic_t *v) { +#if defined(arch_atomic_read_acquire) + return arch_atomic_read_acquire(v); +#elif defined(arch_atomic_read) + return arch_atomic_read(v); +#else int ret; if (__native_word(atomic_t)) { ret = smp_load_acquire(&(v)->counter); } else { - ret = arch_atomic_read(v); + ret = raw_atomic_read(v); __atomic_acquire_fence(); } return ret; -} -#define arch_atomic_read_acquire arch_atomic_read_acquire #endif +} -#ifndef arch_atomic_set_release +/** + * raw_atomic_set() - atomic set with relaxed ordering + * @v: pointer to atomic_t + * @i: int value to assign + * + * Atomically sets @v to @i with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_set() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_set_release(atomic_t *v, int i) +raw_atomic_set(atomic_t *v, int i) { + arch_atomic_set(v, i); +} + +/** + * raw_atomic_set_release() - atomic set with release ordering + * @v: pointer to atomic_t + * @i: int value to assign + * + * Atomically sets @v to @i with release ordering. 
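[Editor's aside] raw_atomic_read_acquire() and raw_atomic_set_release() fall back to smp_load_acquire()/smp_store_release() when atomic_t is a native word, or to a relaxed access plus an explicit fence otherwise. The point of the acquire/release pairing is message passing: data written before the release store is visible to whoever observes it with an acquire load. A user-space sketch of that pairing with C11 atomics and pthreads (compile with -pthread); the producer/consumer names are made up:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* plain data, published via "ready" */
static atomic_int ready;

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;						/* write data  */
	atomic_store_explicit(&ready, 1, memory_order_release);/* publish     */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;						/* acquire     */
	printf("payload = %d\n", payload);			/* sees 42     */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}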
+ * + * Safe to use in noinstr code; prefer atomic_set_release() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic_set_release(atomic_t *v, int i) +{ +#if defined(arch_atomic_set_release) + arch_atomic_set_release(v, i); +#elif defined(arch_atomic_set) + arch_atomic_set(v, i); +#else if (__native_word(atomic_t)) { smp_store_release(&(v)->counter, i); } else { __atomic_release_fence(); - arch_atomic_set(v, i); + raw_atomic_set(v, i); } -} -#define arch_atomic_set_release arch_atomic_set_release #endif - -#ifndef arch_atomic_add_return_relaxed -#define arch_atomic_add_return_acquire arch_atomic_add_return -#define arch_atomic_add_return_release arch_atomic_add_return -#define arch_atomic_add_return_relaxed arch_atomic_add_return -#else /* arch_atomic_add_return_relaxed */ - -#ifndef arch_atomic_add_return_acquire -static __always_inline int -arch_atomic_add_return_acquire(int i, atomic_t *v) -{ - int ret = arch_atomic_add_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; } -#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire -#endif -#ifndef arch_atomic_add_return_release -static __always_inline int -arch_atomic_add_return_release(int i, atomic_t *v) +/** + * raw_atomic_add() - atomic add with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_add() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic_add(int i, atomic_t *v) { - __atomic_release_fence(); - return arch_atomic_add_return_relaxed(i, v); + arch_atomic_add(i, v); } -#define arch_atomic_add_return_release arch_atomic_add_return_release -#endif -#ifndef arch_atomic_add_return +/** + * raw_atomic_add_return() - atomic add with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_add_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_add_return(int i, atomic_t *v) +raw_atomic_add_return(int i, atomic_t *v) { +#if defined(arch_atomic_add_return) + return arch_atomic_add_return(i, v); +#elif defined(arch_atomic_add_return_relaxed) int ret; __atomic_pre_full_fence(); ret = arch_atomic_add_return_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_add_return arch_atomic_add_return +#else +#error "Unable to define raw_atomic_add_return" #endif +} -#endif /* arch_atomic_add_return_relaxed */ - -#ifndef arch_atomic_fetch_add_relaxed -#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add -#define arch_atomic_fetch_add_release arch_atomic_fetch_add -#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add -#else /* arch_atomic_fetch_add_relaxed */ - -#ifndef arch_atomic_fetch_add_acquire +/** + * raw_atomic_add_return_acquire() - atomic add with acquire ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere. + * + * Return: The updated value of @v. 
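[Editor's aside] raw_atomic_add_return() prefers the arch's fully ordered op, otherwise brackets the relaxed op with __atomic_pre_full_fence()/__atomic_post_full_fence(), and otherwise fails the build with #error rather than silently picking something weaker. A user-space approximation of the bracketing idea; the kernel's pre/post full fences are arch-specific and not necessarily plain seq_cst fences, so treat this only as a sketch:

#include <stdatomic.h>
#include <stdio.h>

/* Fully ordered add_return built from a relaxed RMW bracketed by fences. */
static int add_return_full(atomic_int *v, int i)
{
	int ret;

	atomic_thread_fence(memory_order_seq_cst);	/* "pre" full fence  */
	ret = atomic_fetch_add_explicit(v, i, memory_order_relaxed) + i;
	atomic_thread_fence(memory_order_seq_cst);	/* "post" full fence */
	return ret;
}

int main(void)
{
	atomic_int v = 40;

	printf("%d\n", add_return_full(&v, 2));		/* prints 42 */
	return 0;
}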
+ */ static __always_inline int -arch_atomic_fetch_add_acquire(int i, atomic_t *v) +raw_atomic_add_return_acquire(int i, atomic_t *v) { - int ret = arch_atomic_fetch_add_relaxed(i, v); +#if defined(arch_atomic_add_return_acquire) + return arch_atomic_add_return_acquire(i, v); +#elif defined(arch_atomic_add_return_relaxed) + int ret = arch_atomic_add_return_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire +#elif defined(arch_atomic_add_return) + return arch_atomic_add_return(i, v); +#else +#error "Unable to define raw_atomic_add_return_acquire" #endif +} -#ifndef arch_atomic_fetch_add_release +/** + * raw_atomic_add_return_release() - atomic add with release ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_fetch_add_release(int i, atomic_t *v) +raw_atomic_add_return_release(int i, atomic_t *v) { +#if defined(arch_atomic_add_return_release) + return arch_atomic_add_return_release(i, v); +#elif defined(arch_atomic_add_return_relaxed) __atomic_release_fence(); - return arch_atomic_fetch_add_relaxed(i, v); + return arch_atomic_add_return_relaxed(i, v); +#elif defined(arch_atomic_add_return) + return arch_atomic_add_return(i, v); +#else +#error "Unable to define raw_atomic_add_return_release" +#endif } -#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release + +/** + * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ +static __always_inline int +raw_atomic_add_return_relaxed(int i, atomic_t *v) +{ +#if defined(arch_atomic_add_return_relaxed) + return arch_atomic_add_return_relaxed(i, v); +#elif defined(arch_atomic_add_return) + return arch_atomic_add_return(i, v); +#else +#error "Unable to define raw_atomic_add_return_relaxed" #endif +} -#ifndef arch_atomic_fetch_add +/** + * raw_atomic_fetch_add() - atomic add with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_fetch_add(int i, atomic_t *v) +raw_atomic_fetch_add(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_add) + return arch_atomic_fetch_add(i, v); +#elif defined(arch_atomic_fetch_add_relaxed) int ret; __atomic_pre_full_fence(); ret = arch_atomic_fetch_add_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_fetch_add arch_atomic_fetch_add +#else +#error "Unable to define raw_atomic_fetch_add" #endif +} -#endif /* arch_atomic_fetch_add_relaxed */ - -#ifndef arch_atomic_sub_return_relaxed -#define arch_atomic_sub_return_acquire arch_atomic_sub_return -#define arch_atomic_sub_return_release arch_atomic_sub_return -#define arch_atomic_sub_return_relaxed arch_atomic_sub_return -#else /* arch_atomic_sub_return_relaxed */ - -#ifndef arch_atomic_sub_return_acquire +/** + * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_sub_return_acquire(int i, atomic_t *v) +raw_atomic_fetch_add_acquire(int i, atomic_t *v) { - int ret = arch_atomic_sub_return_relaxed(i, v); +#if defined(arch_atomic_fetch_add_acquire) + return arch_atomic_fetch_add_acquire(i, v); +#elif defined(arch_atomic_fetch_add_relaxed) + int ret = arch_atomic_fetch_add_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire +#elif defined(arch_atomic_fetch_add) + return arch_atomic_fetch_add(i, v); +#else +#error "Unable to define raw_atomic_fetch_add_acquire" #endif +} -#ifndef arch_atomic_sub_return_release +/** + * raw_atomic_fetch_add_release() - atomic add with release ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_sub_return_release(int i, atomic_t *v) +raw_atomic_fetch_add_release(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_add_release) + return arch_atomic_fetch_add_release(i, v); +#elif defined(arch_atomic_fetch_add_relaxed) __atomic_release_fence(); - return arch_atomic_sub_return_relaxed(i, v); + return arch_atomic_fetch_add_relaxed(i, v); +#elif defined(arch_atomic_fetch_add) + return arch_atomic_fetch_add(i, v); +#else +#error "Unable to define raw_atomic_fetch_add_release" +#endif } -#define arch_atomic_sub_return_release arch_atomic_sub_return_release + +/** + * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere. + * + * Return: The original value of @v. 
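[Editor's aside] The new kerneldoc spells out the difference between the two families: the *_return ops return "The updated value of @v", the fetch_* ops return "The original value of @v". A tiny C11 illustration of that distinction (the comments show the expected output):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int v = 10;

	/* fetch_add: returns the value *before* the addition */
	printf("fetch_add  -> %d\n", atomic_fetch_add(&v, 5));		/* 10 */

	/* add_return equivalent: old value + operand, i.e. the *new* value */
	printf("add_return -> %d\n", atomic_fetch_add(&v, 5) + 5);	/* 20 */

	printf("final      -> %d\n", atomic_load(&v));			/* 20 */
	return 0;
}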
+ */ +static __always_inline int +raw_atomic_fetch_add_relaxed(int i, atomic_t *v) +{ +#if defined(arch_atomic_fetch_add_relaxed) + return arch_atomic_fetch_add_relaxed(i, v); +#elif defined(arch_atomic_fetch_add) + return arch_atomic_fetch_add(i, v); +#else +#error "Unable to define raw_atomic_fetch_add_relaxed" #endif +} -#ifndef arch_atomic_sub_return +/** + * raw_atomic_sub() - atomic subtract with relaxed ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_sub() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic_sub(int i, atomic_t *v) +{ + arch_atomic_sub(i, v); +} + +/** + * raw_atomic_sub_return() - atomic subtract with full ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_sub_return(int i, atomic_t *v) +raw_atomic_sub_return(int i, atomic_t *v) { +#if defined(arch_atomic_sub_return) + return arch_atomic_sub_return(i, v); +#elif defined(arch_atomic_sub_return_relaxed) int ret; __atomic_pre_full_fence(); ret = arch_atomic_sub_return_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_sub_return arch_atomic_sub_return +#else +#error "Unable to define raw_atomic_sub_return" #endif +} -#endif /* arch_atomic_sub_return_relaxed */ - -#ifndef arch_atomic_fetch_sub_relaxed -#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub -#else /* arch_atomic_fetch_sub_relaxed */ - -#ifndef arch_atomic_fetch_sub_acquire +/** + * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_fetch_sub_acquire(int i, atomic_t *v) +raw_atomic_sub_return_acquire(int i, atomic_t *v) { - int ret = arch_atomic_fetch_sub_relaxed(i, v); +#if defined(arch_atomic_sub_return_acquire) + return arch_atomic_sub_return_acquire(i, v); +#elif defined(arch_atomic_sub_return_relaxed) + int ret = arch_atomic_sub_return_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire +#elif defined(arch_atomic_sub_return) + return arch_atomic_sub_return(i, v); +#else +#error "Unable to define raw_atomic_sub_return_acquire" #endif +} -#ifndef arch_atomic_fetch_sub_release +/** + * raw_atomic_sub_return_release() - atomic subtract with release ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere. + * + * Return: The updated value of @v. 
+ */ static __always_inline int -arch_atomic_fetch_sub_release(int i, atomic_t *v) +raw_atomic_sub_return_release(int i, atomic_t *v) { +#if defined(arch_atomic_sub_return_release) + return arch_atomic_sub_return_release(i, v); +#elif defined(arch_atomic_sub_return_relaxed) __atomic_release_fence(); - return arch_atomic_fetch_sub_relaxed(i, v); + return arch_atomic_sub_return_relaxed(i, v); +#elif defined(arch_atomic_sub_return) + return arch_atomic_sub_return(i, v); +#else +#error "Unable to define raw_atomic_sub_return_release" +#endif } -#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release + +/** + * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ +static __always_inline int +raw_atomic_sub_return_relaxed(int i, atomic_t *v) +{ +#if defined(arch_atomic_sub_return_relaxed) + return arch_atomic_sub_return_relaxed(i, v); +#elif defined(arch_atomic_sub_return) + return arch_atomic_sub_return(i, v); +#else +#error "Unable to define raw_atomic_sub_return_relaxed" #endif +} -#ifndef arch_atomic_fetch_sub +/** + * raw_atomic_fetch_sub() - atomic subtract with full ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_sub(int i, atomic_t *v) +raw_atomic_fetch_sub(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_sub) + return arch_atomic_fetch_sub(i, v); +#elif defined(arch_atomic_fetch_sub_relaxed) int ret; __atomic_pre_full_fence(); ret = arch_atomic_fetch_sub_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#else +#error "Unable to define raw_atomic_fetch_sub" #endif - -#endif /* arch_atomic_fetch_sub_relaxed */ - -#ifndef arch_atomic_inc -static __always_inline void -arch_atomic_inc(atomic_t *v) -{ - arch_atomic_add(1, v); } -#define arch_atomic_inc arch_atomic_inc -#endif - -#ifndef arch_atomic_inc_return_relaxed -#ifdef arch_atomic_inc_return -#define arch_atomic_inc_return_acquire arch_atomic_inc_return -#define arch_atomic_inc_return_release arch_atomic_inc_return -#define arch_atomic_inc_return_relaxed arch_atomic_inc_return -#endif /* arch_atomic_inc_return */ -#ifndef arch_atomic_inc_return +/** + * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_inc_return(atomic_t *v) +raw_atomic_fetch_sub_acquire(int i, atomic_t *v) { - return arch_atomic_add_return(1, v); -} -#define arch_atomic_inc_return arch_atomic_inc_return +#if defined(arch_atomic_fetch_sub_acquire) + return arch_atomic_fetch_sub_acquire(i, v); +#elif defined(arch_atomic_fetch_sub_relaxed) + int ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic_fetch_sub) + return arch_atomic_fetch_sub(i, v); +#else +#error "Unable to define raw_atomic_fetch_sub_acquire" #endif - -#ifndef arch_atomic_inc_return_acquire -static __always_inline int -arch_atomic_inc_return_acquire(atomic_t *v) -{ - return arch_atomic_add_return_acquire(1, v); } -#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire -#endif -#ifndef arch_atomic_inc_return_release +/** + * raw_atomic_fetch_sub_release() - atomic subtract with release ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_inc_return_release(atomic_t *v) +raw_atomic_fetch_sub_release(int i, atomic_t *v) { - return arch_atomic_add_return_release(1, v); -} -#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#if defined(arch_atomic_fetch_sub_release) + return arch_atomic_fetch_sub_release(i, v); +#elif defined(arch_atomic_fetch_sub_relaxed) + __atomic_release_fence(); + return arch_atomic_fetch_sub_relaxed(i, v); +#elif defined(arch_atomic_fetch_sub) + return arch_atomic_fetch_sub(i, v); +#else +#error "Unable to define raw_atomic_fetch_sub_release" #endif - -#ifndef arch_atomic_inc_return_relaxed -static __always_inline int -arch_atomic_inc_return_relaxed(atomic_t *v) -{ - return arch_atomic_add_return_relaxed(1, v); } -#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed -#endif -#else /* arch_atomic_inc_return_relaxed */ - -#ifndef arch_atomic_inc_return_acquire +/** + * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_inc_return_acquire(atomic_t *v) +raw_atomic_fetch_sub_relaxed(int i, atomic_t *v) { - int ret = arch_atomic_inc_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#if defined(arch_atomic_fetch_sub_relaxed) + return arch_atomic_fetch_sub_relaxed(i, v); +#elif defined(arch_atomic_fetch_sub) + return arch_atomic_fetch_sub(i, v); +#else +#error "Unable to define raw_atomic_fetch_sub_relaxed" #endif +} -#ifndef arch_atomic_inc_return_release -static __always_inline int -arch_atomic_inc_return_release(atomic_t *v) +/** + * raw_atomic_inc() - atomic increment with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_inc() elsewhere. + * + * Return: Nothing. 
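[Editor's aside] raw_atomic_inc() and raw_atomic_dec() bottom out in raw_atomic_add(1, v) and raw_atomic_sub(1, v) when the architecture has no dedicated increment/decrement ops, and the *_return and fetch_* forms follow the same shape. A trivial user-space sketch of that layering; the my_* names are invented:

#include <stdatomic.h>
#include <stdio.h>

static void my_inc(atomic_int *v)
{
	atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
}

static int my_inc_return(atomic_int *v)
{
	/* fetch_add returns the old value, so add 1 to get the new one */
	return atomic_fetch_add_explicit(v, 1, memory_order_seq_cst) + 1;
}

static void my_dec(atomic_int *v)
{
	atomic_fetch_sub_explicit(v, 1, memory_order_relaxed);
}

int main(void)
{
	atomic_int v = 0;

	my_inc(&v);
	printf("%d\n", my_inc_return(&v));	/* prints 2 */
	my_dec(&v);
	printf("%d\n", atomic_load(&v));	/* prints 1 */
	return 0;
}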
+ */ +static __always_inline void +raw_atomic_inc(atomic_t *v) { - __atomic_release_fence(); - return arch_atomic_inc_return_relaxed(v); -} -#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#if defined(arch_atomic_inc) + arch_atomic_inc(v); +#else + raw_atomic_add(1, v); #endif +} -#ifndef arch_atomic_inc_return +/** + * raw_atomic_inc_return() - atomic increment with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_inc_return(atomic_t *v) +raw_atomic_inc_return(atomic_t *v) { +#if defined(arch_atomic_inc_return) + return arch_atomic_inc_return(v); +#elif defined(arch_atomic_inc_return_relaxed) int ret; __atomic_pre_full_fence(); ret = arch_atomic_inc_return_relaxed(v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_inc_return arch_atomic_inc_return +#else + return raw_atomic_add_return(1, v); #endif +} -#endif /* arch_atomic_inc_return_relaxed */ - -#ifndef arch_atomic_fetch_inc_relaxed -#ifdef arch_atomic_fetch_inc -#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc -#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc -#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc -#endif /* arch_atomic_fetch_inc */ - -#ifndef arch_atomic_fetch_inc +/** + * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_fetch_inc(atomic_t *v) +raw_atomic_inc_return_acquire(atomic_t *v) { - return arch_atomic_fetch_add(1, v); -} -#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#if defined(arch_atomic_inc_return_acquire) + return arch_atomic_inc_return_acquire(v); +#elif defined(arch_atomic_inc_return_relaxed) + int ret = arch_atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic_inc_return) + return arch_atomic_inc_return(v); +#else + return raw_atomic_add_return_acquire(1, v); #endif +} -#ifndef arch_atomic_fetch_inc_acquire +/** + * raw_atomic_inc_return_release() - atomic increment with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_fetch_inc_acquire(atomic_t *v) +raw_atomic_inc_return_release(atomic_t *v) { - return arch_atomic_fetch_add_acquire(1, v); -} -#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#if defined(arch_atomic_inc_return_release) + return arch_atomic_inc_return_release(v); +#elif defined(arch_atomic_inc_return_relaxed) + __atomic_release_fence(); + return arch_atomic_inc_return_relaxed(v); +#elif defined(arch_atomic_inc_return) + return arch_atomic_inc_return(v); +#else + return raw_atomic_add_return_release(1, v); #endif +} -#ifndef arch_atomic_fetch_inc_release +/** + * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere. 
+ * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_fetch_inc_release(atomic_t *v) +raw_atomic_inc_return_relaxed(atomic_t *v) { - return arch_atomic_fetch_add_release(1, v); -} -#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#if defined(arch_atomic_inc_return_relaxed) + return arch_atomic_inc_return_relaxed(v); +#elif defined(arch_atomic_inc_return) + return arch_atomic_inc_return(v); +#else + return raw_atomic_add_return_relaxed(1, v); #endif +} -#ifndef arch_atomic_fetch_inc_relaxed +/** + * raw_atomic_fetch_inc() - atomic increment with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_inc_relaxed(atomic_t *v) +raw_atomic_fetch_inc(atomic_t *v) { - return arch_atomic_fetch_add_relaxed(1, v); -} -#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed +#if defined(arch_atomic_fetch_inc) + return arch_atomic_fetch_inc(v); +#elif defined(arch_atomic_fetch_inc_relaxed) + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic_fetch_add(1, v); #endif +} -#else /* arch_atomic_fetch_inc_relaxed */ - -#ifndef arch_atomic_fetch_inc_acquire +/** + * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_inc_acquire(atomic_t *v) +raw_atomic_fetch_inc_acquire(atomic_t *v) { +#if defined(arch_atomic_fetch_inc_acquire) + return arch_atomic_fetch_inc_acquire(v); +#elif defined(arch_atomic_fetch_inc_relaxed) int ret = arch_atomic_fetch_inc_relaxed(v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#elif defined(arch_atomic_fetch_inc) + return arch_atomic_fetch_inc(v); +#else + return raw_atomic_fetch_add_acquire(1, v); #endif +} -#ifndef arch_atomic_fetch_inc_release +/** + * raw_atomic_fetch_inc_release() - atomic increment with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_inc_release(atomic_t *v) +raw_atomic_fetch_inc_release(atomic_t *v) { +#if defined(arch_atomic_fetch_inc_release) + return arch_atomic_fetch_inc_release(v); +#elif defined(arch_atomic_fetch_inc_relaxed) __atomic_release_fence(); return arch_atomic_fetch_inc_relaxed(v); -} -#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#elif defined(arch_atomic_fetch_inc) + return arch_atomic_fetch_inc(v); +#else + return raw_atomic_fetch_add_release(1, v); #endif +} -#ifndef arch_atomic_fetch_inc +/** + * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_fetch_inc(atomic_t *v) +raw_atomic_fetch_inc_relaxed(atomic_t *v) { - int ret; - __atomic_pre_full_fence(); - ret = arch_atomic_fetch_inc_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#if defined(arch_atomic_fetch_inc_relaxed) + return arch_atomic_fetch_inc_relaxed(v); +#elif defined(arch_atomic_fetch_inc) + return arch_atomic_fetch_inc(v); +#else + return raw_atomic_fetch_add_relaxed(1, v); #endif +} -#endif /* arch_atomic_fetch_inc_relaxed */ - -#ifndef arch_atomic_dec +/** + * raw_atomic_dec() - atomic decrement with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_dec() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_dec(atomic_t *v) +raw_atomic_dec(atomic_t *v) { - arch_atomic_sub(1, v); -} -#define arch_atomic_dec arch_atomic_dec +#if defined(arch_atomic_dec) + arch_atomic_dec(v); +#else + raw_atomic_sub(1, v); #endif - -#ifndef arch_atomic_dec_return_relaxed -#ifdef arch_atomic_dec_return -#define arch_atomic_dec_return_acquire arch_atomic_dec_return -#define arch_atomic_dec_return_release arch_atomic_dec_return -#define arch_atomic_dec_return_relaxed arch_atomic_dec_return -#endif /* arch_atomic_dec_return */ - -#ifndef arch_atomic_dec_return -static __always_inline int -arch_atomic_dec_return(atomic_t *v) -{ - return arch_atomic_sub_return(1, v); } -#define arch_atomic_dec_return arch_atomic_dec_return -#endif -#ifndef arch_atomic_dec_return_acquire -static __always_inline int -arch_atomic_dec_return_acquire(atomic_t *v) -{ - return arch_atomic_sub_return_acquire(1, v); -} -#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire -#endif - -#ifndef arch_atomic_dec_return_release +/** + * raw_atomic_dec_return() - atomic decrement with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_dec_return_release(atomic_t *v) +raw_atomic_dec_return(atomic_t *v) { - return arch_atomic_sub_return_release(1, v); -} -#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#if defined(arch_atomic_dec_return) + return arch_atomic_dec_return(v); +#elif defined(arch_atomic_dec_return_relaxed) + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic_sub_return(1, v); #endif - -#ifndef arch_atomic_dec_return_relaxed -static __always_inline int -arch_atomic_dec_return_relaxed(atomic_t *v) -{ - return arch_atomic_sub_return_relaxed(1, v); } -#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed -#endif - -#else /* arch_atomic_dec_return_relaxed */ -#ifndef arch_atomic_dec_return_acquire +/** + * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere. + * + * Return: The updated value of @v. 
+ */ static __always_inline int -arch_atomic_dec_return_acquire(atomic_t *v) +raw_atomic_dec_return_acquire(atomic_t *v) { +#if defined(arch_atomic_dec_return_acquire) + return arch_atomic_dec_return_acquire(v); +#elif defined(arch_atomic_dec_return_relaxed) int ret = arch_atomic_dec_return_relaxed(v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#elif defined(arch_atomic_dec_return) + return arch_atomic_dec_return(v); +#else + return raw_atomic_sub_return_acquire(1, v); #endif +} -#ifndef arch_atomic_dec_return_release +/** + * raw_atomic_dec_return_release() - atomic decrement with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline int -arch_atomic_dec_return_release(atomic_t *v) +raw_atomic_dec_return_release(atomic_t *v) { +#if defined(arch_atomic_dec_return_release) + return arch_atomic_dec_return_release(v); +#elif defined(arch_atomic_dec_return_relaxed) __atomic_release_fence(); return arch_atomic_dec_return_relaxed(v); +#elif defined(arch_atomic_dec_return) + return arch_atomic_dec_return(v); +#else + return raw_atomic_sub_return_release(1, v); +#endif } -#define arch_atomic_dec_return_release arch_atomic_dec_return_release + +/** + * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ +static __always_inline int +raw_atomic_dec_return_relaxed(atomic_t *v) +{ +#if defined(arch_atomic_dec_return_relaxed) + return arch_atomic_dec_return_relaxed(v); +#elif defined(arch_atomic_dec_return) + return arch_atomic_dec_return(v); +#else + return raw_atomic_sub_return_relaxed(1, v); #endif +} -#ifndef arch_atomic_dec_return +/** + * raw_atomic_fetch_dec() - atomic decrement with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_dec_return(atomic_t *v) +raw_atomic_fetch_dec(atomic_t *v) { +#if defined(arch_atomic_fetch_dec) + return arch_atomic_fetch_dec(v); +#elif defined(arch_atomic_fetch_dec_relaxed) int ret; __atomic_pre_full_fence(); - ret = arch_atomic_dec_return_relaxed(v); + ret = arch_atomic_fetch_dec_relaxed(v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_dec_return arch_atomic_dec_return +#else + return raw_atomic_fetch_sub(1, v); #endif - -#endif /* arch_atomic_dec_return_relaxed */ - -#ifndef arch_atomic_fetch_dec_relaxed -#ifdef arch_atomic_fetch_dec -#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec -#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec -#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec -#endif /* arch_atomic_fetch_dec */ - -#ifndef arch_atomic_fetch_dec -static __always_inline int -arch_atomic_fetch_dec(atomic_t *v) -{ - return arch_atomic_fetch_sub(1, v); } -#define arch_atomic_fetch_dec arch_atomic_fetch_dec -#endif -#ifndef arch_atomic_fetch_dec_acquire +/** + * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_dec_acquire(atomic_t *v) +raw_atomic_fetch_dec_acquire(atomic_t *v) { - return arch_atomic_fetch_sub_acquire(1, v); -} -#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#if defined(arch_atomic_fetch_dec_acquire) + return arch_atomic_fetch_dec_acquire(v); +#elif defined(arch_atomic_fetch_dec_relaxed) + int ret = arch_atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic_fetch_dec) + return arch_atomic_fetch_dec(v); +#else + return raw_atomic_fetch_sub_acquire(1, v); #endif - -#ifndef arch_atomic_fetch_dec_release -static __always_inline int -arch_atomic_fetch_dec_release(atomic_t *v) -{ - return arch_atomic_fetch_sub_release(1, v); } -#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release -#endif -#ifndef arch_atomic_fetch_dec_relaxed +/** + * raw_atomic_fetch_dec_release() - atomic decrement with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_dec_relaxed(atomic_t *v) +raw_atomic_fetch_dec_release(atomic_t *v) { - return arch_atomic_fetch_sub_relaxed(1, v); -} -#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed +#if defined(arch_atomic_fetch_dec_release) + return arch_atomic_fetch_dec_release(v); +#elif defined(arch_atomic_fetch_dec_relaxed) + __atomic_release_fence(); + return arch_atomic_fetch_dec_relaxed(v); +#elif defined(arch_atomic_fetch_dec) + return arch_atomic_fetch_dec(v); +#else + return raw_atomic_fetch_sub_release(1, v); #endif +} -#else /* arch_atomic_fetch_dec_relaxed */ - -#ifndef arch_atomic_fetch_dec_acquire +/** + * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere. + * + * Return: The original value of @v. 
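[Editor's aside] A classic consumer of fetch_dec/dec_return with explicit ordering is reference counting: the drop uses release ordering so earlier accesses to the object happen before the count can reach zero, and the thread that does hit zero takes an acquire fence before tearing the object down. In-kernel code should use refcount_t rather than open-coding this; the sketch below shows only the bare pattern with C11 atomics, and struct obj/obj_put() are hypothetical names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
	int data;
};

static void obj_put(struct obj *o)
{
	/* Release: our earlier accesses to *o are ordered before the drop. */
	if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_release) == 1) {
		/* Acquire: observe every other owner's accesses before freeing. */
		atomic_thread_fence(memory_order_acquire);
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refs, 2);	/* two conceptual owners */
	o->data = 42;

	obj_put(o);			/* refs 2 -> 1, no free */
	obj_put(o);			/* refs 1 -> 0, freed   */
	printf("done\n");
	return 0;
}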
+ */ static __always_inline int -arch_atomic_fetch_dec_acquire(atomic_t *v) +raw_atomic_fetch_dec_relaxed(atomic_t *v) { - int ret = arch_atomic_fetch_dec_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#if defined(arch_atomic_fetch_dec_relaxed) + return arch_atomic_fetch_dec_relaxed(v); +#elif defined(arch_atomic_fetch_dec) + return arch_atomic_fetch_dec(v); +#else + return raw_atomic_fetch_sub_relaxed(1, v); #endif +} -#ifndef arch_atomic_fetch_dec_release -static __always_inline int -arch_atomic_fetch_dec_release(atomic_t *v) +/** + * raw_atomic_and() - atomic bitwise AND with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_and() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic_and(int i, atomic_t *v) { - __atomic_release_fence(); - return arch_atomic_fetch_dec_relaxed(v); + arch_atomic_and(i, v); } -#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release -#endif -#ifndef arch_atomic_fetch_dec +/** + * raw_atomic_fetch_and() - atomic bitwise AND with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_dec(atomic_t *v) +raw_atomic_fetch_and(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_and) + return arch_atomic_fetch_and(i, v); +#elif defined(arch_atomic_fetch_and_relaxed) int ret; __atomic_pre_full_fence(); - ret = arch_atomic_fetch_dec_relaxed(v); + ret = arch_atomic_fetch_and_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#else +#error "Unable to define raw_atomic_fetch_and" #endif +} -#endif /* arch_atomic_fetch_dec_relaxed */ - -#ifndef arch_atomic_fetch_and_relaxed -#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and -#define arch_atomic_fetch_and_release arch_atomic_fetch_and -#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and -#else /* arch_atomic_fetch_and_relaxed */ - -#ifndef arch_atomic_fetch_and_acquire +/** + * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_and_acquire(int i, atomic_t *v) +raw_atomic_fetch_and_acquire(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_and_acquire) + return arch_atomic_fetch_and_acquire(i, v); +#elif defined(arch_atomic_fetch_and_relaxed) int ret = arch_atomic_fetch_and_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire +#elif defined(arch_atomic_fetch_and) + return arch_atomic_fetch_and(i, v); +#else +#error "Unable to define raw_atomic_fetch_and_acquire" #endif +} -#ifndef arch_atomic_fetch_and_release +/** + * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere. 
+ * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_and_release(int i, atomic_t *v) +raw_atomic_fetch_and_release(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_and_release) + return arch_atomic_fetch_and_release(i, v); +#elif defined(arch_atomic_fetch_and_relaxed) __atomic_release_fence(); return arch_atomic_fetch_and_relaxed(i, v); -} -#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release +#elif defined(arch_atomic_fetch_and) + return arch_atomic_fetch_and(i, v); +#else +#error "Unable to define raw_atomic_fetch_and_release" #endif +} -#ifndef arch_atomic_fetch_and +/** + * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_and(int i, atomic_t *v) +raw_atomic_fetch_and_relaxed(int i, atomic_t *v) { - int ret; - __atomic_pre_full_fence(); - ret = arch_atomic_fetch_and_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define arch_atomic_fetch_and arch_atomic_fetch_and +#if defined(arch_atomic_fetch_and_relaxed) + return arch_atomic_fetch_and_relaxed(i, v); +#elif defined(arch_atomic_fetch_and) + return arch_atomic_fetch_and(i, v); +#else +#error "Unable to define raw_atomic_fetch_and_relaxed" #endif - -#endif /* arch_atomic_fetch_and_relaxed */ - -#ifndef arch_atomic_andnot -static __always_inline void -arch_atomic_andnot(int i, atomic_t *v) -{ - arch_atomic_and(~i, v); } -#define arch_atomic_andnot arch_atomic_andnot -#endif - -#ifndef arch_atomic_fetch_andnot_relaxed -#ifdef arch_atomic_fetch_andnot -#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot -#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot -#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot -#endif /* arch_atomic_fetch_andnot */ -#ifndef arch_atomic_fetch_andnot -static __always_inline int -arch_atomic_fetch_andnot(int i, atomic_t *v) +/** + * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_andnot() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic_andnot(int i, atomic_t *v) { - return arch_atomic_fetch_and(~i, v); -} -#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#if defined(arch_atomic_andnot) + arch_atomic_andnot(i, v); +#else + raw_atomic_and(~i, v); #endif - -#ifndef arch_atomic_fetch_andnot_acquire -static __always_inline int -arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) -{ - return arch_atomic_fetch_and_acquire(~i, v); } -#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire -#endif -#ifndef arch_atomic_fetch_andnot_release +/** + * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_fetch_andnot_release(int i, atomic_t *v) +raw_atomic_fetch_andnot(int i, atomic_t *v) { - return arch_atomic_fetch_and_release(~i, v); -} -#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#if defined(arch_atomic_fetch_andnot) + return arch_atomic_fetch_andnot(i, v); +#elif defined(arch_atomic_fetch_andnot_relaxed) + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic_fetch_and(~i, v); #endif - -#ifndef arch_atomic_fetch_andnot_relaxed -static __always_inline int -arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v) -{ - return arch_atomic_fetch_and_relaxed(~i, v); } -#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed -#endif -#else /* arch_atomic_fetch_andnot_relaxed */ - -#ifndef arch_atomic_fetch_andnot_acquire +/** + * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +raw_atomic_fetch_andnot_acquire(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_andnot_acquire) + return arch_atomic_fetch_andnot_acquire(i, v); +#elif defined(arch_atomic_fetch_andnot_relaxed) int ret = arch_atomic_fetch_andnot_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#elif defined(arch_atomic_fetch_andnot) + return arch_atomic_fetch_andnot(i, v); +#else + return raw_atomic_fetch_and_acquire(~i, v); #endif +} -#ifndef arch_atomic_fetch_andnot_release +/** + * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_andnot_release(int i, atomic_t *v) +raw_atomic_fetch_andnot_release(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_andnot_release) + return arch_atomic_fetch_andnot_release(i, v); +#elif defined(arch_atomic_fetch_andnot_relaxed) __atomic_release_fence(); return arch_atomic_fetch_andnot_relaxed(i, v); +#elif defined(arch_atomic_fetch_andnot) + return arch_atomic_fetch_andnot(i, v); +#else + return raw_atomic_fetch_and_release(~i, v); +#endif } -#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release + +/** + * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere. + * + * Return: The original value of @v. 
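[Editor's aside] raw_atomic_andnot() and its fetch_* variants fall back to and(~i), which is the usual way to clear bits in an atomic flag word; paired with fetch_or for setting bits, the returned old value tells you whether the bit changed hands. A small C11 sketch of that idiom; FLAG_BUSY and the claim/release helpers are invented for the example:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_BUSY	0x1u

static atomic_uint flags;

/* Returns true if we were the ones to set FLAG_BUSY (it was clear before). */
static bool claim_busy(void)
{
	return !(atomic_fetch_or(&flags, FLAG_BUSY) & FLAG_BUSY);
}

/* The andnot idiom: clear the bit, report whether it had been set. */
static bool release_busy(void)
{
	return atomic_fetch_and(&flags, ~FLAG_BUSY) & FLAG_BUSY;
}

int main(void)
{
	printf("claim #1: %d\n", claim_busy());		/* 1: we set it    */
	printf("claim #2: %d\n", claim_busy());		/* 0: already set  */
	printf("release : %d\n", release_busy());	/* 1: it was set   */
	return 0;
}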
+ */ +static __always_inline int +raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ +#if defined(arch_atomic_fetch_andnot_relaxed) + return arch_atomic_fetch_andnot_relaxed(i, v); +#elif defined(arch_atomic_fetch_andnot) + return arch_atomic_fetch_andnot(i, v); +#else + return raw_atomic_fetch_and_relaxed(~i, v); #endif +} + +/** + * raw_atomic_or() - atomic bitwise OR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_or() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic_or(int i, atomic_t *v) +{ + arch_atomic_or(i, v); +} -#ifndef arch_atomic_fetch_andnot +/** + * raw_atomic_fetch_or() - atomic bitwise OR with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_andnot(int i, atomic_t *v) +raw_atomic_fetch_or(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_or) + return arch_atomic_fetch_or(i, v); +#elif defined(arch_atomic_fetch_or_relaxed) int ret; __atomic_pre_full_fence(); - ret = arch_atomic_fetch_andnot_relaxed(i, v); + ret = arch_atomic_fetch_or_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#else +#error "Unable to define raw_atomic_fetch_or" #endif +} -#endif /* arch_atomic_fetch_andnot_relaxed */ - -#ifndef arch_atomic_fetch_or_relaxed -#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or -#define arch_atomic_fetch_or_release arch_atomic_fetch_or -#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or -#else /* arch_atomic_fetch_or_relaxed */ - -#ifndef arch_atomic_fetch_or_acquire +/** + * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_or_acquire(int i, atomic_t *v) +raw_atomic_fetch_or_acquire(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_or_acquire) + return arch_atomic_fetch_or_acquire(i, v); +#elif defined(arch_atomic_fetch_or_relaxed) int ret = arch_atomic_fetch_or_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire +#elif defined(arch_atomic_fetch_or) + return arch_atomic_fetch_or(i, v); +#else +#error "Unable to define raw_atomic_fetch_or_acquire" #endif +} -#ifndef arch_atomic_fetch_or_release +/** + * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_fetch_or_release(int i, atomic_t *v) +raw_atomic_fetch_or_release(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_or_release) + return arch_atomic_fetch_or_release(i, v); +#elif defined(arch_atomic_fetch_or_relaxed) __atomic_release_fence(); return arch_atomic_fetch_or_relaxed(i, v); +#elif defined(arch_atomic_fetch_or) + return arch_atomic_fetch_or(i, v); +#else +#error "Unable to define raw_atomic_fetch_or_release" +#endif } -#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release + +/** + * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere. + * + * Return: The original value of @v. + */ +static __always_inline int +raw_atomic_fetch_or_relaxed(int i, atomic_t *v) +{ +#if defined(arch_atomic_fetch_or_relaxed) + return arch_atomic_fetch_or_relaxed(i, v); +#elif defined(arch_atomic_fetch_or) + return arch_atomic_fetch_or(i, v); +#else +#error "Unable to define raw_atomic_fetch_or_relaxed" #endif +} -#ifndef arch_atomic_fetch_or +/** + * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_xor() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic_xor(int i, atomic_t *v) +{ + arch_atomic_xor(i, v); +} + +/** + * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_or(int i, atomic_t *v) +raw_atomic_fetch_xor(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_xor) + return arch_atomic_fetch_xor(i, v); +#elif defined(arch_atomic_fetch_xor_relaxed) int ret; __atomic_pre_full_fence(); - ret = arch_atomic_fetch_or_relaxed(i, v); + ret = arch_atomic_fetch_xor_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_fetch_or arch_atomic_fetch_or +#else +#error "Unable to define raw_atomic_fetch_xor" #endif +} -#endif /* arch_atomic_fetch_or_relaxed */ - -#ifndef arch_atomic_fetch_xor_relaxed -#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor -#else /* arch_atomic_fetch_xor_relaxed */ - -#ifndef arch_atomic_fetch_xor_acquire +/** + * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_fetch_xor_acquire(int i, atomic_t *v) +raw_atomic_fetch_xor_acquire(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_xor_acquire) + return arch_atomic_fetch_xor_acquire(i, v); +#elif defined(arch_atomic_fetch_xor_relaxed) int ret = arch_atomic_fetch_xor_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire +#elif defined(arch_atomic_fetch_xor) + return arch_atomic_fetch_xor(i, v); +#else +#error "Unable to define raw_atomic_fetch_xor_acquire" #endif +} -#ifndef arch_atomic_fetch_xor_release +/** + * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_xor_release(int i, atomic_t *v) +raw_atomic_fetch_xor_release(int i, atomic_t *v) { +#if defined(arch_atomic_fetch_xor_release) + return arch_atomic_fetch_xor_release(i, v); +#elif defined(arch_atomic_fetch_xor_relaxed) __atomic_release_fence(); return arch_atomic_fetch_xor_relaxed(i, v); +#elif defined(arch_atomic_fetch_xor) + return arch_atomic_fetch_xor(i, v); +#else +#error "Unable to define raw_atomic_fetch_xor_release" +#endif } -#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release + +/** + * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere. + * + * Return: The original value of @v. + */ +static __always_inline int +raw_atomic_fetch_xor_relaxed(int i, atomic_t *v) +{ +#if defined(arch_atomic_fetch_xor_relaxed) + return arch_atomic_fetch_xor_relaxed(i, v); +#elif defined(arch_atomic_fetch_xor) + return arch_atomic_fetch_xor(i, v); +#else +#error "Unable to define raw_atomic_fetch_xor_relaxed" #endif +} -#ifndef arch_atomic_fetch_xor +/** + * raw_atomic_xchg() - atomic exchange with full ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with full ordering. + * + * Safe to use in noinstr code; prefer atomic_xchg() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_fetch_xor(int i, atomic_t *v) +raw_atomic_xchg(atomic_t *v, int new) { +#if defined(arch_atomic_xchg) + return arch_atomic_xchg(v, new); +#elif defined(arch_atomic_xchg_relaxed) int ret; __atomic_pre_full_fence(); - ret = arch_atomic_fetch_xor_relaxed(i, v); + ret = arch_atomic_xchg_relaxed(v, new); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#else + return raw_xchg(&v->counter, new); #endif +} -#endif /* arch_atomic_fetch_xor_relaxed */ - -#ifndef arch_atomic_xchg_relaxed -#define arch_atomic_xchg_acquire arch_atomic_xchg -#define arch_atomic_xchg_release arch_atomic_xchg -#define arch_atomic_xchg_relaxed arch_atomic_xchg -#else /* arch_atomic_xchg_relaxed */ - -#ifndef arch_atomic_xchg_acquire +/** + * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere. 
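[Editor's aside] A typical use of xchg with acquire ordering, paired with a release store, is a test-and-set style lock word. Real kernel code uses the proper spinlock APIs; the sketch below only demonstrates the ordering roles with C11 atomics and pthreads (compile with -pthread), and the spin_lock_/spin_unlock_ names are local to the example:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock;		/* 0 = free, 1 = held */
static long counter;

static void spin_lock_(void)
{
	/* acquire-exchange: loop until we swap a 0 out of the lock word */
	while (atomic_exchange_explicit(&lock, 1, memory_order_acquire))
		;
}

static void spin_unlock_(void)
{
	atomic_store_explicit(&lock, 0, memory_order_release);
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		spin_lock_();
		counter++;		/* plain increment, protected by the lock */
		spin_unlock_();
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("counter = %ld\n", counter);	/* 200000 */
	return 0;
}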
+ * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_xchg_acquire(atomic_t *v, int i) +raw_atomic_xchg_acquire(atomic_t *v, int new) { - int ret = arch_atomic_xchg_relaxed(v, i); +#if defined(arch_atomic_xchg_acquire) + return arch_atomic_xchg_acquire(v, new); +#elif defined(arch_atomic_xchg_relaxed) + int ret = arch_atomic_xchg_relaxed(v, new); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire +#elif defined(arch_atomic_xchg) + return arch_atomic_xchg(v, new); +#else + return raw_xchg_acquire(&v->counter, new); #endif +} -#ifndef arch_atomic_xchg_release +/** + * raw_atomic_xchg_release() - atomic exchange with release ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with release ordering. + * + * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_xchg_release(atomic_t *v, int i) +raw_atomic_xchg_release(atomic_t *v, int new) { +#if defined(arch_atomic_xchg_release) + return arch_atomic_xchg_release(v, new); +#elif defined(arch_atomic_xchg_relaxed) __atomic_release_fence(); - return arch_atomic_xchg_relaxed(v, i); + return arch_atomic_xchg_relaxed(v, new); +#elif defined(arch_atomic_xchg) + return arch_atomic_xchg(v, new); +#else + return raw_xchg_release(&v->counter, new); +#endif } -#define arch_atomic_xchg_release arch_atomic_xchg_release + +/** + * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere. + * + * Return: The original value of @v. + */ +static __always_inline int +raw_atomic_xchg_relaxed(atomic_t *v, int new) +{ +#if defined(arch_atomic_xchg_relaxed) + return arch_atomic_xchg_relaxed(v, new); +#elif defined(arch_atomic_xchg) + return arch_atomic_xchg(v, new); +#else + return raw_xchg_relaxed(&v->counter, new); #endif +} -#ifndef arch_atomic_xchg +/** + * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * + * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_xchg(atomic_t *v, int i) +raw_atomic_cmpxchg(atomic_t *v, int old, int new) { +#if defined(arch_atomic_cmpxchg) + return arch_atomic_cmpxchg(v, old, new); +#elif defined(arch_atomic_cmpxchg_relaxed) int ret; __atomic_pre_full_fence(); - ret = arch_atomic_xchg_relaxed(v, i); + ret = arch_atomic_cmpxchg_relaxed(v, old, new); __atomic_post_full_fence(); return ret; -} -#define arch_atomic_xchg arch_atomic_xchg +#else + return raw_cmpxchg(&v->counter, old, new); #endif +} -#endif /* arch_atomic_xchg_relaxed */ - -#ifndef arch_atomic_cmpxchg_relaxed -#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg -#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg -#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg -#else /* arch_atomic_cmpxchg_relaxed */ - -#ifndef arch_atomic_cmpxchg_acquire +/** + * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { +#if defined(arch_atomic_cmpxchg_acquire) + return arch_atomic_cmpxchg_acquire(v, old, new); +#elif defined(arch_atomic_cmpxchg_relaxed) int ret = arch_atomic_cmpxchg_relaxed(v, old, new); __atomic_acquire_fence(); return ret; -} -#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#elif defined(arch_atomic_cmpxchg) + return arch_atomic_cmpxchg(v, old, new); +#else + return raw_cmpxchg_acquire(&v->counter, old, new); #endif +} -#ifndef arch_atomic_cmpxchg_release +/** + * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * + * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline int -arch_atomic_cmpxchg_release(atomic_t *v, int old, int new) +raw_atomic_cmpxchg_release(atomic_t *v, int old, int new) { +#if defined(arch_atomic_cmpxchg_release) + return arch_atomic_cmpxchg_release(v, old, new); +#elif defined(arch_atomic_cmpxchg_relaxed) __atomic_release_fence(); return arch_atomic_cmpxchg_relaxed(v, old, new); -} -#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release +#elif defined(arch_atomic_cmpxchg) + return arch_atomic_cmpxchg(v, old, new); +#else + return raw_cmpxchg_release(&v->counter, old, new); #endif +} -#ifndef arch_atomic_cmpxchg +/** + * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline int -arch_atomic_cmpxchg(atomic_t *v, int old, int new) +raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { - int ret; - __atomic_pre_full_fence(); - ret = arch_atomic_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define arch_atomic_cmpxchg arch_atomic_cmpxchg +#if defined(arch_atomic_cmpxchg_relaxed) + return arch_atomic_cmpxchg_relaxed(v, old, new); +#elif defined(arch_atomic_cmpxchg) + return arch_atomic_cmpxchg(v, old, new); +#else + return raw_cmpxchg_relaxed(&v->counter, old, new); #endif +} -#endif /* arch_atomic_cmpxchg_relaxed */ - -#ifndef arch_atomic_try_cmpxchg_relaxed -#ifdef arch_atomic_try_cmpxchg -#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg -#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg -#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg -#endif /* arch_atomic_try_cmpxchg */ - -#ifndef arch_atomic_try_cmpxchg +/** + * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new) { +#if defined(arch_atomic_try_cmpxchg) + return arch_atomic_try_cmpxchg(v, old, new); +#elif defined(arch_atomic_try_cmpxchg_relaxed) + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +#else int r, o = *old; - r = arch_atomic_cmpxchg(v, o, new); + r = raw_atomic_cmpxchg(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg #endif +} -#ifndef arch_atomic_try_cmpxchg_acquire +/** + * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. 
+ */ static __always_inline bool -arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { +#if defined(arch_atomic_try_cmpxchg_acquire) + return arch_atomic_try_cmpxchg_acquire(v, old, new); +#elif defined(arch_atomic_try_cmpxchg_relaxed) + bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic_try_cmpxchg) + return arch_atomic_try_cmpxchg(v, old, new); +#else int r, o = *old; - r = arch_atomic_cmpxchg_acquire(v, o, new); + r = raw_atomic_cmpxchg_acquire(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire #endif +} -#ifndef arch_atomic_try_cmpxchg_release +/** + * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { +#if defined(arch_atomic_try_cmpxchg_release) + return arch_atomic_try_cmpxchg_release(v, old, new); +#elif defined(arch_atomic_try_cmpxchg_relaxed) + __atomic_release_fence(); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +#elif defined(arch_atomic_try_cmpxchg) + return arch_atomic_try_cmpxchg(v, old, new); +#else int r, o = *old; - r = arch_atomic_cmpxchg_release(v, o, new); + r = raw_atomic_cmpxchg_release(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release #endif +} -#ifndef arch_atomic_try_cmpxchg_relaxed +/** + * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. 
+ */ static __always_inline bool -arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { +#if defined(arch_atomic_try_cmpxchg_relaxed) + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +#elif defined(arch_atomic_try_cmpxchg) + return arch_atomic_try_cmpxchg(v, old, new); +#else int r, o = *old; - r = arch_atomic_cmpxchg_relaxed(v, o, new); + r = raw_atomic_cmpxchg_relaxed(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed -#endif - -#else /* arch_atomic_try_cmpxchg_relaxed */ - -#ifndef arch_atomic_try_cmpxchg_acquire -static __always_inline bool -arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) -{ - bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire #endif - -#ifndef arch_atomic_try_cmpxchg_release -static __always_inline bool -arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) -{ - __atomic_release_fence(); - return arch_atomic_try_cmpxchg_relaxed(v, old, new); } -#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release -#endif -#ifndef arch_atomic_try_cmpxchg -static __always_inline bool -arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) -{ - bool ret; - __atomic_pre_full_fence(); - ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg -#endif - -#endif /* arch_atomic_try_cmpxchg_relaxed */ - -#ifndef arch_atomic_sub_and_test /** - * arch_atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t + * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with full ordering. * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. + * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool -arch_atomic_sub_and_test(int i, atomic_t *v) +raw_atomic_sub_and_test(int i, atomic_t *v) { - return arch_atomic_sub_return(i, v) == 0; -} -#define arch_atomic_sub_and_test arch_atomic_sub_and_test +#if defined(arch_atomic_sub_and_test) + return arch_atomic_sub_and_test(i, v); +#else + return raw_atomic_sub_return(i, v) == 0; #endif +} -#ifndef arch_atomic_dec_and_test /** - * arch_atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t + * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering + * @v: pointer to atomic_t * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. 
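Aside (editor's illustrative sketch, not part of this commit): the raw_atomic_try_cmpxchg*() helpers above are normally consumed in a read-modify-write loop, relying on the documented behaviour that @old is refreshed with the current value of @v when the exchange fails. A minimal sketch, assuming a hypothetical helper example_inc_below():

/*
 * Usage sketch only: a saturating increment built on raw_atomic_try_cmpxchg().
 * On failure the helper writes the current value of @v back into 'old', so
 * the loop does not need to call raw_atomic_read() again.
 */
static __always_inline void example_inc_below(atomic_t *v, int limit)
{
	int old = raw_atomic_read(v);

	do {
		if (old >= limit)
			return;		/* already at the limit, give up */
	} while (!raw_atomic_try_cmpxchg(v, &old, old + 1));
}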
*/ static __always_inline bool -arch_atomic_dec_and_test(atomic_t *v) +raw_atomic_dec_and_test(atomic_t *v) { - return arch_atomic_dec_return(v) == 0; -} -#define arch_atomic_dec_and_test arch_atomic_dec_and_test +#if defined(arch_atomic_dec_and_test) + return arch_atomic_dec_and_test(v); +#else + return raw_atomic_dec_return(v) == 0; #endif +} -#ifndef arch_atomic_inc_and_test /** - * arch_atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t + * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering + * @v: pointer to atomic_t * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool -arch_atomic_inc_and_test(atomic_t *v) +raw_atomic_inc_and_test(atomic_t *v) { - return arch_atomic_inc_return(v) == 0; -} -#define arch_atomic_inc_and_test arch_atomic_inc_and_test +#if defined(arch_atomic_inc_and_test) + return arch_atomic_inc_and_test(v); +#else + return raw_atomic_inc_return(v) == 0; #endif +} -#ifndef arch_atomic_add_negative_relaxed -#ifdef arch_atomic_add_negative -#define arch_atomic_add_negative_acquire arch_atomic_add_negative -#define arch_atomic_add_negative_release arch_atomic_add_negative -#define arch_atomic_add_negative_relaxed arch_atomic_add_negative -#endif /* arch_atomic_add_negative */ - -#ifndef arch_atomic_add_negative /** - * arch_atomic_add_negative - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t + * raw_atomic_add_negative() - atomic add and test if negative with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere. * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool -arch_atomic_add_negative(int i, atomic_t *v) +raw_atomic_add_negative(int i, atomic_t *v) { - return arch_atomic_add_return(i, v) < 0; -} -#define arch_atomic_add_negative arch_atomic_add_negative +#if defined(arch_atomic_add_negative) + return arch_atomic_add_negative(i, v); +#elif defined(arch_atomic_add_negative_relaxed) + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic_add_negative_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic_add_return(i, v) < 0; #endif +} -#ifndef arch_atomic_add_negative_acquire /** - * arch_atomic_add_negative_acquire - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t + * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering + * @i: int value to add + * @v: pointer to atomic_t * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. 
*/ static __always_inline bool -arch_atomic_add_negative_acquire(int i, atomic_t *v) +raw_atomic_add_negative_acquire(int i, atomic_t *v) { - return arch_atomic_add_return_acquire(i, v) < 0; -} -#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire +#if defined(arch_atomic_add_negative_acquire) + return arch_atomic_add_negative_acquire(i, v); +#elif defined(arch_atomic_add_negative_relaxed) + bool ret = arch_atomic_add_negative_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic_add_negative) + return arch_atomic_add_negative(i, v); +#else + return raw_atomic_add_return_acquire(i, v) < 0; #endif +} -#ifndef arch_atomic_add_negative_release /** - * arch_atomic_add_negative_release - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t + * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere. * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool -arch_atomic_add_negative_release(int i, atomic_t *v) +raw_atomic_add_negative_release(int i, atomic_t *v) { - return arch_atomic_add_return_release(i, v) < 0; -} -#define arch_atomic_add_negative_release arch_atomic_add_negative_release +#if defined(arch_atomic_add_negative_release) + return arch_atomic_add_negative_release(i, v); +#elif defined(arch_atomic_add_negative_relaxed) + __atomic_release_fence(); + return arch_atomic_add_negative_relaxed(i, v); +#elif defined(arch_atomic_add_negative) + return arch_atomic_add_negative(i, v); +#else + return raw_atomic_add_return_release(i, v) < 0; #endif +} -#ifndef arch_atomic_add_negative_relaxed /** - * arch_atomic_add_negative_relaxed - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic_t + * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. 
*/ static __always_inline bool -arch_atomic_add_negative_relaxed(int i, atomic_t *v) -{ - return arch_atomic_add_return_relaxed(i, v) < 0; -} -#define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed -#endif - -#else /* arch_atomic_add_negative_relaxed */ - -#ifndef arch_atomic_add_negative_acquire -static __always_inline bool -arch_atomic_add_negative_acquire(int i, atomic_t *v) +raw_atomic_add_negative_relaxed(int i, atomic_t *v) { - bool ret = arch_atomic_add_negative_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire -#endif - -#ifndef arch_atomic_add_negative_release -static __always_inline bool -arch_atomic_add_negative_release(int i, atomic_t *v) -{ - __atomic_release_fence(); +#if defined(arch_atomic_add_negative_relaxed) return arch_atomic_add_negative_relaxed(i, v); -} -#define arch_atomic_add_negative_release arch_atomic_add_negative_release +#elif defined(arch_atomic_add_negative) + return arch_atomic_add_negative(i, v); +#else + return raw_atomic_add_return_relaxed(i, v) < 0; #endif - -#ifndef arch_atomic_add_negative -static __always_inline bool -arch_atomic_add_negative(int i, atomic_t *v) -{ - bool ret; - __atomic_pre_full_fence(); - ret = arch_atomic_add_negative_relaxed(i, v); - __atomic_post_full_fence(); - return ret; } -#define arch_atomic_add_negative arch_atomic_add_negative -#endif -#endif /* arch_atomic_add_negative_relaxed */ - -#ifndef arch_atomic_fetch_add_unless /** - * arch_atomic_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. + * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_t + * @a: int value to add + * @u: int value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere. * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns original value of @v + * Return: The original value of @v. */ static __always_inline int -arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +raw_atomic_fetch_add_unless(atomic_t *v, int a, int u) { - int c = arch_atomic_read(v); +#if defined(arch_atomic_fetch_add_unless) + return arch_atomic_fetch_add_unless(v, a, u); +#else + int c = raw_atomic_read(v); do { if (unlikely(c == u)) break; - } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); + } while (!raw_atomic_try_cmpxchg(v, &c, c + a)); return c; -} -#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless #endif +} -#ifndef arch_atomic_add_unless /** - * arch_atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. + * raw_atomic_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_t + * @a: int value to add + * @u: int value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere. * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. + * Return: @true if @v was updated, @false otherwise. 
*/ static __always_inline bool -arch_atomic_add_unless(atomic_t *v, int a, int u) +raw_atomic_add_unless(atomic_t *v, int a, int u) { - return arch_atomic_fetch_add_unless(v, a, u) != u; -} -#define arch_atomic_add_unless arch_atomic_add_unless +#if defined(arch_atomic_add_unless) + return arch_atomic_add_unless(v, a, u); +#else + return raw_atomic_fetch_add_unless(v, a, u) != u; #endif +} -#ifndef arch_atomic_inc_not_zero /** - * arch_atomic_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic_t + * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering + * @v: pointer to atomic_t + * + * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. + * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool -arch_atomic_inc_not_zero(atomic_t *v) +raw_atomic_inc_not_zero(atomic_t *v) { - return arch_atomic_add_unless(v, 1, 0); -} -#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero +#if defined(arch_atomic_inc_not_zero) + return arch_atomic_inc_not_zero(v); +#else + return raw_atomic_add_unless(v, 1, 0); #endif +} -#ifndef arch_atomic_inc_unless_negative +/** + * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering + * @v: pointer to atomic_t + * + * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool -arch_atomic_inc_unless_negative(atomic_t *v) +raw_atomic_inc_unless_negative(atomic_t *v) { - int c = arch_atomic_read(v); +#if defined(arch_atomic_inc_unless_negative) + return arch_atomic_inc_unless_negative(v); +#else + int c = raw_atomic_read(v); do { if (unlikely(c < 0)) return false; - } while (!arch_atomic_try_cmpxchg(v, &c, c + 1)); + } while (!raw_atomic_try_cmpxchg(v, &c, c + 1)); return true; -} -#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative #endif +} -#ifndef arch_atomic_dec_unless_positive +/** + * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering + * @v: pointer to atomic_t + * + * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool -arch_atomic_dec_unless_positive(atomic_t *v) +raw_atomic_dec_unless_positive(atomic_t *v) { - int c = arch_atomic_read(v); +#if defined(arch_atomic_dec_unless_positive) + return arch_atomic_dec_unless_positive(v); +#else + int c = raw_atomic_read(v); do { if (unlikely(c > 0)) return false; - } while (!arch_atomic_try_cmpxchg(v, &c, c - 1)); + } while (!raw_atomic_try_cmpxchg(v, &c, c - 1)); return true; -} -#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive #endif +} -#ifndef arch_atomic_dec_if_positive +/** + * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering + * @v: pointer to atomic_t + * + * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere. + * + * Return: The old value of (@v - 1), regardless of whether @v was updated. 
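Aside (editor's illustrative sketch, not part of this commit): raw_atomic_inc_not_zero() and raw_atomic_dec_and_test(), both defined in this hunk, combine into the usual refcount-style lifetime pattern. struct example_obj and its helpers below are hypothetical:

struct example_obj {
	atomic_t	refs;
};

/* Take a reference only while at least one reference is still held. */
static bool example_obj_tryget(struct example_obj *obj)
{
	return raw_atomic_inc_not_zero(&obj->refs);
}

/* Drop a reference; the caller frees the object when this returns true. */
static bool example_obj_put(struct example_obj *obj)
{
	return raw_atomic_dec_and_test(&obj->refs);
}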
+ */ static __always_inline int -arch_atomic_dec_if_positive(atomic_t *v) +raw_atomic_dec_if_positive(atomic_t *v) { - int dec, c = arch_atomic_read(v); +#if defined(arch_atomic_dec_if_positive) + return arch_atomic_dec_if_positive(v); +#else + int dec, c = raw_atomic_read(v); do { dec = c - 1; if (unlikely(dec < 0)) break; - } while (!arch_atomic_try_cmpxchg(v, &c, dec)); + } while (!raw_atomic_try_cmpxchg(v, &c, dec)); return dec; -} -#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive #endif +} #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif -#ifndef arch_atomic64_read_acquire +/** + * raw_atomic64_read() - atomic load with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically loads the value of @v with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_read() elsewhere. + * + * Return: The value loaded from @v. + */ static __always_inline s64 -arch_atomic64_read_acquire(const atomic64_t *v) +raw_atomic64_read(const atomic64_t *v) { + return arch_atomic64_read(v); +} + +/** + * raw_atomic64_read_acquire() - atomic load with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically loads the value of @v with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere. + * + * Return: The value loaded from @v. + */ +static __always_inline s64 +raw_atomic64_read_acquire(const atomic64_t *v) +{ +#if defined(arch_atomic64_read_acquire) + return arch_atomic64_read_acquire(v); +#elif defined(arch_atomic64_read) + return arch_atomic64_read(v); +#else s64 ret; if (__native_word(atomic64_t)) { ret = smp_load_acquire(&(v)->counter); } else { - ret = arch_atomic64_read(v); + ret = raw_atomic64_read(v); __atomic_acquire_fence(); } return ret; -} -#define arch_atomic64_read_acquire arch_atomic64_read_acquire #endif +} -#ifndef arch_atomic64_set_release +/** + * raw_atomic64_set() - atomic set with relaxed ordering + * @v: pointer to atomic64_t + * @i: s64 value to assign + * + * Atomically sets @v to @i with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_set() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_set(atomic64_t *v, s64 i) +{ + arch_atomic64_set(v, i); +} + +/** + * raw_atomic64_set_release() - atomic set with release ordering + * @v: pointer to atomic64_t + * @i: s64 value to assign + * + * Atomically sets @v to @i with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere. + * + * Return: Nothing. 
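Aside (editor's illustrative sketch, not part of this commit): raw_atomic_dec_if_positive() returns the would-be decremented value even when @v is left untouched, so callers test the sign of the return value rather than a boolean. example_take_credit() is hypothetical:

/*
 * Usage sketch only: a negative return means no credit was taken
 * (the counter was already zero or negative) and @credits is unchanged.
 */
static bool example_take_credit(atomic_t *credits)
{
	return raw_atomic_dec_if_positive(credits) >= 0;
}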
+ */ static __always_inline void -arch_atomic64_set_release(atomic64_t *v, s64 i) +raw_atomic64_set_release(atomic64_t *v, s64 i) { +#if defined(arch_atomic64_set_release) + arch_atomic64_set_release(v, i); +#elif defined(arch_atomic64_set) + arch_atomic64_set(v, i); +#else if (__native_word(atomic64_t)) { smp_store_release(&(v)->counter, i); } else { __atomic_release_fence(); - arch_atomic64_set(v, i); + raw_atomic64_set(v, i); } -} -#define arch_atomic64_set_release arch_atomic64_set_release #endif - -#ifndef arch_atomic64_add_return_relaxed -#define arch_atomic64_add_return_acquire arch_atomic64_add_return -#define arch_atomic64_add_return_release arch_atomic64_add_return -#define arch_atomic64_add_return_relaxed arch_atomic64_add_return -#else /* arch_atomic64_add_return_relaxed */ - -#ifndef arch_atomic64_add_return_acquire -static __always_inline s64 -arch_atomic64_add_return_acquire(s64 i, atomic64_t *v) -{ - s64 ret = arch_atomic64_add_return_relaxed(i, v); - __atomic_acquire_fence(); - return ret; } -#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire -#endif -#ifndef arch_atomic64_add_return_release -static __always_inline s64 -arch_atomic64_add_return_release(s64 i, atomic64_t *v) +/** + * raw_atomic64_add() - atomic add with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_add() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_add(s64 i, atomic64_t *v) { - __atomic_release_fence(); - return arch_atomic64_add_return_relaxed(i, v); + arch_atomic64_add(i, v); } -#define arch_atomic64_add_return_release arch_atomic64_add_return_release -#endif -#ifndef arch_atomic64_add_return +/** + * raw_atomic64_add_return() - atomic add with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_add_return(s64 i, atomic64_t *v) +raw_atomic64_add_return(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_add_return) + return arch_atomic64_add_return(i, v); +#elif defined(arch_atomic64_add_return_relaxed) s64 ret; __atomic_pre_full_fence(); ret = arch_atomic64_add_return_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_add_return arch_atomic64_add_return +#else +#error "Unable to define raw_atomic64_add_return" #endif +} -#endif /* arch_atomic64_add_return_relaxed */ - -#ifndef arch_atomic64_fetch_add_relaxed -#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add -#else /* arch_atomic64_fetch_add_relaxed */ - -#ifndef arch_atomic64_fetch_add_acquire +/** + * raw_atomic64_add_return_acquire() - atomic add with acquire ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere. + * + * Return: The updated value of @v. 
+ */ static __always_inline s64 -arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +raw_atomic64_add_return_acquire(s64 i, atomic64_t *v) { - s64 ret = arch_atomic64_fetch_add_relaxed(i, v); +#if defined(arch_atomic64_add_return_acquire) + return arch_atomic64_add_return_acquire(i, v); +#elif defined(arch_atomic64_add_return_relaxed) + s64 ret = arch_atomic64_add_return_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire +#elif defined(arch_atomic64_add_return) + return arch_atomic64_add_return(i, v); +#else +#error "Unable to define raw_atomic64_add_return_acquire" #endif +} -#ifndef arch_atomic64_fetch_add_release +/** + * raw_atomic64_add_return_release() - atomic add with release ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_add_release(s64 i, atomic64_t *v) +raw_atomic64_add_return_release(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_add_return_release) + return arch_atomic64_add_return_release(i, v); +#elif defined(arch_atomic64_add_return_relaxed) __atomic_release_fence(); - return arch_atomic64_fetch_add_relaxed(i, v); + return arch_atomic64_add_return_relaxed(i, v); +#elif defined(arch_atomic64_add_return) + return arch_atomic64_add_return(i, v); +#else +#error "Unable to define raw_atomic64_add_return_release" +#endif } -#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release + +/** + * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ +static __always_inline s64 +raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v) +{ +#if defined(arch_atomic64_add_return_relaxed) + return arch_atomic64_add_return_relaxed(i, v); +#elif defined(arch_atomic64_add_return) + return arch_atomic64_add_return(i, v); +#else +#error "Unable to define raw_atomic64_add_return_relaxed" #endif +} -#ifndef arch_atomic64_fetch_add +/** + * raw_atomic64_fetch_add() - atomic add with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 -arch_atomic64_fetch_add(s64 i, atomic64_t *v) +raw_atomic64_fetch_add(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_add) + return arch_atomic64_fetch_add(i, v); +#elif defined(arch_atomic64_fetch_add_relaxed) s64 ret; __atomic_pre_full_fence(); ret = arch_atomic64_fetch_add_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#else +#error "Unable to define raw_atomic64_fetch_add" #endif +} -#endif /* arch_atomic64_fetch_add_relaxed */ - -#ifndef arch_atomic64_sub_return_relaxed -#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return -#define arch_atomic64_sub_return_release arch_atomic64_sub_return -#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return -#else /* arch_atomic64_sub_return_relaxed */ - -#ifndef arch_atomic64_sub_return_acquire +/** + * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v) +raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { - s64 ret = arch_atomic64_sub_return_relaxed(i, v); +#if defined(arch_atomic64_fetch_add_acquire) + return arch_atomic64_fetch_add_acquire(i, v); +#elif defined(arch_atomic64_fetch_add_relaxed) + s64 ret = arch_atomic64_fetch_add_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire +#elif defined(arch_atomic64_fetch_add) + return arch_atomic64_fetch_add(i, v); +#else +#error "Unable to define raw_atomic64_fetch_add_acquire" #endif +} -#ifndef arch_atomic64_sub_return_release +/** + * raw_atomic64_fetch_add_release() - atomic add with release ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_sub_return_release(s64 i, atomic64_t *v) +raw_atomic64_fetch_add_release(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_add_release) + return arch_atomic64_fetch_add_release(i, v); +#elif defined(arch_atomic64_fetch_add_relaxed) __atomic_release_fence(); - return arch_atomic64_sub_return_relaxed(i, v); + return arch_atomic64_fetch_add_relaxed(i, v); +#elif defined(arch_atomic64_fetch_add) + return arch_atomic64_fetch_add(i, v); +#else +#error "Unable to define raw_atomic64_fetch_add_release" +#endif } -#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release + +/** + * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ +static __always_inline s64 +raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) +{ +#if defined(arch_atomic64_fetch_add_relaxed) + return arch_atomic64_fetch_add_relaxed(i, v); +#elif defined(arch_atomic64_fetch_add) + return arch_atomic64_fetch_add(i, v); +#else +#error "Unable to define raw_atomic64_fetch_add_relaxed" #endif +} + +/** + * raw_atomic64_sub() - atomic subtract with relaxed ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_sub() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_sub(s64 i, atomic64_t *v) +{ + arch_atomic64_sub(i, v); +} -#ifndef arch_atomic64_sub_return +/** + * raw_atomic64_sub_return() - atomic subtract with full ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_sub_return(s64 i, atomic64_t *v) +raw_atomic64_sub_return(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_sub_return) + return arch_atomic64_sub_return(i, v); +#elif defined(arch_atomic64_sub_return_relaxed) s64 ret; __atomic_pre_full_fence(); ret = arch_atomic64_sub_return_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_sub_return arch_atomic64_sub_return +#else +#error "Unable to define raw_atomic64_sub_return" #endif +} -#endif /* arch_atomic64_sub_return_relaxed */ - -#ifndef arch_atomic64_fetch_sub_relaxed -#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub -#else /* arch_atomic64_fetch_sub_relaxed */ - -#ifndef arch_atomic64_fetch_sub_acquire +/** + * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v) { - s64 ret = arch_atomic64_fetch_sub_relaxed(i, v); +#if defined(arch_atomic64_sub_return_acquire) + return arch_atomic64_sub_return_acquire(i, v); +#elif defined(arch_atomic64_sub_return_relaxed) + s64 ret = arch_atomic64_sub_return_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire +#elif defined(arch_atomic64_sub_return) + return arch_atomic64_sub_return(i, v); +#else +#error "Unable to define raw_atomic64_sub_return_acquire" #endif +} -#ifndef arch_atomic64_fetch_sub_release +/** + * raw_atomic64_sub_return_release() - atomic subtract with release ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere. + * + * Return: The updated value of @v. 
+ */ static __always_inline s64 -arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v) +raw_atomic64_sub_return_release(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_sub_return_release) + return arch_atomic64_sub_return_release(i, v); +#elif defined(arch_atomic64_sub_return_relaxed) __atomic_release_fence(); - return arch_atomic64_fetch_sub_relaxed(i, v); + return arch_atomic64_sub_return_relaxed(i, v); +#elif defined(arch_atomic64_sub_return) + return arch_atomic64_sub_return(i, v); +#else +#error "Unable to define raw_atomic64_sub_return_release" +#endif } -#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release + +/** + * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ +static __always_inline s64 +raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v) +{ +#if defined(arch_atomic64_sub_return_relaxed) + return arch_atomic64_sub_return_relaxed(i, v); +#elif defined(arch_atomic64_sub_return) + return arch_atomic64_sub_return(i, v); +#else +#error "Unable to define raw_atomic64_sub_return_relaxed" #endif +} -#ifndef arch_atomic64_fetch_sub +/** + * raw_atomic64_fetch_sub() - atomic subtract with full ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_sub(s64 i, atomic64_t *v) +raw_atomic64_fetch_sub(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_sub) + return arch_atomic64_fetch_sub(i, v); +#elif defined(arch_atomic64_fetch_sub_relaxed) s64 ret; __atomic_pre_full_fence(); ret = arch_atomic64_fetch_sub_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#else +#error "Unable to define raw_atomic64_fetch_sub" #endif - -#endif /* arch_atomic64_fetch_sub_relaxed */ - -#ifndef arch_atomic64_inc -static __always_inline void -arch_atomic64_inc(atomic64_t *v) -{ - arch_atomic64_add(1, v); } -#define arch_atomic64_inc arch_atomic64_inc -#endif - -#ifndef arch_atomic64_inc_return_relaxed -#ifdef arch_atomic64_inc_return -#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return -#define arch_atomic64_inc_return_release arch_atomic64_inc_return -#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return -#endif /* arch_atomic64_inc_return */ -#ifndef arch_atomic64_inc_return +/** + * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 -arch_atomic64_inc_return(atomic64_t *v) +raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { - return arch_atomic64_add_return(1, v); -} -#define arch_atomic64_inc_return arch_atomic64_inc_return +#if defined(arch_atomic64_fetch_sub_acquire) + return arch_atomic64_fetch_sub_acquire(i, v); +#elif defined(arch_atomic64_fetch_sub_relaxed) + s64 ret = arch_atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic64_fetch_sub) + return arch_atomic64_fetch_sub(i, v); +#else +#error "Unable to define raw_atomic64_fetch_sub_acquire" #endif - -#ifndef arch_atomic64_inc_return_acquire -static __always_inline s64 -arch_atomic64_inc_return_acquire(atomic64_t *v) -{ - return arch_atomic64_add_return_acquire(1, v); } -#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire -#endif -#ifndef arch_atomic64_inc_return_release +/** + * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_inc_return_release(atomic64_t *v) +raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v) { - return arch_atomic64_add_return_release(1, v); -} -#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#if defined(arch_atomic64_fetch_sub_release) + return arch_atomic64_fetch_sub_release(i, v); +#elif defined(arch_atomic64_fetch_sub_relaxed) + __atomic_release_fence(); + return arch_atomic64_fetch_sub_relaxed(i, v); +#elif defined(arch_atomic64_fetch_sub) + return arch_atomic64_fetch_sub(i, v); +#else +#error "Unable to define raw_atomic64_fetch_sub_release" #endif - -#ifndef arch_atomic64_inc_return_relaxed -static __always_inline s64 -arch_atomic64_inc_return_relaxed(atomic64_t *v) -{ - return arch_atomic64_add_return_relaxed(1, v); } -#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed -#endif -#else /* arch_atomic64_inc_return_relaxed */ - -#ifndef arch_atomic64_inc_return_acquire +/** + * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_inc_return_acquire(atomic64_t *v) +raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { - s64 ret = arch_atomic64_inc_return_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#if defined(arch_atomic64_fetch_sub_relaxed) + return arch_atomic64_fetch_sub_relaxed(i, v); +#elif defined(arch_atomic64_fetch_sub) + return arch_atomic64_fetch_sub(i, v); +#else +#error "Unable to define raw_atomic64_fetch_sub_relaxed" #endif +} -#ifndef arch_atomic64_inc_return_release -static __always_inline s64 -arch_atomic64_inc_return_release(atomic64_t *v) +/** + * raw_atomic64_inc() - atomic increment with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_inc() elsewhere. + * + * Return: Nothing. 
+ */ +static __always_inline void +raw_atomic64_inc(atomic64_t *v) { - __atomic_release_fence(); - return arch_atomic64_inc_return_relaxed(v); -} -#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#if defined(arch_atomic64_inc) + arch_atomic64_inc(v); +#else + raw_atomic64_add(1, v); #endif +} -#ifndef arch_atomic64_inc_return +/** + * raw_atomic64_inc_return() - atomic increment with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_inc_return(atomic64_t *v) +raw_atomic64_inc_return(atomic64_t *v) { +#if defined(arch_atomic64_inc_return) + return arch_atomic64_inc_return(v); +#elif defined(arch_atomic64_inc_return_relaxed) s64 ret; __atomic_pre_full_fence(); ret = arch_atomic64_inc_return_relaxed(v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_inc_return arch_atomic64_inc_return +#else + return raw_atomic64_add_return(1, v); #endif +} -#endif /* arch_atomic64_inc_return_relaxed */ - -#ifndef arch_atomic64_fetch_inc_relaxed -#ifdef arch_atomic64_fetch_inc -#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc -#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc -#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc -#endif /* arch_atomic64_fetch_inc */ - -#ifndef arch_atomic64_fetch_inc +/** + * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_inc(atomic64_t *v) +raw_atomic64_inc_return_acquire(atomic64_t *v) { - return arch_atomic64_fetch_add(1, v); -} -#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#if defined(arch_atomic64_inc_return_acquire) + return arch_atomic64_inc_return_acquire(v); +#elif defined(arch_atomic64_inc_return_relaxed) + s64 ret = arch_atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic64_inc_return) + return arch_atomic64_inc_return(v); +#else + return raw_atomic64_add_return_acquire(1, v); #endif +} -#ifndef arch_atomic64_fetch_inc_acquire +/** + * raw_atomic64_inc_return_release() - atomic increment with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere. + * + * Return: The updated value of @v. 
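Aside (editor's illustrative sketch, not part of this commit): the recurring "Safe to use in noinstr code; prefer atomic64_inc() elsewhere" guidance amounts to pairing the raw_ API in noinstr paths with the instrumented API everywhere else. The counter and helpers below are hypothetical:

/*
 * Usage sketch only: bump a hypothetical event counter from noinstr code
 * via the raw_ API, and read it from ordinary code via the instrumented API.
 */
static atomic64_t example_event_count = ATOMIC64_INIT(0);

static __always_inline void example_event_hit(void)	/* noinstr context */
{
	raw_atomic64_inc(&example_event_count);
}

static s64 example_event_total(void)
{
	return atomic64_read(&example_event_count);
}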
+ */ static __always_inline s64 -arch_atomic64_fetch_inc_acquire(atomic64_t *v) +raw_atomic64_inc_return_release(atomic64_t *v) { - return arch_atomic64_fetch_add_acquire(1, v); -} -#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#if defined(arch_atomic64_inc_return_release) + return arch_atomic64_inc_return_release(v); +#elif defined(arch_atomic64_inc_return_relaxed) + __atomic_release_fence(); + return arch_atomic64_inc_return_relaxed(v); +#elif defined(arch_atomic64_inc_return) + return arch_atomic64_inc_return(v); +#else + return raw_atomic64_add_return_release(1, v); #endif +} -#ifndef arch_atomic64_fetch_inc_release +/** + * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_inc_release(atomic64_t *v) +raw_atomic64_inc_return_relaxed(atomic64_t *v) { - return arch_atomic64_fetch_add_release(1, v); -} -#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#if defined(arch_atomic64_inc_return_relaxed) + return arch_atomic64_inc_return_relaxed(v); +#elif defined(arch_atomic64_inc_return) + return arch_atomic64_inc_return(v); +#else + return raw_atomic64_add_return_relaxed(1, v); #endif +} -#ifndef arch_atomic64_fetch_inc_relaxed +/** + * raw_atomic64_fetch_inc() - atomic increment with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_inc_relaxed(atomic64_t *v) +raw_atomic64_fetch_inc(atomic64_t *v) { - return arch_atomic64_fetch_add_relaxed(1, v); -} -#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed +#if defined(arch_atomic64_fetch_inc) + return arch_atomic64_fetch_inc(v); +#elif defined(arch_atomic64_fetch_inc_relaxed) + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic64_fetch_add(1, v); #endif +} -#else /* arch_atomic64_fetch_inc_relaxed */ - -#ifndef arch_atomic64_fetch_inc_acquire +/** + * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_inc_acquire(atomic64_t *v) +raw_atomic64_fetch_inc_acquire(atomic64_t *v) { +#if defined(arch_atomic64_fetch_inc_acquire) + return arch_atomic64_fetch_inc_acquire(v); +#elif defined(arch_atomic64_fetch_inc_relaxed) s64 ret = arch_atomic64_fetch_inc_relaxed(v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#elif defined(arch_atomic64_fetch_inc) + return arch_atomic64_fetch_inc(v); +#else + return raw_atomic64_fetch_add_acquire(1, v); #endif +} -#ifndef arch_atomic64_fetch_inc_release +/** + * raw_atomic64_fetch_inc_release() - atomic increment with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with release ordering. 
+ * + * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_inc_release(atomic64_t *v) +raw_atomic64_fetch_inc_release(atomic64_t *v) { +#if defined(arch_atomic64_fetch_inc_release) + return arch_atomic64_fetch_inc_release(v); +#elif defined(arch_atomic64_fetch_inc_relaxed) __atomic_release_fence(); return arch_atomic64_fetch_inc_relaxed(v); -} -#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#elif defined(arch_atomic64_fetch_inc) + return arch_atomic64_fetch_inc(v); +#else + return raw_atomic64_fetch_add_release(1, v); #endif +} -#ifndef arch_atomic64_fetch_inc +/** + * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_inc(atomic64_t *v) +raw_atomic64_fetch_inc_relaxed(atomic64_t *v) { - s64 ret; - __atomic_pre_full_fence(); - ret = arch_atomic64_fetch_inc_relaxed(v); - __atomic_post_full_fence(); - return ret; -} -#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#if defined(arch_atomic64_fetch_inc_relaxed) + return arch_atomic64_fetch_inc_relaxed(v); +#elif defined(arch_atomic64_fetch_inc) + return arch_atomic64_fetch_inc(v); +#else + return raw_atomic64_fetch_add_relaxed(1, v); #endif - -#endif /* arch_atomic64_fetch_inc_relaxed */ - -#ifndef arch_atomic64_dec -static __always_inline void -arch_atomic64_dec(atomic64_t *v) -{ - arch_atomic64_sub(1, v); } -#define arch_atomic64_dec arch_atomic64_dec -#endif - -#ifndef arch_atomic64_dec_return_relaxed -#ifdef arch_atomic64_dec_return -#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return -#define arch_atomic64_dec_return_release arch_atomic64_dec_return -#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return -#endif /* arch_atomic64_dec_return */ -#ifndef arch_atomic64_dec_return -static __always_inline s64 -arch_atomic64_dec_return(atomic64_t *v) +/** + * raw_atomic64_dec() - atomic decrement with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_dec(atomic64_t *v) { - return arch_atomic64_sub_return(1, v); -} -#define arch_atomic64_dec_return arch_atomic64_dec_return +#if defined(arch_atomic64_dec) + arch_atomic64_dec(v); +#else + raw_atomic64_sub(1, v); #endif - -#ifndef arch_atomic64_dec_return_acquire -static __always_inline s64 -arch_atomic64_dec_return_acquire(atomic64_t *v) -{ - return arch_atomic64_sub_return_acquire(1, v); } -#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire -#endif -#ifndef arch_atomic64_dec_return_release +/** + * raw_atomic64_dec_return() - atomic decrement with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere. + * + * Return: The updated value of @v. 
+ */ static __always_inline s64 -arch_atomic64_dec_return_release(atomic64_t *v) +raw_atomic64_dec_return(atomic64_t *v) { - return arch_atomic64_sub_return_release(1, v); -} -#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#if defined(arch_atomic64_dec_return) + return arch_atomic64_dec_return(v); +#elif defined(arch_atomic64_dec_return_relaxed) + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic64_sub_return(1, v); #endif - -#ifndef arch_atomic64_dec_return_relaxed -static __always_inline s64 -arch_atomic64_dec_return_relaxed(atomic64_t *v) -{ - return arch_atomic64_sub_return_relaxed(1, v); } -#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed -#endif - -#else /* arch_atomic64_dec_return_relaxed */ -#ifndef arch_atomic64_dec_return_acquire +/** + * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_dec_return_acquire(atomic64_t *v) +raw_atomic64_dec_return_acquire(atomic64_t *v) { +#if defined(arch_atomic64_dec_return_acquire) + return arch_atomic64_dec_return_acquire(v); +#elif defined(arch_atomic64_dec_return_relaxed) s64 ret = arch_atomic64_dec_return_relaxed(v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#elif defined(arch_atomic64_dec_return) + return arch_atomic64_dec_return(v); +#else + return raw_atomic64_sub_return_acquire(1, v); #endif +} -#ifndef arch_atomic64_dec_return_release +/** + * raw_atomic64_dec_return_release() - atomic decrement with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline s64 -arch_atomic64_dec_return_release(atomic64_t *v) +raw_atomic64_dec_return_release(atomic64_t *v) { +#if defined(arch_atomic64_dec_return_release) + return arch_atomic64_dec_return_release(v); +#elif defined(arch_atomic64_dec_return_relaxed) __atomic_release_fence(); return arch_atomic64_dec_return_relaxed(v); +#elif defined(arch_atomic64_dec_return) + return arch_atomic64_dec_return(v); +#else + return raw_atomic64_sub_return_release(1, v); +#endif } -#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release + +/** + * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ +static __always_inline s64 +raw_atomic64_dec_return_relaxed(atomic64_t *v) +{ +#if defined(arch_atomic64_dec_return_relaxed) + return arch_atomic64_dec_return_relaxed(v); +#elif defined(arch_atomic64_dec_return) + return arch_atomic64_dec_return(v); +#else + return raw_atomic64_sub_return_relaxed(1, v); #endif +} -#ifndef arch_atomic64_dec_return +/** + * raw_atomic64_fetch_dec() - atomic decrement with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with full ordering. 
+ * + * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_dec_return(atomic64_t *v) +raw_atomic64_fetch_dec(atomic64_t *v) { +#if defined(arch_atomic64_fetch_dec) + return arch_atomic64_fetch_dec(v); +#elif defined(arch_atomic64_fetch_dec_relaxed) s64 ret; __atomic_pre_full_fence(); - ret = arch_atomic64_dec_return_relaxed(v); + ret = arch_atomic64_fetch_dec_relaxed(v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_dec_return arch_atomic64_dec_return +#else + return raw_atomic64_fetch_sub(1, v); #endif - -#endif /* arch_atomic64_dec_return_relaxed */ - -#ifndef arch_atomic64_fetch_dec_relaxed -#ifdef arch_atomic64_fetch_dec -#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec -#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec -#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec -#endif /* arch_atomic64_fetch_dec */ - -#ifndef arch_atomic64_fetch_dec -static __always_inline s64 -arch_atomic64_fetch_dec(atomic64_t *v) -{ - return arch_atomic64_fetch_sub(1, v); } -#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec -#endif -#ifndef arch_atomic64_fetch_dec_acquire +/** + * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_dec_acquire(atomic64_t *v) +raw_atomic64_fetch_dec_acquire(atomic64_t *v) { - return arch_atomic64_fetch_sub_acquire(1, v); -} -#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#if defined(arch_atomic64_fetch_dec_acquire) + return arch_atomic64_fetch_dec_acquire(v); +#elif defined(arch_atomic64_fetch_dec_relaxed) + s64 ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic64_fetch_dec) + return arch_atomic64_fetch_dec(v); +#else + return raw_atomic64_fetch_sub_acquire(1, v); #endif - -#ifndef arch_atomic64_fetch_dec_release -static __always_inline s64 -arch_atomic64_fetch_dec_release(atomic64_t *v) -{ - return arch_atomic64_fetch_sub_release(1, v); } -#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release -#endif -#ifndef arch_atomic64_fetch_dec_relaxed +/** + * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere. + * + * Return: The original value of @v. 
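When only a relaxed primitive exists and full ordering is requested, the generated code brackets the relaxed operation with __atomic_pre_full_fence() and __atomic_post_full_fence(). A hedged userspace analogue of that branch of raw_atomic64_fetch_dec(), with seq_cst fences approximating the kernel's full-ordering fences and invented names throughout:

#include <stdatomic.h>
#include <stdint.h>

typedef _Atomic int64_t my_atomic64_t;

/* Full-ordering fallback: a relaxed RMW bracketed by two strong fences. */
static int64_t my_fetch_dec(my_atomic64_t *v)
{
	int64_t ret;

	atomic_thread_fence(memory_order_seq_cst);	/* ~ __atomic_pre_full_fence() */
	ret = atomic_fetch_sub_explicit(v, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ __atomic_post_full_fence() */
	return ret;	/* original value, as with raw_atomic64_fetch_dec() */
}

int main(void)
{
	my_atomic64_t v = 10;

	return my_fetch_dec(&v) == 10 ? 0 : 1;
}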
+ */ static __always_inline s64 -arch_atomic64_fetch_dec_relaxed(atomic64_t *v) +raw_atomic64_fetch_dec_release(atomic64_t *v) { - return arch_atomic64_fetch_sub_relaxed(1, v); -} -#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed +#if defined(arch_atomic64_fetch_dec_release) + return arch_atomic64_fetch_dec_release(v); +#elif defined(arch_atomic64_fetch_dec_relaxed) + __atomic_release_fence(); + return arch_atomic64_fetch_dec_relaxed(v); +#elif defined(arch_atomic64_fetch_dec) + return arch_atomic64_fetch_dec(v); +#else + return raw_atomic64_fetch_sub_release(1, v); #endif +} -#else /* arch_atomic64_fetch_dec_relaxed */ - -#ifndef arch_atomic64_fetch_dec_acquire +/** + * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_dec_acquire(atomic64_t *v) +raw_atomic64_fetch_dec_relaxed(atomic64_t *v) { - s64 ret = arch_atomic64_fetch_dec_relaxed(v); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#if defined(arch_atomic64_fetch_dec_relaxed) + return arch_atomic64_fetch_dec_relaxed(v); +#elif defined(arch_atomic64_fetch_dec) + return arch_atomic64_fetch_dec(v); +#else + return raw_atomic64_fetch_sub_relaxed(1, v); #endif +} -#ifndef arch_atomic64_fetch_dec_release -static __always_inline s64 -arch_atomic64_fetch_dec_release(atomic64_t *v) +/** + * raw_atomic64_and() - atomic bitwise AND with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_and() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_and(s64 i, atomic64_t *v) { - __atomic_release_fence(); - return arch_atomic64_fetch_dec_relaxed(v); + arch_atomic64_and(i, v); } -#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release -#endif -#ifndef arch_atomic64_fetch_dec +/** + * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 -arch_atomic64_fetch_dec(atomic64_t *v) +raw_atomic64_fetch_and(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_and) + return arch_atomic64_fetch_and(i, v); +#elif defined(arch_atomic64_fetch_and_relaxed) s64 ret; __atomic_pre_full_fence(); - ret = arch_atomic64_fetch_dec_relaxed(v); + ret = arch_atomic64_fetch_and_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#else +#error "Unable to define raw_atomic64_fetch_and" #endif +} -#endif /* arch_atomic64_fetch_dec_relaxed */ - -#ifndef arch_atomic64_fetch_and_relaxed -#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and -#else /* arch_atomic64_fetch_and_relaxed */ - -#ifndef arch_atomic64_fetch_and_acquire +/** + * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_and_acquire) + return arch_atomic64_fetch_and_acquire(i, v); +#elif defined(arch_atomic64_fetch_and_relaxed) s64 ret = arch_atomic64_fetch_and_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire +#elif defined(arch_atomic64_fetch_and) + return arch_atomic64_fetch_and(i, v); +#else +#error "Unable to define raw_atomic64_fetch_and_acquire" #endif +} -#ifndef arch_atomic64_fetch_and_release +/** + * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_and_release(s64 i, atomic64_t *v) +raw_atomic64_fetch_and_release(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_and_release) + return arch_atomic64_fetch_and_release(i, v); +#elif defined(arch_atomic64_fetch_and_relaxed) __atomic_release_fence(); return arch_atomic64_fetch_and_relaxed(i, v); -} -#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release +#elif defined(arch_atomic64_fetch_and) + return arch_atomic64_fetch_and(i, v); +#else +#error "Unable to define raw_atomic64_fetch_and_release" #endif +} -#ifndef arch_atomic64_fetch_and +/** + * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 -arch_atomic64_fetch_and(s64 i, atomic64_t *v) +raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { - s64 ret; - __atomic_pre_full_fence(); - ret = arch_atomic64_fetch_and_relaxed(i, v); - __atomic_post_full_fence(); - return ret; -} -#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#if defined(arch_atomic64_fetch_and_relaxed) + return arch_atomic64_fetch_and_relaxed(i, v); +#elif defined(arch_atomic64_fetch_and) + return arch_atomic64_fetch_and(i, v); +#else +#error "Unable to define raw_atomic64_fetch_and_relaxed" #endif - -#endif /* arch_atomic64_fetch_and_relaxed */ - -#ifndef arch_atomic64_andnot -static __always_inline void -arch_atomic64_andnot(s64 i, atomic64_t *v) -{ - arch_atomic64_and(~i, v); } -#define arch_atomic64_andnot arch_atomic64_andnot -#endif -#ifndef arch_atomic64_fetch_andnot_relaxed -#ifdef arch_atomic64_fetch_andnot -#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot -#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot -#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot -#endif /* arch_atomic64_fetch_andnot */ - -#ifndef arch_atomic64_fetch_andnot -static __always_inline s64 -arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +/** + * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_andnot(s64 i, atomic64_t *v) { - return arch_atomic64_fetch_and(~i, v); -} -#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#if defined(arch_atomic64_andnot) + arch_atomic64_andnot(i, v); +#else + raw_atomic64_and(~i, v); #endif - -#ifndef arch_atomic64_fetch_andnot_acquire -static __always_inline s64 -arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) -{ - return arch_atomic64_fetch_and_acquire(~i, v); } -#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire -#endif -#ifndef arch_atomic64_fetch_andnot_release +/** + * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere. + * + * Return: The original value of @v. 
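The andnot operations have no dedicated fallback logic of their own; as shown above, they reuse the AND operation with a complemented mask. A small illustrative sketch of the same derivation in userspace C11 (names invented, unsigned arithmetic used for clarity):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef _Atomic uint64_t my_atomic64_t;

/* fetch_andnot(i, v) == fetch_and(~i, v): clear the bits that are set in @i. */
static uint64_t my_fetch_andnot(uint64_t i, my_atomic64_t *v)
{
	return atomic_fetch_and_explicit(v, ~i, memory_order_seq_cst);
}

int main(void)
{
	my_atomic64_t v = 0xff;
	uint64_t old = my_fetch_andnot(0x0f, &v);	/* clears the low nibble */

	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)old,
	       (unsigned long long)atomic_load(&v));	/* old=0xff new=0xf0 */
	return 0;
}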
+ */ static __always_inline s64 -arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +raw_atomic64_fetch_andnot(s64 i, atomic64_t *v) { - return arch_atomic64_fetch_and_release(~i, v); -} -#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#if defined(arch_atomic64_fetch_andnot) + return arch_atomic64_fetch_andnot(i, v); +#elif defined(arch_atomic64_fetch_andnot_relaxed) + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic64_fetch_and(~i, v); #endif - -#ifndef arch_atomic64_fetch_andnot_relaxed -static __always_inline s64 -arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) -{ - return arch_atomic64_fetch_and_relaxed(~i, v); } -#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed -#endif - -#else /* arch_atomic64_fetch_andnot_relaxed */ -#ifndef arch_atomic64_fetch_andnot_acquire +/** + * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_andnot_acquire) + return arch_atomic64_fetch_andnot_acquire(i, v); +#elif defined(arch_atomic64_fetch_andnot_relaxed) s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#elif defined(arch_atomic64_fetch_andnot) + return arch_atomic64_fetch_andnot(i, v); +#else + return raw_atomic64_fetch_and_acquire(~i, v); #endif +} -#ifndef arch_atomic64_fetch_andnot_release +/** + * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_andnot_release) + return arch_atomic64_fetch_andnot_release(i, v); +#elif defined(arch_atomic64_fetch_andnot_relaxed) __atomic_release_fence(); return arch_atomic64_fetch_andnot_relaxed(i, v); +#elif defined(arch_atomic64_fetch_andnot) + return arch_atomic64_fetch_andnot(i, v); +#else + return raw_atomic64_fetch_and_release(~i, v); +#endif } -#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release + +/** + * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ +static __always_inline s64 +raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ +#if defined(arch_atomic64_fetch_andnot_relaxed) + return arch_atomic64_fetch_andnot_relaxed(i, v); +#elif defined(arch_atomic64_fetch_andnot) + return arch_atomic64_fetch_andnot(i, v); +#else + return raw_atomic64_fetch_and_relaxed(~i, v); #endif +} -#ifndef arch_atomic64_fetch_andnot +/** + * raw_atomic64_or() - atomic bitwise OR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_or() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_or(s64 i, atomic64_t *v) +{ + arch_atomic64_or(i, v); +} + +/** + * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +raw_atomic64_fetch_or(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_or) + return arch_atomic64_fetch_or(i, v); +#elif defined(arch_atomic64_fetch_or_relaxed) s64 ret; __atomic_pre_full_fence(); - ret = arch_atomic64_fetch_andnot_relaxed(i, v); + ret = arch_atomic64_fetch_or_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#else +#error "Unable to define raw_atomic64_fetch_or" #endif +} -#endif /* arch_atomic64_fetch_andnot_relaxed */ - -#ifndef arch_atomic64_fetch_or_relaxed -#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or -#else /* arch_atomic64_fetch_or_relaxed */ - -#ifndef arch_atomic64_fetch_or_acquire +/** + * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_or_acquire) + return arch_atomic64_fetch_or_acquire(i, v); +#elif defined(arch_atomic64_fetch_or_relaxed) s64 ret = arch_atomic64_fetch_or_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire +#elif defined(arch_atomic64_fetch_or) + return arch_atomic64_fetch_or(i, v); +#else +#error "Unable to define raw_atomic64_fetch_or_acquire" #endif +} -#ifndef arch_atomic64_fetch_or_release +/** + * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere. + * + * Return: The original value of @v. 
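A common use of the fetch_or family documented here is to set a flag bit and learn, in the same atomic step, whether it was already set. A hedged userspace sketch (the flag name and helper are invented):

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MY_FLAG_PENDING	(1u << 0)	/* invented flag bit */

static _Atomic uint32_t my_state;

/* Returns true only for the caller that actually transitioned the bit 0 -> 1. */
static bool my_set_pending(void)
{
	uint32_t old = atomic_fetch_or_explicit(&my_state, MY_FLAG_PENDING,
						memory_order_seq_cst);
	return !(old & MY_FLAG_PENDING);
}

int main(void)
{
	printf("%d %d\n", my_set_pending(), my_set_pending());	/* 1 0 */
	return 0;
}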
+ */ static __always_inline s64 -arch_atomic64_fetch_or_release(s64 i, atomic64_t *v) +raw_atomic64_fetch_or_release(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_or_release) + return arch_atomic64_fetch_or_release(i, v); +#elif defined(arch_atomic64_fetch_or_relaxed) __atomic_release_fence(); return arch_atomic64_fetch_or_relaxed(i, v); +#elif defined(arch_atomic64_fetch_or) + return arch_atomic64_fetch_or(i, v); +#else +#error "Unable to define raw_atomic64_fetch_or_release" +#endif } -#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release + +/** + * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere. + * + * Return: The original value of @v. + */ +static __always_inline s64 +raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) +{ +#if defined(arch_atomic64_fetch_or_relaxed) + return arch_atomic64_fetch_or_relaxed(i, v); +#elif defined(arch_atomic64_fetch_or) + return arch_atomic64_fetch_or(i, v); +#else +#error "Unable to define raw_atomic64_fetch_or_relaxed" #endif +} + +/** + * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_xor() elsewhere. + * + * Return: Nothing. + */ +static __always_inline void +raw_atomic64_xor(s64 i, atomic64_t *v) +{ + arch_atomic64_xor(i, v); +} -#ifndef arch_atomic64_fetch_or +/** + * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_or(s64 i, atomic64_t *v) +raw_atomic64_fetch_xor(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_xor) + return arch_atomic64_fetch_xor(i, v); +#elif defined(arch_atomic64_fetch_xor_relaxed) s64 ret; __atomic_pre_full_fence(); - ret = arch_atomic64_fetch_or_relaxed(i, v); + ret = arch_atomic64_fetch_xor_relaxed(i, v); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#else +#error "Unable to define raw_atomic64_fetch_xor" #endif +} -#endif /* arch_atomic64_fetch_or_relaxed */ - -#ifndef arch_atomic64_fetch_xor_relaxed -#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor -#else /* arch_atomic64_fetch_xor_relaxed */ - -#ifndef arch_atomic64_fetch_xor_acquire +/** + * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 -arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_xor_acquire) + return arch_atomic64_fetch_xor_acquire(i, v); +#elif defined(arch_atomic64_fetch_xor_relaxed) s64 ret = arch_atomic64_fetch_xor_relaxed(i, v); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire +#elif defined(arch_atomic64_fetch_xor) + return arch_atomic64_fetch_xor(i, v); +#else +#error "Unable to define raw_atomic64_fetch_xor_acquire" #endif +} -#ifndef arch_atomic64_fetch_xor_release +/** + * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v) +raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v) { +#if defined(arch_atomic64_fetch_xor_release) + return arch_atomic64_fetch_xor_release(i, v); +#elif defined(arch_atomic64_fetch_xor_relaxed) __atomic_release_fence(); return arch_atomic64_fetch_xor_relaxed(i, v); +#elif defined(arch_atomic64_fetch_xor) + return arch_atomic64_fetch_xor(i, v); +#else +#error "Unable to define raw_atomic64_fetch_xor_release" +#endif } -#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release + +/** + * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere. + * + * Return: The original value of @v. + */ +static __always_inline s64 +raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) +{ +#if defined(arch_atomic64_fetch_xor_relaxed) + return arch_atomic64_fetch_xor_relaxed(i, v); +#elif defined(arch_atomic64_fetch_xor) + return arch_atomic64_fetch_xor(i, v); +#else +#error "Unable to define raw_atomic64_fetch_xor_relaxed" #endif +} -#ifndef arch_atomic64_fetch_xor +/** + * raw_atomic64_xchg() - atomic exchange with full ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 -arch_atomic64_fetch_xor(s64 i, atomic64_t *v) +raw_atomic64_xchg(atomic64_t *v, s64 new) { +#if defined(arch_atomic64_xchg) + return arch_atomic64_xchg(v, new); +#elif defined(arch_atomic64_xchg_relaxed) s64 ret; __atomic_pre_full_fence(); - ret = arch_atomic64_fetch_xor_relaxed(i, v); + ret = arch_atomic64_xchg_relaxed(v, new); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#else + return raw_xchg(&v->counter, new); #endif +} -#endif /* arch_atomic64_fetch_xor_relaxed */ - -#ifndef arch_atomic64_xchg_relaxed -#define arch_atomic64_xchg_acquire arch_atomic64_xchg -#define arch_atomic64_xchg_release arch_atomic64_xchg -#define arch_atomic64_xchg_relaxed arch_atomic64_xchg -#else /* arch_atomic64_xchg_relaxed */ - -#ifndef arch_atomic64_xchg_acquire +/** + * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_xchg_acquire(atomic64_t *v, s64 i) +raw_atomic64_xchg_acquire(atomic64_t *v, s64 new) { - s64 ret = arch_atomic64_xchg_relaxed(v, i); +#if defined(arch_atomic64_xchg_acquire) + return arch_atomic64_xchg_acquire(v, new); +#elif defined(arch_atomic64_xchg_relaxed) + s64 ret = arch_atomic64_xchg_relaxed(v, new); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire +#elif defined(arch_atomic64_xchg) + return arch_atomic64_xchg(v, new); +#else + return raw_xchg_acquire(&v->counter, new); #endif +} -#ifndef arch_atomic64_xchg_release +/** + * raw_atomic64_xchg_release() - atomic exchange with release ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_xchg_release(atomic64_t *v, s64 i) +raw_atomic64_xchg_release(atomic64_t *v, s64 new) { +#if defined(arch_atomic64_xchg_release) + return arch_atomic64_xchg_release(v, new); +#elif defined(arch_atomic64_xchg_relaxed) __atomic_release_fence(); - return arch_atomic64_xchg_relaxed(v, i); + return arch_atomic64_xchg_relaxed(v, new); +#elif defined(arch_atomic64_xchg) + return arch_atomic64_xchg(v, new); +#else + return raw_xchg_release(&v->counter, new); +#endif } -#define arch_atomic64_xchg_release arch_atomic64_xchg_release + +/** + * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ +static __always_inline s64 +raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new) +{ +#if defined(arch_atomic64_xchg_relaxed) + return arch_atomic64_xchg_relaxed(v, new); +#elif defined(arch_atomic64_xchg) + return arch_atomic64_xchg(v, new); +#else + return raw_xchg_relaxed(&v->counter, new); #endif +} -#ifndef arch_atomic64_xchg +/** + * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_xchg(atomic64_t *v, s64 i) +raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { +#if defined(arch_atomic64_cmpxchg) + return arch_atomic64_cmpxchg(v, old, new); +#elif defined(arch_atomic64_cmpxchg_relaxed) s64 ret; __atomic_pre_full_fence(); - ret = arch_atomic64_xchg_relaxed(v, i); + ret = arch_atomic64_cmpxchg_relaxed(v, old, new); __atomic_post_full_fence(); return ret; -} -#define arch_atomic64_xchg arch_atomic64_xchg +#else + return raw_cmpxchg(&v->counter, old, new); #endif +} -#endif /* arch_atomic64_xchg_relaxed */ - -#ifndef arch_atomic64_cmpxchg_relaxed -#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg -#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg -#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg -#else /* arch_atomic64_cmpxchg_relaxed */ - -#ifndef arch_atomic64_cmpxchg_acquire +/** + * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { +#if defined(arch_atomic64_cmpxchg_acquire) + return arch_atomic64_cmpxchg_acquire(v, old, new); +#elif defined(arch_atomic64_cmpxchg_relaxed) s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new); __atomic_acquire_fence(); return ret; -} -#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire +#elif defined(arch_atomic64_cmpxchg) + return arch_atomic64_cmpxchg(v, old, new); +#else + return raw_cmpxchg_acquire(&v->counter, old, new); #endif +} -#ifndef arch_atomic64_cmpxchg_release +/** + * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 -arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { +#if defined(arch_atomic64_cmpxchg_release) + return arch_atomic64_cmpxchg_release(v, old, new); +#elif defined(arch_atomic64_cmpxchg_relaxed) __atomic_release_fence(); return arch_atomic64_cmpxchg_relaxed(v, old, new); -} -#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release +#elif defined(arch_atomic64_cmpxchg) + return arch_atomic64_cmpxchg(v, old, new); +#else + return raw_cmpxchg_release(&v->counter, old, new); #endif +} -#ifndef arch_atomic64_cmpxchg +/** + * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline s64 -arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { - s64 ret; - __atomic_pre_full_fence(); - ret = arch_atomic64_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; -} -#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg +#if defined(arch_atomic64_cmpxchg_relaxed) + return arch_atomic64_cmpxchg_relaxed(v, old, new); +#elif defined(arch_atomic64_cmpxchg) + return arch_atomic64_cmpxchg(v, old, new); +#else + return raw_cmpxchg_relaxed(&v->counter, old, new); #endif +} -#endif /* arch_atomic64_cmpxchg_relaxed */ - -#ifndef arch_atomic64_try_cmpxchg_relaxed -#ifdef arch_atomic64_try_cmpxchg -#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg -#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg -#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg -#endif /* arch_atomic64_try_cmpxchg */ - -#ifndef arch_atomic64_try_cmpxchg +/** + * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { +#if defined(arch_atomic64_try_cmpxchg) + return arch_atomic64_try_cmpxchg(v, old, new); +#elif defined(arch_atomic64_try_cmpxchg_relaxed) + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +#else s64 r, o = *old; - r = arch_atomic64_cmpxchg(v, o, new); + r = raw_atomic64_cmpxchg(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg #endif +} -#ifndef arch_atomic64_try_cmpxchg_acquire +/** + * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * Otherwise, updates @old to the current value of @v. 
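The try_cmpxchg operations differ from cmpxchg in two ways that the fallback above spells out: they return a success boolean rather than the old value, and on failure they write the observed value back through @old, so a retry loop never has to re-read @v. A userspace sketch of the usual retry-loop idiom, using C11 compare_exchange as a stand-in (function and variable names are invented):

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef _Atomic int64_t my_atomic64_t;

/* Add @a to @v unless the result would exceed @limit; returns success. */
static bool my_add_capped(my_atomic64_t *v, int64_t a, int64_t limit)
{
	int64_t old = atomic_load_explicit(v, memory_order_relaxed);

	do {
		if (old + a > limit)
			return false;
		/* On failure, 'old' is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak_explicit(v, &old, old + a,
							memory_order_seq_cst,
							memory_order_relaxed));
	return true;
}

int main(void)
{
	my_atomic64_t v = 5;

	printf("%d %lld\n", my_add_capped(&v, 3, 10),
	       (long long)atomic_load(&v));	/* 1 8 */
	printf("%d %lld\n", my_add_capped(&v, 5, 10),
	       (long long)atomic_load(&v));	/* 0 8 */
	return 0;
}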
+ * + * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { +#if defined(arch_atomic64_try_cmpxchg_acquire) + return arch_atomic64_try_cmpxchg_acquire(v, old, new); +#elif defined(arch_atomic64_try_cmpxchg_relaxed) + bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic64_try_cmpxchg) + return arch_atomic64_try_cmpxchg(v, old, new); +#else s64 r, o = *old; - r = arch_atomic64_cmpxchg_acquire(v, o, new); + r = raw_atomic64_cmpxchg_acquire(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire #endif +} -#ifndef arch_atomic64_try_cmpxchg_release +/** + * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { +#if defined(arch_atomic64_try_cmpxchg_release) + return arch_atomic64_try_cmpxchg_release(v, old, new); +#elif defined(arch_atomic64_try_cmpxchg_relaxed) + __atomic_release_fence(); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +#elif defined(arch_atomic64_try_cmpxchg) + return arch_atomic64_try_cmpxchg(v, old, new); +#else s64 r, o = *old; - r = arch_atomic64_cmpxchg_release(v, o, new); + r = raw_atomic64_cmpxchg_release(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release #endif +} -#ifndef arch_atomic64_try_cmpxchg_relaxed +/** + * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. 
+ */ static __always_inline bool -arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { +#if defined(arch_atomic64_try_cmpxchg_relaxed) + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +#elif defined(arch_atomic64_try_cmpxchg) + return arch_atomic64_try_cmpxchg(v, old, new); +#else s64 r, o = *old; - r = arch_atomic64_cmpxchg_relaxed(v, o, new); + r = raw_atomic64_cmpxchg_relaxed(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} -#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed -#endif - -#else /* arch_atomic64_try_cmpxchg_relaxed */ - -#ifndef arch_atomic64_try_cmpxchg_acquire -static __always_inline bool -arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) -{ - bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire -#endif - -#ifndef arch_atomic64_try_cmpxchg_release -static __always_inline bool -arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) -{ - __atomic_release_fence(); - return arch_atomic64_try_cmpxchg_relaxed(v, old, new); -} -#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release #endif - -#ifndef arch_atomic64_try_cmpxchg -static __always_inline bool -arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) -{ - bool ret; - __atomic_pre_full_fence(); - ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); - __atomic_post_full_fence(); - return ret; } -#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg -#endif -#endif /* arch_atomic64_try_cmpxchg_relaxed */ - -#ifndef arch_atomic64_sub_and_test /** - * arch_atomic64_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic64_t + * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere. * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. + * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool -arch_atomic64_sub_and_test(s64 i, atomic64_t *v) +raw_atomic64_sub_and_test(s64 i, atomic64_t *v) { - return arch_atomic64_sub_return(i, v) == 0; -} -#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test +#if defined(arch_atomic64_sub_and_test) + return arch_atomic64_sub_and_test(i, v); +#else + return raw_atomic64_sub_return(i, v) == 0; #endif +} -#ifndef arch_atomic64_dec_and_test /** - * arch_atomic64_dec_and_test - decrement and test - * @v: pointer of type atomic64_t + * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere. * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. + * Return: @true if the resulting value of @v is zero, @false otherwise. 
*/ static __always_inline bool -arch_atomic64_dec_and_test(atomic64_t *v) +raw_atomic64_dec_and_test(atomic64_t *v) { - return arch_atomic64_dec_return(v) == 0; -} -#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test +#if defined(arch_atomic64_dec_and_test) + return arch_atomic64_dec_and_test(v); +#else + return raw_atomic64_dec_return(v) == 0; #endif +} -#ifndef arch_atomic64_inc_and_test /** - * arch_atomic64_inc_and_test - increment and test - * @v: pointer of type atomic64_t + * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with full ordering. * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. + * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool -arch_atomic64_inc_and_test(atomic64_t *v) +raw_atomic64_inc_and_test(atomic64_t *v) { - return arch_atomic64_inc_return(v) == 0; -} -#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test +#if defined(arch_atomic64_inc_and_test) + return arch_atomic64_inc_and_test(v); +#else + return raw_atomic64_inc_return(v) == 0; #endif +} -#ifndef arch_atomic64_add_negative_relaxed -#ifdef arch_atomic64_add_negative -#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative -#define arch_atomic64_add_negative_release arch_atomic64_add_negative -#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative -#endif /* arch_atomic64_add_negative */ - -#ifndef arch_atomic64_add_negative /** - * arch_atomic64_add_negative - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t + * raw_atomic64_add_negative() - atomic add and test if negative with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere. * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool -arch_atomic64_add_negative(s64 i, atomic64_t *v) +raw_atomic64_add_negative(s64 i, atomic64_t *v) { - return arch_atomic64_add_return(i, v) < 0; -} -#define arch_atomic64_add_negative arch_atomic64_add_negative +#if defined(arch_atomic64_add_negative) + return arch_atomic64_add_negative(i, v); +#elif defined(arch_atomic64_add_negative_relaxed) + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_add_negative_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +#else + return raw_atomic64_add_return(i, v) < 0; #endif +} -#ifndef arch_atomic64_add_negative_acquire /** - * arch_atomic64_add_negative_acquire - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t + * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering + * @i: s64 value to add + * @v: pointer to atomic64_t * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere. 
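dec_and_test() is the classic building block for reference counting: exactly one caller observes the transition to zero and becomes responsible for freeing the object. A minimal userspace sketch of that pattern (types and helpers invented; in-kernel code would normally use refcount_t rather than open-coded atomics):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct my_obj {
	_Atomic int refs;
	int payload;
};

static bool my_dec_and_test(_Atomic int *v)
{
	/* fetch_sub returns the old value; old == 1 means we hit zero. */
	return atomic_fetch_sub_explicit(v, 1, memory_order_acq_rel) == 1;
}

static void my_obj_put(struct my_obj *obj)
{
	if (my_dec_and_test(&obj->refs)) {
		printf("freeing payload %d\n", obj->payload);
		free(obj);
	}
}

int main(void)
{
	struct my_obj *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	atomic_init(&obj->refs, 2);	/* two notional users */
	obj->payload = 7;
	my_obj_put(obj);		/* drops to 1, nothing happens */
	my_obj_put(obj);		/* drops to 0, frees */
	return 0;
}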
+ * + * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool -arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v) +raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v) { - return arch_atomic64_add_return_acquire(i, v) < 0; -} -#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire +#if defined(arch_atomic64_add_negative_acquire) + return arch_atomic64_add_negative_acquire(i, v); +#elif defined(arch_atomic64_add_negative_relaxed) + bool ret = arch_atomic64_add_negative_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +#elif defined(arch_atomic64_add_negative) + return arch_atomic64_add_negative(i, v); +#else + return raw_atomic64_add_return_acquire(i, v) < 0; #endif +} -#ifndef arch_atomic64_add_negative_release /** - * arch_atomic64_add_negative_release - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t + * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering + * @i: s64 value to add + * @v: pointer to atomic64_t * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool -arch_atomic64_add_negative_release(s64 i, atomic64_t *v) +raw_atomic64_add_negative_release(s64 i, atomic64_t *v) { - return arch_atomic64_add_return_release(i, v) < 0; -} -#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release +#if defined(arch_atomic64_add_negative_release) + return arch_atomic64_add_negative_release(i, v); +#elif defined(arch_atomic64_add_negative_relaxed) + __atomic_release_fence(); + return arch_atomic64_add_negative_relaxed(i, v); +#elif defined(arch_atomic64_add_negative) + return arch_atomic64_add_negative(i, v); +#else + return raw_atomic64_add_return_release(i, v) < 0; #endif +} -#ifndef arch_atomic64_add_negative_relaxed /** - * arch_atomic64_add_negative_relaxed - Add and test if negative - * @i: integer value to add - * @v: pointer of type atomic64_t + * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. + * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. 
*/ static __always_inline bool -arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v) -{ - return arch_atomic64_add_return_relaxed(i, v) < 0; -} -#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed -#endif - -#else /* arch_atomic64_add_negative_relaxed */ - -#ifndef arch_atomic64_add_negative_acquire -static __always_inline bool -arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v) -{ - bool ret = arch_atomic64_add_negative_relaxed(i, v); - __atomic_acquire_fence(); - return ret; -} -#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire -#endif - -#ifndef arch_atomic64_add_negative_release -static __always_inline bool -arch_atomic64_add_negative_release(s64 i, atomic64_t *v) +raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v) { - __atomic_release_fence(); +#if defined(arch_atomic64_add_negative_relaxed) return arch_atomic64_add_negative_relaxed(i, v); -} -#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release +#elif defined(arch_atomic64_add_negative) + return arch_atomic64_add_negative(i, v); +#else + return raw_atomic64_add_return_relaxed(i, v) < 0; #endif - -#ifndef arch_atomic64_add_negative -static __always_inline bool -arch_atomic64_add_negative(s64 i, atomic64_t *v) -{ - bool ret; - __atomic_pre_full_fence(); - ret = arch_atomic64_add_negative_relaxed(i, v); - __atomic_post_full_fence(); - return ret; } -#define arch_atomic64_add_negative arch_atomic64_add_negative -#endif - -#endif /* arch_atomic64_add_negative_relaxed */ -#ifndef arch_atomic64_fetch_add_unless /** - * arch_atomic64_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. + * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic64_t + * @a: s64 value to add + * @u: s64 value to compare with * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns original value of @v + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere. + * + * Return: The original value of @v. */ static __always_inline s64 -arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { - s64 c = arch_atomic64_read(v); +#if defined(arch_atomic64_fetch_add_unless) + return arch_atomic64_fetch_add_unless(v, a, u); +#else + s64 c = raw_atomic64_read(v); do { if (unlikely(c == u)) break; - } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); + } while (!raw_atomic64_try_cmpxchg(v, &c, c + a)); return c; -} -#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless #endif +} -#ifndef arch_atomic64_add_unless /** - * arch_atomic64_add_unless - add unless the number is already a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. + * raw_atomic64_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic64_t + * @a: s64 value to add + * @u: s64 value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere. * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. + * Return: @true if @v was updated, @false otherwise. 
*/ static __always_inline bool -arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { - return arch_atomic64_fetch_add_unless(v, a, u) != u; -} -#define arch_atomic64_add_unless arch_atomic64_add_unless +#if defined(arch_atomic64_add_unless) + return arch_atomic64_add_unless(v, a, u); +#else + return raw_atomic64_fetch_add_unless(v, a, u) != u; #endif +} -#ifndef arch_atomic64_inc_not_zero /** - * arch_atomic64_inc_not_zero - increment unless the number is zero - * @v: pointer of type atomic64_t + * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering + * @v: pointer to atomic64_t + * + * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere. * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. + * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool -arch_atomic64_inc_not_zero(atomic64_t *v) +raw_atomic64_inc_not_zero(atomic64_t *v) { - return arch_atomic64_add_unless(v, 1, 0); -} -#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero +#if defined(arch_atomic64_inc_not_zero) + return arch_atomic64_inc_not_zero(v); +#else + return raw_atomic64_add_unless(v, 1, 0); #endif +} -#ifndef arch_atomic64_inc_unless_negative +/** + * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering + * @v: pointer to atomic64_t + * + * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool -arch_atomic64_inc_unless_negative(atomic64_t *v) +raw_atomic64_inc_unless_negative(atomic64_t *v) { - s64 c = arch_atomic64_read(v); +#if defined(arch_atomic64_inc_unless_negative) + return arch_atomic64_inc_unless_negative(v); +#else + s64 c = raw_atomic64_read(v); do { if (unlikely(c < 0)) return false; - } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1)); + } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1)); return true; -} -#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative #endif +} -#ifndef arch_atomic64_dec_unless_positive +/** + * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering + * @v: pointer to atomic64_t + * + * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool -arch_atomic64_dec_unless_positive(atomic64_t *v) +raw_atomic64_dec_unless_positive(atomic64_t *v) { - s64 c = arch_atomic64_read(v); +#if defined(arch_atomic64_dec_unless_positive) + return arch_atomic64_dec_unless_positive(v); +#else + s64 c = raw_atomic64_read(v); do { if (unlikely(c > 0)) return false; - } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1)); + } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1)); return true; -} -#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive #endif +} -#ifndef arch_atomic64_dec_if_positive +/** + * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering + * @v: pointer to atomic64_t + * + * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere. 
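inc_not_zero() and the other conditional operations exist for lookup paths that may only take a reference while the object is still live: if the count has already reached zero, the increment must not happen at all. A hedged userspace sketch of the compare-exchange loop behind it, mirroring the raw_atomic64_fetch_add_unless() fallback above (names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment @v unless it is zero; returns true if a reference was taken. */
static bool my_inc_not_zero(_Atomic long *v)
{
	long c = atomic_load_explicit(v, memory_order_relaxed);

	do {
		if (c == 0)
			return false;	/* object already dead: do not touch it */
	} while (!atomic_compare_exchange_weak_explicit(v, &c, c + 1,
							memory_order_seq_cst,
							memory_order_relaxed));
	return true;
}

int main(void)
{
	_Atomic long live = 1, dead = 0;

	printf("%d %d\n", my_inc_not_zero(&live), my_inc_not_zero(&dead));	/* 1 0 */
	return 0;
}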
+ * + * Return: The old value of (@v - 1), regardless of whether @v was updated. + */ static __always_inline s64 -arch_atomic64_dec_if_positive(atomic64_t *v) +raw_atomic64_dec_if_positive(atomic64_t *v) { - s64 dec, c = arch_atomic64_read(v); +#if defined(arch_atomic64_dec_if_positive) + return arch_atomic64_dec_if_positive(v); +#else + s64 dec, c = raw_atomic64_read(v); do { dec = c - 1; if (unlikely(dec < 0)) break; - } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); + } while (!raw_atomic64_try_cmpxchg(v, &c, dec)); return dec; -} -#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive #endif +} #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// ad2e2b4d168dbc60a73922616047a9bfa446af36 +// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index 03a232a1fa57..d401b406ef7c 100644 --- a/include/linux/atomic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -4,15 +4,10 @@ // DO NOT MODIFY THIS FILE DIRECTLY /* - * This file provides wrappers with KASAN instrumentation for atomic operations. - * To use this functionality an arch's atomic.h file needs to define all - * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include - * this file at the end. This file provides atomic_read() that forwards to - * arch_atomic_read() for actual atomic operation. - * Note: if an arch atomic operation is implemented by means of other atomic - * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use - * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid - * double instrumentation. + * This file provoides atomic operations with explicit instrumentation (e.g. + * KASAN, KCSAN), which should be used unless it is necessary to avoid + * instrumentation. Where it is necessary to aovid instrumenation, the + * raw_atomic*() operations should be used. */ #ifndef _LINUX_ATOMIC_INSTRUMENTED_H #define _LINUX_ATOMIC_INSTRUMENTED_H @@ -21,1927 +16,4696 @@ #include <linux/compiler.h> #include <linux/instrumented.h> +/** + * atomic_read() - atomic load with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically loads the value of @v with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_read() there. + * + * Return: The value loaded from @v. + */ static __always_inline int atomic_read(const atomic_t *v) { instrument_atomic_read(v, sizeof(*v)); - return arch_atomic_read(v); -} - + return raw_atomic_read(v); +} + +/** + * atomic_read_acquire() - atomic load with acquire ordering + * @v: pointer to atomic_t + * + * Atomically loads the value of @v with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_read_acquire() there. + * + * Return: The value loaded from @v. + */ static __always_inline int atomic_read_acquire(const atomic_t *v) { instrument_atomic_read(v, sizeof(*v)); - return arch_atomic_read_acquire(v); -} - + return raw_atomic_read_acquire(v); +} + +/** + * atomic_set() - atomic set with relaxed ordering + * @v: pointer to atomic_t + * @i: int value to assign + * + * Atomically sets @v to @i with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_set() there. + * + * Return: Nothing. 
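Every instrumented wrapper in this file has the same shape: report the access (and, for ordered operations, the implied barrier) to the sanitizer runtime, then delegate to the corresponding raw_ operation. A schematic userspace analogue with a stub standing in for the KASAN/KCSAN hooks; everything below is invented for illustration:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Stub instrumentation hook; the kernel would call into KASAN/KCSAN here. */
static void my_instrument_read(const void *addr, size_t size)
{
	printf("checked read of %zu bytes at %p\n", size, (void *)addr);
}

/* "raw" op: the uninstrumented primitive. */
static int my_raw_read(_Atomic int *v)
{
	return atomic_load_explicit(v, memory_order_relaxed);
}

/* Instrumented wrapper: hook first, then the raw op, as atomic_read() does. */
static int my_read(_Atomic int *v)
{
	my_instrument_read(v, sizeof(*v));
	return my_raw_read(v);
}

int main(void)
{
	_Atomic int v = 3;

	return my_read(&v) == 3 ? 0 : 1;
}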
+ */ static __always_inline void atomic_set(atomic_t *v, int i) { instrument_atomic_write(v, sizeof(*v)); - arch_atomic_set(v, i); -} - + raw_atomic_set(v, i); +} + +/** + * atomic_set_release() - atomic set with release ordering + * @v: pointer to atomic_t + * @i: int value to assign + * + * Atomically sets @v to @i with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_set_release() there. + * + * Return: Nothing. + */ static __always_inline void atomic_set_release(atomic_t *v, int i) { kcsan_release(); instrument_atomic_write(v, sizeof(*v)); - arch_atomic_set_release(v, i); -} - + raw_atomic_set_release(v, i); +} + +/** + * atomic_add() - atomic add with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add() there. + * + * Return: Nothing. + */ static __always_inline void atomic_add(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_add(i, v); + raw_atomic_add(i, v); } +/** + * atomic_add_return() - atomic add with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_return() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_add_return(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return(i, v); + return raw_atomic_add_return(i, v); } +/** + * atomic_add_return_acquire() - atomic add with acquire ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return_acquire(i, v); + return raw_atomic_add_return_acquire(i, v); } +/** + * atomic_add_return_release() - atomic add with release ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_add_return_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return_release(i, v); + return raw_atomic_add_return_release(i, v); } +/** + * atomic_add_return_relaxed() - atomic add with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_return_relaxed(i, v); + return raw_atomic_add_return_relaxed(i, v); } +/** + * atomic_fetch_add() - atomic add with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_add() there. + * + * Return: The original value of @v. 
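The *_return() and fetch_*() kerneldoc above differ only in which value they hand back: the updated value versus the original value. A small C11 illustration of that convention follows; stdatomic only provides the fetch_ form, so the "return" flavour is derived from it here.

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        _Atomic int v = 10;

        /* fetch_add returns the value *before* the addition... */
        int old = atomic_fetch_add(&v, 5);      /* old == 10, v == 15 */

        /* ...so an add_return()-style result (value *after* the addition)
         * corresponds to the fetch_ result plus the operand. */
        int new = atomic_fetch_add(&v, 5) + 5;  /* new == 20, v == 20 */

        printf("old=%d new=%d final=%d\n", old, new, atomic_load(&v));
        return 0;
}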
+ */ static __always_inline int atomic_fetch_add(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add(i, v); + return raw_atomic_fetch_add(i, v); } +/** + * atomic_fetch_add_acquire() - atomic add with acquire ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_add_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_acquire(i, v); + return raw_atomic_fetch_add_acquire(i, v); } +/** + * atomic_fetch_add_release() - atomic add with release ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_add_release() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_release(i, v); + return raw_atomic_fetch_add_release(i, v); } +/** + * atomic_fetch_add_relaxed() - atomic add with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_add_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_relaxed(i, v); + return raw_atomic_fetch_add_relaxed(i, v); } +/** + * atomic_sub() - atomic subtract with relaxed ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_sub() there. + * + * Return: Nothing. + */ static __always_inline void atomic_sub(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_sub(i, v); + raw_atomic_sub(i, v); } +/** + * atomic_sub_return() - atomic subtract with full ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_sub_return() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_sub_return(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return(i, v); + return raw_atomic_sub_return(i, v); } +/** + * atomic_sub_return_acquire() - atomic subtract with acquire ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_sub_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return_acquire(i, v); + return raw_atomic_sub_return_acquire(i, v); } +/** + * atomic_sub_return_release() - atomic subtract with release ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with release ordering. 
+ * + * Unsafe to use in noinstr code; use raw_atomic_sub_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return_release(i, v); + return raw_atomic_sub_return_release(i, v); } +/** + * atomic_sub_return_relaxed() - atomic subtract with relaxed ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_sub_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_return_relaxed(i, v); + return raw_atomic_sub_return_relaxed(i, v); } +/** + * atomic_fetch_sub() - atomic subtract with full ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_sub(i, v); + return raw_atomic_fetch_sub(i, v); } +/** + * atomic_fetch_sub_acquire() - atomic subtract with acquire ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_sub_acquire(i, v); + return raw_atomic_fetch_sub_acquire(i, v); } +/** + * atomic_fetch_sub_release() - atomic subtract with release ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_release() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_sub_release(i, v); + return raw_atomic_fetch_sub_release(i, v); } +/** + * atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering + * @i: int value to subtract + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_sub_relaxed(i, v); + return raw_atomic_fetch_sub_relaxed(i, v); } +/** + * atomic_inc() - atomic increment with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc() there. + * + * Return: Nothing. 
+ */ static __always_inline void atomic_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_inc(v); + raw_atomic_inc(v); } +/** + * atomic_inc_return() - atomic increment with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc_return() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_inc_return(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return(v); + return raw_atomic_inc_return(v); } +/** + * atomic_inc_return_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_inc_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return_acquire(v); + return raw_atomic_inc_return_acquire(v); } +/** + * atomic_inc_return_release() - atomic increment with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_inc_return_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return_release(v); + return raw_atomic_inc_return_release(v); } +/** + * atomic_inc_return_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_return_relaxed(v); + return raw_atomic_inc_return_relaxed(v); } +/** + * atomic_fetch_inc() - atomic increment with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_inc() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_inc(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc(v); + return raw_atomic_fetch_inc(v); } +/** + * atomic_fetch_inc_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc_acquire(v); + return raw_atomic_fetch_inc_acquire(v); } +/** + * atomic_fetch_inc_release() - atomic increment with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_release() there. + * + * Return: The original value of @v. 
+ */ static __always_inline int atomic_fetch_inc_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc_release(v); + return raw_atomic_fetch_inc_release(v); } +/** + * atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_inc_relaxed(v); + return raw_atomic_fetch_inc_relaxed(v); } +/** + * atomic_dec() - atomic decrement with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec() there. + * + * Return: Nothing. + */ static __always_inline void atomic_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_dec(v); + raw_atomic_dec(v); } +/** + * atomic_dec_return() - atomic decrement with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec_return() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_dec_return(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return(v); + return raw_atomic_dec_return(v); } +/** + * atomic_dec_return_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_dec_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return_acquire(v); + return raw_atomic_dec_return_acquire(v); } +/** + * atomic_dec_return_release() - atomic decrement with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_dec_return_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return_release(v); + return raw_atomic_dec_return_release(v); } +/** + * atomic_dec_return_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_return_relaxed(v); + return raw_atomic_dec_return_relaxed(v); } +/** + * atomic_fetch_dec() - atomic decrement with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_dec() there. + * + * Return: The original value of @v. 
+ */ static __always_inline int atomic_fetch_dec(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec(v); + return raw_atomic_fetch_dec(v); } +/** + * atomic_fetch_dec_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec_acquire(v); + return raw_atomic_fetch_dec_acquire(v); } +/** + * atomic_fetch_dec_release() - atomic decrement with release ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_release() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_dec_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec_release(v); + return raw_atomic_fetch_dec_release(v); } +/** + * atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_dec_relaxed(v); + return raw_atomic_fetch_dec_relaxed(v); } +/** + * atomic_and() - atomic bitwise AND with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_and() there. + * + * Return: Nothing. + */ static __always_inline void atomic_and(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_and(i, v); + raw_atomic_and(i, v); } +/** + * atomic_fetch_and() - atomic bitwise AND with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_and() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_and(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and(i, v); + return raw_atomic_fetch_and(i, v); } +/** + * atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_and_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and_acquire(i, v); + return raw_atomic_fetch_and_acquire(i, v); } +/** + * atomic_fetch_and_release() - atomic bitwise AND with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_and_release() there. + * + * Return: The original value of @v. 
+ */ static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and_release(i, v); + return raw_atomic_fetch_and_release(i, v); } +/** + * atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_and_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_and_relaxed(i, v); + return raw_atomic_fetch_and_relaxed(i, v); } +/** + * atomic_andnot() - atomic bitwise AND NOT with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_andnot() there. + * + * Return: Nothing. + */ static __always_inline void atomic_andnot(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_andnot(i, v); + raw_atomic_andnot(i, v); } +/** + * atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot(i, v); + return raw_atomic_fetch_andnot(i, v); } +/** + * atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot_acquire(i, v); + return raw_atomic_fetch_andnot_acquire(i, v); } +/** + * atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_release() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot_release(i, v); + return raw_atomic_fetch_andnot_release(i, v); } +/** + * atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_relaxed() there. + * + * Return: The original value of @v. 
+ */ static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_andnot_relaxed(i, v); + return raw_atomic_fetch_andnot_relaxed(i, v); } +/** + * atomic_or() - atomic bitwise OR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_or() there. + * + * Return: Nothing. + */ static __always_inline void atomic_or(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_or(i, v); + raw_atomic_or(i, v); } +/** + * atomic_fetch_or() - atomic bitwise OR with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_or() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_or(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or(i, v); + return raw_atomic_fetch_or(i, v); } +/** + * atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_or_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or_acquire(i, v); + return raw_atomic_fetch_or_acquire(i, v); } +/** + * atomic_fetch_or_release() - atomic bitwise OR with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_or_release() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or_release(i, v); + return raw_atomic_fetch_or_release(i, v); } +/** + * atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_or_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_or_relaxed(i, v); + return raw_atomic_fetch_or_relaxed(i, v); } +/** + * atomic_xor() - atomic bitwise XOR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_xor() there. + * + * Return: Nothing. + */ static __always_inline void atomic_xor(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_xor(i, v); + raw_atomic_xor(i, v); } +/** + * atomic_fetch_xor() - atomic bitwise XOR with full ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_xor() there. + * + * Return: The original value of @v. 
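The bitwise families above are commonly used on lock-free flag words; fetch_andnot() in particular tells the caller whether it was the one to clear a bit. A userspace C11 sketch of that idiom follows (stdatomic has no andnot, so the complement is ANDed in instead; the FLAG_* names are invented).

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_BUSY  0x1u
#define FLAG_DIRTY 0x2u

int main(void)
{
        _Atomic unsigned int flags = FLAG_BUSY | FLAG_DIRTY;

        /* Analogue of fetch_andnot(FLAG_BUSY, &flags): clear the bit and
         * learn its previous state in one atomic step. */
        unsigned int old = atomic_fetch_and(&flags, ~FLAG_BUSY);

        if (old & FLAG_BUSY)
                printf("this caller cleared FLAG_BUSY\n");

        printf("flags now 0x%x\n", atomic_load(&flags));        /* 0x2 */
        return 0;
}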
+ */ static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor(i, v); + return raw_atomic_fetch_xor(i, v); } +/** + * atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor_acquire(i, v); + return raw_atomic_fetch_xor_acquire(i, v); } +/** + * atomic_fetch_xor_release() - atomic bitwise XOR with release ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_release() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor_release(i, v); + return raw_atomic_fetch_xor_release(i, v); } +/** + * atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering + * @i: int value + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_xor_relaxed(i, v); + return raw_atomic_fetch_xor_relaxed(i, v); } +/** + * atomic_xchg() - atomic exchange with full ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_xchg() there. + * + * Return: The original value of @v. + */ static __always_inline int -atomic_xchg(atomic_t *v, int i) +atomic_xchg(atomic_t *v, int new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg(v, i); + return raw_atomic_xchg(v, new); } +/** + * atomic_xchg_acquire() - atomic exchange with acquire ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_xchg_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int -atomic_xchg_acquire(atomic_t *v, int i) +atomic_xchg_acquire(atomic_t *v, int new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg_acquire(v, i); + return raw_atomic_xchg_acquire(v, new); } +/** + * atomic_xchg_release() - atomic exchange with release ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_xchg_release() there. + * + * Return: The original value of @v. 
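xchg() is frequently used to consume a value and reset it in a single step, so that no concurrent update can slip in between the read and the write. A minimal C11 analogue using atomic_exchange():

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        _Atomic int pending = 7;

        /* Atomically grab the pending value and reset it. */
        int taken = atomic_exchange(&pending, 0);

        printf("taken=%d pending=%d\n", taken, atomic_load(&pending));  /* 7 0 */
        return 0;
}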
+ */ static __always_inline int -atomic_xchg_release(atomic_t *v, int i) +atomic_xchg_release(atomic_t *v, int new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg_release(v, i); + return raw_atomic_xchg_release(v, new); } +/** + * atomic_xchg_relaxed() - atomic exchange with relaxed ordering + * @v: pointer to atomic_t + * @new: int value to assign + * + * Atomically updates @v to @new with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_xchg_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int -atomic_xchg_relaxed(atomic_t *v, int i) +atomic_xchg_relaxed(atomic_t *v, int new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_xchg_relaxed(v, i); + return raw_atomic_xchg_relaxed(v, new); } +/** + * atomic_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg(v, old, new); + return raw_atomic_cmpxchg(v, old, new); } +/** + * atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg_acquire(v, old, new); + return raw_atomic_cmpxchg_acquire(v, old, new); } +/** + * atomic_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg_release(v, old, new); + return raw_atomic_cmpxchg_release(v, old, new); } +/** + * atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_t + * @old: int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_cmpxchg_relaxed(v, old, new); + return raw_atomic_cmpxchg_relaxed(v, old, new); } +/** + * atomic_try_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. 
+ * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg(v, old, new); -} - + return raw_atomic_try_cmpxchg(v, old, new); +} + +/** + * atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg_acquire(v, old, new); -} - + return raw_atomic_try_cmpxchg_acquire(v, old, new); +} + +/** + * atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg_release(v, old, new); -} - + return raw_atomic_try_cmpxchg_release(v, old, new); +} + +/** + * atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_t + * @old: pointer to int value to compare with + * @new: int value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_try_cmpxchg_relaxed(v, old, new); -} - + return raw_atomic_try_cmpxchg_relaxed(v, old, new); +} + +/** + * atomic_sub_and_test() - atomic subtract and test if zero with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_sub_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. 
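The try_cmpxchg() contract documented above, where @old is refreshed with the current value when the exchange fails, is what makes the usual bounded-update loop cheap: the value does not need to be re-read by hand on each retry. An illustrative C11 helper built on that contract (the name add_capped is invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add delta to *v unless the result would exceed limit; on a failed
 * compare-exchange, old is updated to the current value automatically,
 * so the loop simply re-checks and retries. */
static bool add_capped(_Atomic int *v, int delta, int limit)
{
        int old = atomic_load(v);

        do {
                if (old + delta > limit)
                        return false;
        } while (!atomic_compare_exchange_weak(v, &old, old + delta));

        return true;
}

int main(void)
{
        _Atomic int v = 8;

        printf("%d\n", add_capped(&v, 2, 10));  /* 1: v == 10 */
        printf("%d\n", add_capped(&v, 1, 10));  /* 0: v still 10 */
        return 0;
}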
+ */ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_sub_and_test(i, v); + return raw_atomic_sub_and_test(i, v); } +/** + * atomic_dec_and_test() - atomic decrement and test if zero with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool atomic_dec_and_test(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_and_test(v); + return raw_atomic_dec_and_test(v); } +/** + * atomic_inc_and_test() - atomic increment and test if zero with full ordering + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool atomic_inc_and_test(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_and_test(v); + return raw_atomic_inc_and_test(v); } +/** + * atomic_add_negative() - atomic add and test if negative with full ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_negative() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic_add_negative(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_negative(i, v); + return raw_atomic_add_negative(i, v); } +/** + * atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_negative_acquire() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic_add_negative_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_negative_acquire(i, v); + return raw_atomic_add_negative_acquire(i, v); } +/** + * atomic_add_negative_release() - atomic add and test if negative with release ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_negative_release() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic_add_negative_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_negative_release(i, v); + return raw_atomic_add_negative_release(i, v); } +/** + * atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering + * @i: int value to add + * @v: pointer to atomic_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_negative_relaxed() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. 
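dec_and_test() is the canonical building block for reference counting: the caller that observes the count reaching zero performs the teardown. A userspace C11 sketch of that pattern follows (struct obj and obj_put() are invented names; stdatomic's fetch_sub stands in for the kernel operation).

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        _Atomic int refcount;
        /* payload would live here */
};

/* fetch_sub returns the value before the decrement, so a return value of 1
 * means this call dropped the count to zero and owns the free. */
static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
                free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refcount, 2);
        obj_put(o);     /* count 2 -> 1, object stays  */
        obj_put(o);     /* count 1 -> 0, object freed  */
        printf("done\n");
        return 0;
}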
+ */ static __always_inline bool atomic_add_negative_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_negative_relaxed(i, v); + return raw_atomic_add_negative_relaxed(i, v); } +/** + * atomic_fetch_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_t + * @a: int value to add + * @u: int value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there. + * + * Return: The original value of @v. + */ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_fetch_add_unless(v, a, u); + return raw_atomic_fetch_add_unless(v, a, u); } +/** + * atomic_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_t + * @a: int value to add + * @u: int value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_add_unless() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_add_unless(v, a, u); + return raw_atomic_add_unless(v, a, u); } +/** + * atomic_inc_not_zero() - atomic increment unless zero with full ordering + * @v: pointer to atomic_t + * + * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic_inc_not_zero(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_not_zero(v); + return raw_atomic_inc_not_zero(v); } +/** + * atomic_inc_unless_negative() - atomic increment unless negative with full ordering + * @v: pointer to atomic_t + * + * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_inc_unless_negative(v); + return raw_atomic_inc_unless_negative(v); } +/** + * atomic_dec_unless_positive() - atomic decrement unless positive with full ordering + * @v: pointer to atomic_t + * + * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_unless_positive(v); + return raw_atomic_dec_unless_positive(v); } +/** + * atomic_dec_if_positive() - atomic decrement if positive with full ordering + * @v: pointer to atomic_t + * + * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there. + * + * Return: The old value of (@v - 1), regardless of whether @v was updated. 
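dec_if_positive() has the least obvious return convention in this family: it returns the decremented value even when no update happened, so a negative result means the counter was not positive and was left alone. A C11 sketch of that behaviour (dec_if_positive_sketch is an invented name mirroring the generic fallback loop shown earlier):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Returns (old - 1) whether or not the store happened, matching the
 * convention in the kerneldoc above: a negative result means *v was not
 * positive and was not modified. */
static int64_t dec_if_positive_sketch(_Atomic int64_t *v)
{
        int64_t dec, c = atomic_load(v);

        do {
                dec = c - 1;
                if (dec < 0)
                        break;
        } while (!atomic_compare_exchange_weak(v, &c, dec));

        return dec;
}

int main(void)
{
        _Atomic int64_t v = 1;

        printf("%lld\n", (long long)dec_if_positive_sketch(&v));  /*  0: v -> 0    */
        printf("%lld\n", (long long)dec_if_positive_sketch(&v));  /* -1: v stays 0 */
        return 0;
}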
+ */ static __always_inline int atomic_dec_if_positive(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_dec_if_positive(v); + return raw_atomic_dec_if_positive(v); } +/** + * atomic64_read() - atomic load with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically loads the value of @v with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_read() there. + * + * Return: The value loaded from @v. + */ static __always_inline s64 atomic64_read(const atomic64_t *v) { instrument_atomic_read(v, sizeof(*v)); - return arch_atomic64_read(v); -} - + return raw_atomic64_read(v); +} + +/** + * atomic64_read_acquire() - atomic load with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically loads the value of @v with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_read_acquire() there. + * + * Return: The value loaded from @v. + */ static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { instrument_atomic_read(v, sizeof(*v)); - return arch_atomic64_read_acquire(v); -} - + return raw_atomic64_read_acquire(v); +} + +/** + * atomic64_set() - atomic set with relaxed ordering + * @v: pointer to atomic64_t + * @i: s64 value to assign + * + * Atomically sets @v to @i with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_set() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_set(atomic64_t *v, s64 i) { instrument_atomic_write(v, sizeof(*v)); - arch_atomic64_set(v, i); -} - + raw_atomic64_set(v, i); +} + +/** + * atomic64_set_release() - atomic set with release ordering + * @v: pointer to atomic64_t + * @i: s64 value to assign + * + * Atomically sets @v to @i with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_set_release() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { kcsan_release(); instrument_atomic_write(v, sizeof(*v)); - arch_atomic64_set_release(v, i); -} - + raw_atomic64_set_release(v, i); +} + +/** + * atomic64_add() - atomic add with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_add(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_add(i, v); + raw_atomic64_add(i, v); } +/** + * atomic64_add_return() - atomic add with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_return() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return(i, v); + return raw_atomic64_add_return(i, v); } +/** + * atomic64_add_return_acquire() - atomic add with acquire ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_return_acquire() there. + * + * Return: The updated value of @v. 
+ */ static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return_acquire(i, v); + return raw_atomic64_add_return_acquire(i, v); } +/** + * atomic64_add_return_release() - atomic add with release ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return_release(i, v); + return raw_atomic64_add_return_release(i, v); } +/** + * atomic64_add_return_relaxed() - atomic add with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_return_relaxed(i, v); + return raw_atomic64_add_return_relaxed(i, v); } +/** + * atomic64_fetch_add() - atomic add with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_add() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add(i, v); + return raw_atomic64_fetch_add(i, v); } +/** + * atomic64_fetch_add_acquire() - atomic add with acquire ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_acquire(i, v); + return raw_atomic64_fetch_add_acquire(i, v); } +/** + * atomic64_fetch_add_release() - atomic add with release ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_release(i, v); + return raw_atomic64_fetch_add_release(i, v); } +/** + * atomic64_fetch_add_relaxed() - atomic add with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_relaxed() there. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_relaxed(i, v); + return raw_atomic64_fetch_add_relaxed(i, v); } +/** + * atomic64_sub() - atomic subtract with relaxed ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_sub() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_sub(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_sub(i, v); + raw_atomic64_sub(i, v); } +/** + * atomic64_sub_return() - atomic subtract with full ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_sub_return() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return(i, v); + return raw_atomic64_sub_return(i, v); } +/** + * atomic64_sub_return_acquire() - atomic subtract with acquire ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_sub_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return_acquire(i, v); + return raw_atomic64_sub_return_acquire(i, v); } +/** + * atomic64_sub_return_release() - atomic subtract with release ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_sub_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return_release(i, v); + return raw_atomic64_sub_return_release(i, v); } +/** + * atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_sub_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_return_relaxed(i, v); + return raw_atomic64_sub_return_relaxed(i, v); } +/** + * atomic64_fetch_sub() - atomic subtract with full ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub() there. + * + * Return: The original value of @v. 
+ */ static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub(i, v); + return raw_atomic64_fetch_sub(i, v); } +/** + * atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub_acquire(i, v); + return raw_atomic64_fetch_sub_acquire(i, v); } +/** + * atomic64_fetch_sub_release() - atomic subtract with release ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub_release(i, v); + return raw_atomic64_fetch_sub_release(i, v); } +/** + * atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering + * @i: s64 value to subtract + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_sub_relaxed(i, v); + return raw_atomic64_fetch_sub_relaxed(i, v); } +/** + * atomic64_inc() - atomic increment with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_inc() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_inc(v); + raw_atomic64_inc(v); } +/** + * atomic64_inc_return() - atomic increment with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_inc_return() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_inc_return(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return(v); + return raw_atomic64_inc_return(v); } +/** + * atomic64_inc_return_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_inc_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return_acquire(v); + return raw_atomic64_inc_return_acquire(v); } +/** + * atomic64_inc_return_release() - atomic increment with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with release ordering. 
+ * + * Unsafe to use in noinstr code; use raw_atomic64_inc_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return_release(v); + return raw_atomic64_inc_return_release(v); } +/** + * atomic64_inc_return_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_inc_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_return_relaxed(v); + return raw_atomic64_inc_return_relaxed(v); } +/** + * atomic64_fetch_inc() - atomic increment with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc(v); + return raw_atomic64_fetch_inc(v); } +/** + * atomic64_fetch_inc_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc_acquire(v); + return raw_atomic64_fetch_inc_acquire(v); } +/** + * atomic64_fetch_inc_release() - atomic increment with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc_release(v); + return raw_atomic64_fetch_inc_release(v); } +/** + * atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_inc_relaxed(v); + return raw_atomic64_fetch_inc_relaxed(v); } +/** + * atomic64_dec() - atomic decrement with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_dec(v); + raw_atomic64_dec(v); } +/** + * atomic64_dec_return() - atomic decrement with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec_return() there. 
+ * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_dec_return(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return(v); + return raw_atomic64_dec_return(v); } +/** + * atomic64_dec_return_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return_acquire(v); + return raw_atomic64_dec_return_acquire(v); } +/** + * atomic64_dec_return_release() - atomic decrement with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return_release(v); + return raw_atomic64_dec_return_release(v); } +/** + * atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_return_relaxed(v); + return raw_atomic64_dec_return_relaxed(v); } +/** + * atomic64_fetch_dec() - atomic decrement with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec(v); + return raw_atomic64_fetch_dec(v); } +/** + * atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec_acquire(v); + return raw_atomic64_fetch_dec_acquire(v); } +/** + * atomic64_fetch_dec_release() - atomic decrement with release ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec_release(v); + return raw_atomic64_fetch_dec_release(v); } +/** + * atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. 
+ * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_dec_relaxed(v); + return raw_atomic64_fetch_dec_relaxed(v); } +/** + * atomic64_and() - atomic bitwise AND with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_and() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_and(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_and(i, v); + raw_atomic64_and(i, v); } +/** + * atomic64_fetch_and() - atomic bitwise AND with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_and() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_and(i, v); + return raw_atomic64_fetch_and(i, v); } +/** + * atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_and_acquire(i, v); + return raw_atomic64_fetch_and_acquire(i, v); } +/** + * atomic64_fetch_and_release() - atomic bitwise AND with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_and_release(i, v); + return raw_atomic64_fetch_and_release(i, v); } +/** + * atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_and_relaxed(i, v); + return raw_atomic64_fetch_and_relaxed(i, v); } +/** + * atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_andnot() there. + * + * Return: Nothing. 
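As a hedged illustration of the fetch_and() semantics documented above (nothing here comes from the patch; the flag name and helper are invented), clearing a bit while learning whether it had been set:

	#include <linux/atomic.h>
	#include <linux/bits.h>

	#define EXAMPLE_FLAG_BUSY	BIT_ULL(0)	/* hypothetical flag bit */

	/* Illustrative sketch only, not part of the patch. */
	static bool example_clear_busy(atomic64_t *flags)
	{
		/* fetch_and() hands back the old value, so we can test the bit we cleared. */
		s64 old = atomic64_fetch_and(~EXAMPLE_FLAG_BUSY, flags);

		return old & EXAMPLE_FLAG_BUSY;
	}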
+ */ static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_andnot(i, v); + raw_atomic64_andnot(i, v); } +/** + * atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot(i, v); + return raw_atomic64_fetch_andnot(i, v); } +/** + * atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot_acquire(i, v); + return raw_atomic64_fetch_andnot_acquire(i, v); } +/** + * atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot_release(i, v); + return raw_atomic64_fetch_andnot_release(i, v); } +/** + * atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_andnot_relaxed(i, v); + return raw_atomic64_fetch_andnot_relaxed(i, v); } +/** + * atomic64_or() - atomic bitwise OR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_or() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_or(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_or(i, v); + raw_atomic64_or(i, v); } +/** + * atomic64_fetch_or() - atomic bitwise OR with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_or() there. + * + * Return: The original value of @v. 
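A similarly hedged sketch for fetch_or(): because the original value is returned, exactly one caller observes the bit as previously clear and can claim the associated work (the helper name is invented, not from the patch):

	#include <linux/atomic.h>

	/* Illustrative sketch only, not part of the patch. */
	static bool example_claim(atomic64_t *flags, s64 bit)
	{
		/* True only for the caller whose OR actually set @bit. */
		return !(atomic64_fetch_or(bit, flags) & bit);
	}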
+ */ static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or(i, v); + return raw_atomic64_fetch_or(i, v); } +/** + * atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or_acquire(i, v); + return raw_atomic64_fetch_or_acquire(i, v); } +/** + * atomic64_fetch_or_release() - atomic bitwise OR with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or_release(i, v); + return raw_atomic64_fetch_or_release(i, v); } +/** + * atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_or_relaxed(i, v); + return raw_atomic64_fetch_or_relaxed(i, v); } +/** + * atomic64_xor() - atomic bitwise XOR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_xor() there. + * + * Return: Nothing. + */ static __always_inline void atomic64_xor(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic64_xor(i, v); + raw_atomic64_xor(i, v); } +/** + * atomic64_fetch_xor() - atomic bitwise XOR with full ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor(i, v); + return raw_atomic64_fetch_xor(i, v); } +/** + * atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor_acquire(i, v); + return raw_atomic64_fetch_xor_acquire(i, v); } +/** + * atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with release ordering. 
+ * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor_release(i, v); + return raw_atomic64_fetch_xor_release(i, v); } +/** + * atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering + * @i: s64 value + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_xor_relaxed(i, v); + return raw_atomic64_fetch_xor_relaxed(i, v); } +/** + * atomic64_xchg() - atomic exchange with full ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_xchg() there. + * + * Return: The original value of @v. + */ static __always_inline s64 -atomic64_xchg(atomic64_t *v, s64 i) +atomic64_xchg(atomic64_t *v, s64 new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg(v, i); + return raw_atomic64_xchg(v, new); } +/** + * atomic64_xchg_acquire() - atomic exchange with acquire ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_xchg_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 -atomic64_xchg_acquire(atomic64_t *v, s64 i) +atomic64_xchg_acquire(atomic64_t *v, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg_acquire(v, i); + return raw_atomic64_xchg_acquire(v, new); } +/** + * atomic64_xchg_release() - atomic exchange with release ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_xchg_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 -atomic64_xchg_release(atomic64_t *v, s64 i) +atomic64_xchg_release(atomic64_t *v, s64 new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg_release(v, i); + return raw_atomic64_xchg_release(v, new); } +/** + * atomic64_xchg_relaxed() - atomic exchange with relaxed ordering + * @v: pointer to atomic64_t + * @new: s64 value to assign + * + * Atomically updates @v to @new with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_xchg_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 -atomic64_xchg_relaxed(atomic64_t *v, s64 i) +atomic64_xchg_relaxed(atomic64_t *v, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_xchg_relaxed(v, i); + return raw_atomic64_xchg_relaxed(v, new); } +/** + * atomic64_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there. + * + * Return: The original value of @v. 
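Since xchg() is documented above as returning the original value, a common "drain and reset" pattern follows directly; this sketch is illustrative only and not taken from the patch:

	#include <linux/atomic.h>

	/* Illustrative sketch only, not part of the patch. */
	static s64 example_drain(atomic64_t *pending)
	{
		/* Atomically collect whatever accumulated and restart from zero. */
		return atomic64_xchg(pending, 0);
	}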
+ */ static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg(v, old, new); + return raw_atomic64_cmpxchg(v, old, new); } +/** + * atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg_acquire(v, old, new); + return raw_atomic64_cmpxchg_acquire(v, old, new); } +/** + * atomic64_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg_release(v, old, new); + return raw_atomic64_cmpxchg_release(v, old, new); } +/** + * atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic64_t + * @old: s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_cmpxchg_relaxed(v, old, new); + return raw_atomic64_cmpxchg_relaxed(v, old, new); } +/** + * atomic64_try_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg(v, old, new); -} - + return raw_atomic64_try_cmpxchg(v, old, new); +} + +/** + * atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there. + * + * Return: @true if the exchange occured, @false otherwise. 
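The try_cmpxchg() kerneldoc above notes that @old is updated on failure; that is what lets the canonical retry loop below avoid re-reading @v by hand. This is a hedged sketch only: the helper name and the @limit parameter are invented.

	#include <linux/atomic.h>

	/* Illustrative sketch only, not part of the patch. */
	static bool example_inc_below(atomic64_t *v, s64 limit)
	{
		s64 old = atomic64_read(v);

		do {
			if (old >= limit)
				return false;
			/* On failure, @old now holds the current value of @v. */
		} while (!atomic64_try_cmpxchg(v, &old, old + 1));

		return true;
	}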
+ */ static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg_acquire(v, old, new); -} - + return raw_atomic64_try_cmpxchg_acquire(v, old, new); +} + +/** + * atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg_release(v, old, new); -} - + return raw_atomic64_try_cmpxchg_release(v, old, new); +} + +/** + * atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic64_t + * @old: pointer to s64 value to compare with + * @new: s64 value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic64_try_cmpxchg_relaxed(v, old, new); -} - + return raw_atomic64_try_cmpxchg_relaxed(v, old, new); +} + +/** + * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_sub_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_sub_and_test(i, v); + return raw_atomic64_sub_and_test(i, v); } +/** + * atomic64_dec_and_test() - atomic decrement and test if zero with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_and_test(v); + return raw_atomic64_dec_and_test(v); } +/** + * atomic64_inc_and_test() - atomic increment and test if zero with full ordering + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_inc_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. 
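The dec_and_test() documentation above pairs naturally with the classic reference-put pattern sketched below; this is purely illustrative, not from the patch, and real reference counts should normally use refcount_t:

	#include <linux/atomic.h>
	#include <linux/slab.h>

	/* Illustrative sketch only, not part of the patch. */
	struct example_obj {
		atomic64_t refs;
	};

	static void example_put(struct example_obj *obj)
	{
		/* Full ordering: the final put orders prior accesses before the free. */
		if (atomic64_dec_and_test(&obj->refs))
			kfree(obj);
	}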
+ */ static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_and_test(v); + return raw_atomic64_inc_and_test(v); } +/** + * atomic64_add_negative() - atomic add and test if negative with full ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_negative() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_negative(i, v); + return raw_atomic64_add_negative(i, v); } +/** + * atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_negative_acquire() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic64_add_negative_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_negative_acquire(i, v); + return raw_atomic64_add_negative_acquire(i, v); } +/** + * atomic64_add_negative_release() - atomic add and test if negative with release ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_negative_release() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic64_add_negative_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_negative_release(i, v); + return raw_atomic64_add_negative_release(i, v); } +/** + * atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering + * @i: s64 value to add + * @v: pointer to atomic64_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_add_negative_relaxed() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic64_add_negative_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_negative_relaxed(i, v); + return raw_atomic64_add_negative_relaxed(i, v); } +/** + * atomic64_fetch_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic64_t + * @a: s64 value to add + * @u: s64 value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there. + * + * Return: The original value of @v. + */ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_fetch_add_unless(v, a, u); + return raw_atomic64_fetch_add_unless(v, a, u); } +/** + * atomic64_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic64_t + * @a: s64 value to add + * @u: s64 value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. 
+ * + * Unsafe to use in noinstr code; use raw_atomic64_add_unless() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_add_unless(v, a, u); + return raw_atomic64_add_unless(v, a, u); } +/** + * atomic64_inc_not_zero() - atomic increment unless zero with full ordering + * @v: pointer to atomic64_t + * + * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_not_zero(v); + return raw_atomic64_inc_not_zero(v); } +/** + * atomic64_inc_unless_negative() - atomic increment unless negative with full ordering + * @v: pointer to atomic64_t + * + * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_inc_unless_negative(v); + return raw_atomic64_inc_unless_negative(v); } +/** + * atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering + * @v: pointer to atomic64_t + * + * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_unless_positive(v); + return raw_atomic64_dec_unless_positive(v); } +/** + * atomic64_dec_if_positive() - atomic decrement if positive with full ordering + * @v: pointer to atomic64_t + * + * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there. + * + * Return: The old value of (@v - 1), regardless of whether @v was updated. + */ static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic64_dec_if_positive(v); + return raw_atomic64_dec_if_positive(v); } +/** + * atomic_long_read() - atomic load with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically loads the value of @v with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_read() there. + * + * Return: The value loaded from @v. + */ static __always_inline long atomic_long_read(const atomic_long_t *v) { instrument_atomic_read(v, sizeof(*v)); - return arch_atomic_long_read(v); -} - + return raw_atomic_long_read(v); +} + +/** + * atomic_long_read_acquire() - atomic load with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically loads the value of @v with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_read_acquire() there. + * + * Return: The value loaded from @v. 
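Two further hedged sketches of the conditional operations documented above, neither taken from the patch (the helper names are invented): inc_not_zero() suits "get only while live" lookups, and dec_if_positive() suits credit or budget schemes.

	#include <linux/atomic.h>

	/* Illustrative sketches only, not part of the patch. */
	static bool example_tryget(atomic64_t *refs)
	{
		/* Take a reference only if the count has not already hit zero. */
		return atomic64_inc_not_zero(refs);
	}

	static bool example_take_credit(atomic64_t *credits)
	{
		/* Returns (old value - 1); a negative result means no credit was taken. */
		return atomic64_dec_if_positive(credits) >= 0;
	}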
+ */ static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { instrument_atomic_read(v, sizeof(*v)); - return arch_atomic_long_read_acquire(v); -} - + return raw_atomic_long_read_acquire(v); +} + +/** + * atomic_long_set() - atomic set with relaxed ordering + * @v: pointer to atomic_long_t + * @i: long value to assign + * + * Atomically sets @v to @i with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_set() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_set(atomic_long_t *v, long i) { instrument_atomic_write(v, sizeof(*v)); - arch_atomic_long_set(v, i); -} - + raw_atomic_long_set(v, i); +} + +/** + * atomic_long_set_release() - atomic set with release ordering + * @v: pointer to atomic_long_t + * @i: long value to assign + * + * Atomically sets @v to @i with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_set_release() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { kcsan_release(); instrument_atomic_write(v, sizeof(*v)); - arch_atomic_long_set_release(v, i); -} - + raw_atomic_long_set_release(v, i); +} + +/** + * atomic_long_add() - atomic add with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_add(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_add(i, v); + raw_atomic_long_add(i, v); } +/** + * atomic_long_add_return() - atomic add with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_return() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_return(i, v); + return raw_atomic_long_add_return(i, v); } +/** + * atomic_long_add_return_acquire() - atomic add with acquire ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_return_acquire(i, v); + return raw_atomic_long_add_return_acquire(i, v); } +/** + * atomic_long_add_return_release() - atomic add with release ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_return_release() there. + * + * Return: The updated value of @v. 
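The atomic_long_set_release()/atomic_long_read_acquire() pair documented above is what enables a simple publish/consume ordering scheme; the sketch below is illustrative only (names invented, not from the patch):

	#include <linux/atomic.h>

	/* Illustrative sketch only, not part of the patch. */
	static void example_publish(atomic_long_t *ready, long *slot, long val)
	{
		*slot = val;				/* write the payload ... */
		atomic_long_set_release(ready, 1);	/* ... then publish it   */
	}

	static bool example_consume(atomic_long_t *ready, long *slot, long *out)
	{
		if (!atomic_long_read_acquire(ready))
			return false;
		*out = *slot;				/* ordered after the load */
		return true;
	}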
+ */ static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_return_release(i, v); + return raw_atomic_long_add_return_release(i, v); } +/** + * atomic_long_add_return_relaxed() - atomic add with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_return_relaxed(i, v); + return raw_atomic_long_add_return_relaxed(i, v); } +/** + * atomic_long_fetch_add() - atomic add with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_add(i, v); + return raw_atomic_long_fetch_add(i, v); } +/** + * atomic_long_fetch_add_acquire() - atomic add with acquire ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_add_acquire(i, v); + return raw_atomic_long_fetch_add_acquire(i, v); } +/** + * atomic_long_fetch_add_release() - atomic add with release ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_add_release(i, v); + return raw_atomic_long_fetch_add_release(i, v); } +/** + * atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_add_relaxed(i, v); + return raw_atomic_long_fetch_add_relaxed(i, v); } +/** + * atomic_long_sub() - atomic subtract with relaxed ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_sub() there. + * + * Return: Nothing. 
+ */ static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_sub(i, v); + raw_atomic_long_sub(i, v); } +/** + * atomic_long_sub_return() - atomic subtract with full ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_sub_return() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_sub_return(i, v); + return raw_atomic_long_sub_return(i, v); } +/** + * atomic_long_sub_return_acquire() - atomic subtract with acquire ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_sub_return_acquire(i, v); + return raw_atomic_long_sub_return_acquire(i, v); } +/** + * atomic_long_sub_return_release() - atomic subtract with release ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_sub_return_release(i, v); + return raw_atomic_long_sub_return_release(i, v); } +/** + * atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_sub_return_relaxed(i, v); + return raw_atomic_long_sub_return_relaxed(i, v); } +/** + * atomic_long_fetch_sub() - atomic subtract with full ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_sub(i, v); + return raw_atomic_long_fetch_sub(i, v); } +/** + * atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_acquire() there. + * + * Return: The original value of @v. 
+ */ static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_sub_acquire(i, v); + return raw_atomic_long_fetch_sub_acquire(i, v); } +/** + * atomic_long_fetch_sub_release() - atomic subtract with release ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_sub_release(i, v); + return raw_atomic_long_fetch_sub_release(i, v); } +/** + * atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_sub_relaxed(i, v); + return raw_atomic_long_fetch_sub_relaxed(i, v); } +/** + * atomic_long_inc() - atomic increment with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_inc(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_inc(v); + raw_atomic_long_inc(v); } +/** + * atomic_long_inc_return() - atomic increment with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc_return() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_inc_return(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_inc_return(v); + return raw_atomic_long_inc_return(v); } +/** + * atomic_long_inc_return_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_inc_return_acquire(v); + return raw_atomic_long_inc_return_acquire(v); } +/** + * atomic_long_inc_return_release() - atomic increment with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_release() there. + * + * Return: The updated value of @v. 
+ */ static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_inc_return_release(v); + return raw_atomic_long_inc_return_release(v); } +/** + * atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_inc_return_relaxed(v); + return raw_atomic_long_inc_return_relaxed(v); } +/** + * atomic_long_fetch_inc() - atomic increment with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_inc(v); + return raw_atomic_long_fetch_inc(v); } +/** + * atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_inc_acquire(v); + return raw_atomic_long_fetch_inc_acquire(v); } +/** + * atomic_long_fetch_inc_release() - atomic increment with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_inc_release(v); + return raw_atomic_long_fetch_inc_release(v); } +/** + * atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_inc_relaxed(v); + return raw_atomic_long_fetch_inc_relaxed(v); } +/** + * atomic_long_dec() - atomic decrement with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_dec() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_dec(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_dec(v); + raw_atomic_long_dec(v); } +/** + * atomic_long_dec_return() - atomic decrement with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with full ordering. 
+ * + * Unsafe to use in noinstr code; use raw_atomic_long_dec_return() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_dec_return(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_dec_return(v); + return raw_atomic_long_dec_return(v); } +/** + * atomic_long_dec_return_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_acquire() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_dec_return_acquire(v); + return raw_atomic_long_dec_return_acquire(v); } +/** + * atomic_long_dec_return_release() - atomic decrement with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_release() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_dec_return_release(v); + return raw_atomic_long_dec_return_release(v); } +/** + * atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_relaxed() there. + * + * Return: The updated value of @v. + */ static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_dec_return_relaxed(v); + return raw_atomic_long_dec_return_relaxed(v); } +/** + * atomic_long_fetch_dec() - atomic decrement with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_dec(v); + return raw_atomic_long_fetch_dec(v); } +/** + * atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_dec_acquire(v); + return raw_atomic_long_fetch_dec_acquire(v); } +/** + * atomic_long_fetch_dec_release() - atomic decrement with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_release() there. + * + * Return: The original value of @v. 
+ */ static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_dec_release(v); + return raw_atomic_long_fetch_dec_release(v); } +/** + * atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_dec_relaxed(v); + return raw_atomic_long_fetch_dec_relaxed(v); } +/** + * atomic_long_and() - atomic bitwise AND with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_and() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_and(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_and(i, v); + raw_atomic_long_and(i, v); } +/** + * atomic_long_fetch_and() - atomic bitwise AND with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_and(i, v); + return raw_atomic_long_fetch_and(i, v); } +/** + * atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_and_acquire(i, v); + return raw_atomic_long_fetch_and_acquire(i, v); } +/** + * atomic_long_fetch_and_release() - atomic bitwise AND with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_and_release(i, v); + return raw_atomic_long_fetch_and_release(i, v); } +/** + * atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_relaxed() there. + * + * Return: The original value of @v. 
+ */ static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_and_relaxed(i, v); + return raw_atomic_long_fetch_and_relaxed(i, v); } +/** + * atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_andnot() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_andnot(i, v); + raw_atomic_long_andnot(i, v); } +/** + * atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_andnot(i, v); + return raw_atomic_long_fetch_andnot(i, v); } +/** + * atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_andnot_acquire(i, v); + return raw_atomic_long_fetch_andnot_acquire(i, v); } +/** + * atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_andnot_release(i, v); + return raw_atomic_long_fetch_andnot_release(i, v); } +/** + * atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_andnot_relaxed(i, v); + return raw_atomic_long_fetch_andnot_relaxed(i, v); } +/** + * atomic_long_or() - atomic bitwise OR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_or() there. + * + * Return: Nothing. 
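Because the andnot() helpers above fold the negation into the operation ((@v & ~@i)), an atomic test-and-clear of a flag needs no explicit complement at the call site. A minimal sketch; the flag bit and state variable are made up:

#include <linux/atomic.h>

#define DEMO_PENDING    0x4UL           /* hypothetical flag bit */

static atomic_long_t demo_state = ATOMIC_LONG_INIT(DEMO_PENDING);

static bool demo_test_and_clear_pending(void)
{
        /* clear DEMO_PENDING; the returned original value says whether it was set */
        return atomic_long_fetch_andnot(DEMO_PENDING, &demo_state) & DEMO_PENDING;
}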
+ */ static __always_inline void atomic_long_or(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_or(i, v); + raw_atomic_long_or(i, v); } +/** + * atomic_long_fetch_or() - atomic bitwise OR with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_or(i, v); + return raw_atomic_long_fetch_or(i, v); } +/** + * atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_or_acquire(i, v); + return raw_atomic_long_fetch_or_acquire(i, v); } +/** + * atomic_long_fetch_or_release() - atomic bitwise OR with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_or_release(i, v); + return raw_atomic_long_fetch_or_release(i, v); } +/** + * atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_or_relaxed(i, v); + return raw_atomic_long_fetch_or_relaxed(i, v); } +/** + * atomic_long_xor() - atomic bitwise XOR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_xor() there. + * + * Return: Nothing. + */ static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - arch_atomic_long_xor(i, v); + raw_atomic_long_xor(i, v); } +/** + * atomic_long_fetch_xor() - atomic bitwise XOR with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor() there. + * + * Return: The original value of @v. 
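A common use of fetch_or() is deciding which of several racing callers actually set a flag first: only the caller that saw the bit clear in the returned original value did. A short sketch with an invented control bit:

#include <linux/atomic.h>

#define DEMO_SHUTDOWN   0x1UL           /* hypothetical control bit */

static atomic_long_t demo_ctrl = ATOMIC_LONG_INIT(0);

static bool demo_request_shutdown(void)
{
        /* true only for the one caller that transitioned the bit from clear to set */
        return !(atomic_long_fetch_or(DEMO_SHUTDOWN, &demo_ctrl) & DEMO_SHUTDOWN);
}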
+ */ static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_xor(i, v); + return raw_atomic_long_fetch_xor(i, v); } +/** + * atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_xor_acquire(i, v); + return raw_atomic_long_fetch_xor_acquire(i, v); } +/** + * atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_xor_release(i, v); + return raw_atomic_long_fetch_xor_release(i, v); } +/** + * atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_xor_relaxed(i, v); + return raw_atomic_long_fetch_xor_relaxed(i, v); } +/** + * atomic_long_xchg() - atomic exchange with full ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_xchg() there. + * + * Return: The original value of @v. + */ static __always_inline long -atomic_long_xchg(atomic_long_t *v, long i) +atomic_long_xchg(atomic_long_t *v, long new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_xchg(v, i); + return raw_atomic_long_xchg(v, new); } +/** + * atomic_long_xchg_acquire() - atomic exchange with acquire ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_xchg_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long -atomic_long_xchg_acquire(atomic_long_t *v, long i) +atomic_long_xchg_acquire(atomic_long_t *v, long new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_xchg_acquire(v, i); + return raw_atomic_long_xchg_acquire(v, new); } +/** + * atomic_long_xchg_release() - atomic exchange with release ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_xchg_release() there. + * + * Return: The original value of @v. 
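Alongside the new kerneldoc, the xchg() wrappers rename their value parameter from @i to @new to match the documentation. Caller-side, xchg() is handy for atomically swapping in a sentinel and consuming whatever was there; a minimal sketch with an invented token holder:

#include <linux/atomic.h>

static atomic_long_t demo_token = ATOMIC_LONG_INIT(42);        /* hypothetical token holder */

static long demo_take_token(void)
{
        /* store 0 and hand the previous value to the caller, all in one atomic step */
        return atomic_long_xchg(&demo_token, 0);
}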
+ */ static __always_inline long -atomic_long_xchg_release(atomic_long_t *v, long i) +atomic_long_xchg_release(atomic_long_t *v, long new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_xchg_release(v, i); + return raw_atomic_long_xchg_release(v, new); } +/** + * atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_xchg_relaxed() there. + * + * Return: The original value of @v. + */ static __always_inline long -atomic_long_xchg_relaxed(atomic_long_t *v, long i) +atomic_long_xchg_relaxed(atomic_long_t *v, long new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_xchg_relaxed(v, i); + return raw_atomic_long_xchg_relaxed(v, new); } +/** + * atomic_long_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_cmpxchg(v, old, new); + return raw_atomic_long_cmpxchg(v, old, new); } +/** + * atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_cmpxchg_acquire(v, old, new); + return raw_atomic_long_cmpxchg_acquire(v, old, new); } +/** + * atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_cmpxchg_release(v, old, new); + return raw_atomic_long_cmpxchg_release(v, old, new); } +/** + * atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there. + * + * Return: The original value of @v. 
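cmpxchg() returns the original value, so a caller detects success by comparing that return against the value it expected; the usual pattern is a retry loop. A hedged sketch of a capped increment built that way (function name and cap are invented):

#include <linux/atomic.h>

/* bump *v by one, but never beyond @cap; returns false if already at the cap */
static bool demo_inc_below(atomic_long_t *v, long cap)
{
        long old = atomic_long_read(v);

        for (;;) {
                long seen;

                if (old >= cap)
                        return false;
                seen = atomic_long_cmpxchg(v, old, old + 1);
                if (seen == old)
                        return true;    /* nobody raced with us */
                old = seen;             /* lost the race; retry from the current value */
        }
}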
+ */ static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_cmpxchg_relaxed(v, old, new); + return raw_atomic_long_cmpxchg_relaxed(v, old, new); } +/** + * atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_long_try_cmpxchg(v, old, new); -} - + return raw_atomic_long_try_cmpxchg(v, old, new); +} + +/** + * atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_long_try_cmpxchg_acquire(v, old, new); -} - + return raw_atomic_long_try_cmpxchg_acquire(v, old, new); +} + +/** + * atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_long_try_cmpxchg_release(v, old, new); -} - + return raw_atomic_long_try_cmpxchg_release(v, old, new); +} + +/** + * atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * Otherwise, updates @old to the current value of @v. + * + * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there. + * + * Return: @true if the exchange occured, @false otherwise. 
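try_cmpxchg() differs from cmpxchg() in two ways spelled out above: it returns a success boolean, and on failure it writes the current value back through @old. That turns the retry loop into a compact do/while, and on some architectures it can generate slightly better code since the comparison result is available directly. The same capped increment as before, rewritten as a sketch:

#include <linux/atomic.h>

static bool demo_inc_below_try(atomic_long_t *v, long cap)
{
        long old = atomic_long_read(v);

        do {
                if (old >= cap)
                        return false;
                /* on failure, 'old' is refreshed to the value that beat us */
        } while (!atomic_long_try_cmpxchg(v, &old, old + 1));

        return true;
}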
+ */ static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); - return arch_atomic_long_try_cmpxchg_relaxed(v, old, new); -} - + return raw_atomic_long_try_cmpxchg_relaxed(v, old, new); +} + +/** + * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_sub_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_sub_and_test(i, v); + return raw_atomic_long_sub_and_test(i, v); } +/** + * atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_dec_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool atomic_long_dec_and_test(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_dec_and_test(v); + return raw_atomic_long_dec_and_test(v); } +/** + * atomic_long_inc_and_test() - atomic increment and test if zero with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc_and_test() there. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_inc_and_test(v); + return raw_atomic_long_inc_and_test(v); } +/** + * atomic_long_add_negative() - atomic add and test if negative with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_negative() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_negative(i, v); + return raw_atomic_long_add_negative(i, v); } +/** + * atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_acquire() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic_long_add_negative_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_negative_acquire(i, v); + return raw_atomic_long_add_negative_acquire(i, v); } +/** + * atomic_long_add_negative_release() - atomic add and test if negative with release ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with release ordering. 
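dec_and_test() combines the fully ordered decrement with the zero check, which is the classic last-reference pattern; in real code refcount_t is normally preferred, so this is only an illustration of the return-value contract. The structure and its free path are invented:

#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_object {                    /* hypothetical refcounted object */
        atomic_long_t refs;
        /* ... payload ... */
};

static void demo_put(struct demo_object *obj)
{
        /* full ordering ensures the free cannot be reordered before the drop */
        if (atomic_long_dec_and_test(&obj->refs))
                kfree(obj);
}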
+ * + * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_release() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic_long_add_negative_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_negative_release(i, v); + return raw_atomic_long_add_negative_release(i, v); } +/** + * atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_relaxed() there. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool atomic_long_add_negative_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_negative_relaxed(i, v); + return raw_atomic_long_add_negative_relaxed(i, v); } +/** + * atomic_long_fetch_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_long_t + * @a: long value to add + * @u: long value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there. + * + * Return: The original value of @v. + */ static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_fetch_add_unless(v, a, u); + return raw_atomic_long_fetch_add_unless(v, a, u); } +/** + * atomic_long_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_long_t + * @a: long value to add + * @u: long value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_add_unless(v, a, u); + return raw_atomic_long_add_unless(v, a, u); } +/** + * atomic_long_inc_not_zero() - atomic increment unless zero with full ordering + * @v: pointer to atomic_long_t + * + * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_inc_not_zero(v); + return raw_atomic_long_inc_not_zero(v); } +/** + * atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering + * @v: pointer to atomic_long_t + * + * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there. + * + * Return: @true if @v was updated, @false otherwise. 
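inc_not_zero() and add_unless() only update @v when the comparison allows it and report whether they did, which is how lookup paths take a reference on an object that might be going away. A minimal sketch, reusing the hypothetical refcount from the previous example:

#include <linux/atomic.h>

static bool demo_tryget(atomic_long_t *refs)
{
        /* succeeds only while at least one reference is still held */
        return atomic_long_inc_not_zero(refs);
}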
+ */ static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_inc_unless_negative(v); + return raw_atomic_long_inc_unless_negative(v); } +/** + * atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering + * @v: pointer to atomic_long_t + * + * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_dec_unless_positive(v); + return raw_atomic_long_dec_unless_positive(v); } +/** + * atomic_long_dec_if_positive() - atomic decrement if positive with full ordering + * @v: pointer to atomic_long_t + * + * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. + * + * Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there. + * + * Return: The old value of (@v - 1), regardless of whether @v was updated. + */ static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); - return arch_atomic_long_dec_if_positive(v); + return raw_atomic_long_dec_if_positive(v); } #define xchg(ptr, ...) \ @@ -1949,14 +4713,14 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg(__ai_ptr, __VA_ARGS__); \ + raw_xchg(__ai_ptr, __VA_ARGS__); \ }) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ + raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #define xchg_release(ptr, ...) \ @@ -1964,14 +4728,14 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(ptr) __ai_ptr = (ptr); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg_release(__ai_ptr, __VA_ARGS__); \ + raw_xchg_release(__ai_ptr, __VA_ARGS__); \ }) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ + raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg(ptr, ...) \ @@ -1979,14 +4743,14 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_release(ptr, ...) \ @@ -1994,14 +4758,14 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(ptr) __ai_ptr = (ptr); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_relaxed(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64(ptr, ...) \ @@ -2009,14 +4773,14 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_release(ptr, ...) \ @@ -2024,14 +4788,44 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(ptr) __ai_ptr = (ptr); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg128(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kcsan_mb(); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg128_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg128_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + kcsan_release(); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg128_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \ }) #define try_cmpxchg(ptr, oldp, ...) \ @@ -2041,7 +4835,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg_acquire(ptr, oldp, ...) \ @@ -2050,7 +4844,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg_release(ptr, oldp, ...) \ @@ -2060,7 +4854,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg_relaxed(ptr, oldp, ...) 
\ @@ -2069,7 +4863,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64(ptr, oldp, ...) \ @@ -2079,7 +4873,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_acquire(ptr, oldp, ...) \ @@ -2088,7 +4882,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_release(ptr, oldp, ...) \ @@ -2098,7 +4892,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_relaxed(ptr, oldp, ...) \ @@ -2107,21 +4901,66 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg128(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + kcsan_mb(); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ + raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg128_acquire(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ + raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg128_release(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + kcsan_release(); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ + raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg128_relaxed(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ + raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define cmpxchg_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_local(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ + raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg128_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \ }) #define sync_cmpxchg(ptr, ...) \ @@ -2129,7 +4968,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ - arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ + raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define try_cmpxchg_local(ptr, oldp, ...) \ @@ -2138,7 +4977,7 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_local(ptr, oldp, ...) \ @@ -2147,24 +4986,18 @@ atomic_long_dec_if_positive(atomic_long_t *v) typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ - arch_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ + raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#define cmpxchg_double(ptr, ...) \ +#define try_cmpxchg128_local(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kcsan_mb(); \ - instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ - arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ + raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) -#define cmpxchg_double_local(ptr, ...) 
\ -({ \ - typeof(ptr) __ai_ptr = (ptr); \ - instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ - arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ -}) - #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ -// 6b513a42e1a1b5962532a019b7fc91eaa044ad5e +// 1568f875fef72097413caab8339120c065a39aa4 diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h index 2fc51ba66beb..c82947170ddc 100644 --- a/include/linux/atomic/atomic-long.h +++ b/include/linux/atomic/atomic-long.h @@ -21,1030 +21,1778 @@ typedef atomic_t atomic_long_t; #define atomic_long_cond_read_relaxed atomic_cond_read_relaxed #endif -#ifdef CONFIG_64BIT - -static __always_inline long -arch_atomic_long_read(const atomic_long_t *v) -{ - return arch_atomic64_read(v); -} - -static __always_inline long -arch_atomic_long_read_acquire(const atomic_long_t *v) -{ - return arch_atomic64_read_acquire(v); -} - -static __always_inline void -arch_atomic_long_set(atomic_long_t *v, long i) -{ - arch_atomic64_set(v, i); -} - -static __always_inline void -arch_atomic_long_set_release(atomic_long_t *v, long i) -{ - arch_atomic64_set_release(v, i); -} - -static __always_inline void -arch_atomic_long_add(long i, atomic_long_t *v) -{ - arch_atomic64_add(i, v); -} - -static __always_inline long -arch_atomic_long_add_return(long i, atomic_long_t *v) -{ - return arch_atomic64_add_return(i, v); -} - -static __always_inline long -arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_add_return_acquire(i, v); -} - -static __always_inline long -arch_atomic_long_add_return_release(long i, atomic_long_t *v) -{ - return arch_atomic64_add_return_release(i, v); -} - -static __always_inline long -arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_add_return_relaxed(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_add(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_add(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_add_acquire(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_add_release(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_add_relaxed(i, v); -} - -static __always_inline void -arch_atomic_long_sub(long i, atomic_long_t *v) -{ - arch_atomic64_sub(i, v); -} - -static __always_inline long -arch_atomic_long_sub_return(long i, atomic_long_t *v) -{ - return arch_atomic64_sub_return(i, v); -} - -static __always_inline long -arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_sub_return_acquire(i, v); -} - -static __always_inline long -arch_atomic_long_sub_return_release(long i, atomic_long_t *v) -{ - return arch_atomic64_sub_return_release(i, v); -} - -static __always_inline long -arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_sub_return_relaxed(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_sub(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_sub(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_sub_acquire(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_sub_release(i, v); -} - -static __always_inline long 
-arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_sub_relaxed(i, v); -} - -static __always_inline void -arch_atomic_long_inc(atomic_long_t *v) -{ - arch_atomic64_inc(v); -} - -static __always_inline long -arch_atomic_long_inc_return(atomic_long_t *v) -{ - return arch_atomic64_inc_return(v); -} - -static __always_inline long -arch_atomic_long_inc_return_acquire(atomic_long_t *v) -{ - return arch_atomic64_inc_return_acquire(v); -} - -static __always_inline long -arch_atomic_long_inc_return_release(atomic_long_t *v) -{ - return arch_atomic64_inc_return_release(v); -} - -static __always_inline long -arch_atomic_long_inc_return_relaxed(atomic_long_t *v) -{ - return arch_atomic64_inc_return_relaxed(v); -} - -static __always_inline long -arch_atomic_long_fetch_inc(atomic_long_t *v) -{ - return arch_atomic64_fetch_inc(v); -} - -static __always_inline long -arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) -{ - return arch_atomic64_fetch_inc_acquire(v); -} - -static __always_inline long -arch_atomic_long_fetch_inc_release(atomic_long_t *v) -{ - return arch_atomic64_fetch_inc_release(v); -} - -static __always_inline long -arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) -{ - return arch_atomic64_fetch_inc_relaxed(v); -} - -static __always_inline void -arch_atomic_long_dec(atomic_long_t *v) -{ - arch_atomic64_dec(v); -} - -static __always_inline long -arch_atomic_long_dec_return(atomic_long_t *v) -{ - return arch_atomic64_dec_return(v); -} - -static __always_inline long -arch_atomic_long_dec_return_acquire(atomic_long_t *v) -{ - return arch_atomic64_dec_return_acquire(v); -} - -static __always_inline long -arch_atomic_long_dec_return_release(atomic_long_t *v) -{ - return arch_atomic64_dec_return_release(v); -} - -static __always_inline long -arch_atomic_long_dec_return_relaxed(atomic_long_t *v) -{ - return arch_atomic64_dec_return_relaxed(v); -} - -static __always_inline long -arch_atomic_long_fetch_dec(atomic_long_t *v) -{ - return arch_atomic64_fetch_dec(v); -} - -static __always_inline long -arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) -{ - return arch_atomic64_fetch_dec_acquire(v); -} - -static __always_inline long -arch_atomic_long_fetch_dec_release(atomic_long_t *v) -{ - return arch_atomic64_fetch_dec_release(v); -} - -static __always_inline long -arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) -{ - return arch_atomic64_fetch_dec_relaxed(v); -} - -static __always_inline void -arch_atomic_long_and(long i, atomic_long_t *v) -{ - arch_atomic64_and(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_and(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_and(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_and_acquire(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_and_release(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_and_relaxed(i, v); -} - -static __always_inline void -arch_atomic_long_andnot(long i, atomic_long_t *v) -{ - arch_atomic64_andnot(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_andnot(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_andnot_acquire(i, v); -} - -static 
__always_inline long -arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_andnot_release(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_andnot_relaxed(i, v); -} - -static __always_inline void -arch_atomic_long_or(long i, atomic_long_t *v) -{ - arch_atomic64_or(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_or(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_or(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_or_acquire(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_or_release(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_or_relaxed(i, v); -} - -static __always_inline void -arch_atomic_long_xor(long i, atomic_long_t *v) -{ - arch_atomic64_xor(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_xor(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_xor(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_xor_acquire(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_xor_release(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_fetch_xor_relaxed(i, v); -} - -static __always_inline long -arch_atomic_long_xchg(atomic_long_t *v, long i) -{ - return arch_atomic64_xchg(v, i); -} - -static __always_inline long -arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) -{ - return arch_atomic64_xchg_acquire(v, i); -} - -static __always_inline long -arch_atomic_long_xchg_release(atomic_long_t *v, long i) -{ - return arch_atomic64_xchg_release(v, i); -} - -static __always_inline long -arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) -{ - return arch_atomic64_xchg_relaxed(v, i); -} - -static __always_inline long -arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) -{ - return arch_atomic64_cmpxchg(v, old, new); -} - -static __always_inline long -arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +/** + * raw_atomic_long_read() - atomic load with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically loads the value of @v with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_read() elsewhere. + * + * Return: The value loaded from @v. 
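The raw_atomic_long_*() functions introduced here exist precisely for noinstr code, where the KASAN/KCSAN instrumentation emitted by the atomic_long_*() wrappers must be avoided; everywhere else the instrumented forms remain preferable, as each kerneldoc notes. A hedged sketch of a noinstr caller (the function is invented):

#include <linux/atomic.h>
#include <linux/compiler.h>

static noinstr long demo_peek(const atomic_long_t *v)
{
        /* noinstr code must not call the instrumented wrappers, so use the raw_ form */
        return raw_atomic_long_read(v);
}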
+ */ +static __always_inline long +raw_atomic_long_read(const atomic_long_t *v) { - return arch_atomic64_cmpxchg_acquire(v, old, new); -} - -static __always_inline long -arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) -{ - return arch_atomic64_cmpxchg_release(v, old, new); -} - -static __always_inline long -arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) -{ - return arch_atomic64_cmpxchg_relaxed(v, old, new); -} - -static __always_inline bool -arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) -{ - return arch_atomic64_try_cmpxchg(v, (s64 *)old, new); -} - -static __always_inline bool -arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) -{ - return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); -} - -static __always_inline bool -arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) -{ - return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new); -} - -static __always_inline bool -arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) -{ - return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); -} - -static __always_inline bool -arch_atomic_long_sub_and_test(long i, atomic_long_t *v) -{ - return arch_atomic64_sub_and_test(i, v); -} - -static __always_inline bool -arch_atomic_long_dec_and_test(atomic_long_t *v) -{ - return arch_atomic64_dec_and_test(v); -} - -static __always_inline bool -arch_atomic_long_inc_and_test(atomic_long_t *v) -{ - return arch_atomic64_inc_and_test(v); -} - -static __always_inline bool -arch_atomic_long_add_negative(long i, atomic_long_t *v) -{ - return arch_atomic64_add_negative(i, v); -} - -static __always_inline bool -arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v) -{ - return arch_atomic64_add_negative_acquire(i, v); -} - -static __always_inline bool -arch_atomic_long_add_negative_release(long i, atomic_long_t *v) -{ - return arch_atomic64_add_negative_release(i, v); -} - -static __always_inline bool -arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) -{ - return arch_atomic64_add_negative_relaxed(i, v); -} - -static __always_inline long -arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) -{ - return arch_atomic64_fetch_add_unless(v, a, u); -} - -static __always_inline bool -arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) -{ - return arch_atomic64_add_unless(v, a, u); -} - -static __always_inline bool -arch_atomic_long_inc_not_zero(atomic_long_t *v) -{ - return arch_atomic64_inc_not_zero(v); -} - -static __always_inline bool -arch_atomic_long_inc_unless_negative(atomic_long_t *v) -{ - return arch_atomic64_inc_unless_negative(v); -} - -static __always_inline bool -arch_atomic_long_dec_unless_positive(atomic_long_t *v) -{ - return arch_atomic64_dec_unless_positive(v); -} - -static __always_inline long -arch_atomic_long_dec_if_positive(atomic_long_t *v) -{ - return arch_atomic64_dec_if_positive(v); -} - -#else /* CONFIG_64BIT */ - -static __always_inline long -arch_atomic_long_read(const atomic_long_t *v) -{ - return arch_atomic_read(v); +#ifdef CONFIG_64BIT + return raw_atomic64_read(v); +#else + return raw_atomic_read(v); +#endif } +/** + * raw_atomic_long_read_acquire() - atomic load with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically loads the value of @v with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_read_acquire() elsewhere. + * + * Return: The value loaded from @v. 
+ */ static __always_inline long -arch_atomic_long_read_acquire(const atomic_long_t *v) +raw_atomic_long_read_acquire(const atomic_long_t *v) { - return arch_atomic_read_acquire(v); +#ifdef CONFIG_64BIT + return raw_atomic64_read_acquire(v); +#else + return raw_atomic_read_acquire(v); +#endif } +/** + * raw_atomic_long_set() - atomic set with relaxed ordering + * @v: pointer to atomic_long_t + * @i: long value to assign + * + * Atomically sets @v to @i with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_set() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_set(atomic_long_t *v, long i) +raw_atomic_long_set(atomic_long_t *v, long i) { - arch_atomic_set(v, i); +#ifdef CONFIG_64BIT + raw_atomic64_set(v, i); +#else + raw_atomic_set(v, i); +#endif } +/** + * raw_atomic_long_set_release() - atomic set with release ordering + * @v: pointer to atomic_long_t + * @i: long value to assign + * + * Atomically sets @v to @i with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_set_release() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_set_release(atomic_long_t *v, long i) +raw_atomic_long_set_release(atomic_long_t *v, long i) { - arch_atomic_set_release(v, i); +#ifdef CONFIG_64BIT + raw_atomic64_set_release(v, i); +#else + raw_atomic_set_release(v, i); +#endif } +/** + * raw_atomic_long_add() - atomic add with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_add(long i, atomic_long_t *v) +raw_atomic_long_add(long i, atomic_long_t *v) { - arch_atomic_add(i, v); +#ifdef CONFIG_64BIT + raw_atomic64_add(i, v); +#else + raw_atomic_add(i, v); +#endif } +/** + * raw_atomic_long_add_return() - atomic add with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_add_return(long i, atomic_long_t *v) +raw_atomic_long_add_return(long i, atomic_long_t *v) { - return arch_atomic_add_return(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_return(i, v); +#else + return raw_atomic_add_return(i, v); +#endif } +/** + * raw_atomic_long_add_return_acquire() - atomic add with acquire ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +raw_atomic_long_add_return_acquire(long i, atomic_long_t *v) { - return arch_atomic_add_return_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_return_acquire(i, v); +#else + return raw_atomic_add_return_acquire(i, v); +#endif } +/** + * raw_atomic_long_add_return_release() - atomic add with release ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_return_release() elsewhere. + * + * Return: The updated value of @v. 
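The release-ordered forms are meant to pair with an acquire-ordered load on the reader side: everything written before the release is visible to a reader whose acquire observes the update. A sketch of that pairing using the instrumented wrappers (single producer, names and payload invented; illustrative only):

#include <linux/atomic.h>

static long demo_payload;                               /* hypothetical data */
static atomic_long_t demo_published = ATOMIC_LONG_INIT(0);

static void demo_publish(long val)
{
        demo_payload = val;                                     /* plain store ... */
        atomic_long_add_return_release(1, &demo_published);     /* ... ordered before the bump */
}

static bool demo_consume(long *out)
{
        if (!atomic_long_read_acquire(&demo_published))
                return false;
        *out = demo_payload;    /* ordered after the acquire load above */
        return true;
}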
+ */ static __always_inline long -arch_atomic_long_add_return_release(long i, atomic_long_t *v) +raw_atomic_long_add_return_release(long i, atomic_long_t *v) { - return arch_atomic_add_return_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_return_release(i, v); +#else + return raw_atomic_add_return_release(i, v); +#endif } +/** + * raw_atomic_long_add_return_relaxed() - atomic add with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v) { - return arch_atomic_add_return_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_return_relaxed(i, v); +#else + return raw_atomic_add_return_relaxed(i, v); +#endif } +/** + * raw_atomic_long_fetch_add() - atomic add with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_add() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_add(long i, atomic_long_t *v) +raw_atomic_long_fetch_add(long i, atomic_long_t *v) { - return arch_atomic_fetch_add(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_add(i, v); +#else + return raw_atomic_fetch_add(i, v); +#endif } +/** + * raw_atomic_long_fetch_add_acquire() - atomic add with acquire ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_add_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { - return arch_atomic_fetch_add_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_add_acquire(i, v); +#else + return raw_atomic_fetch_add_acquire(i, v); +#endif } +/** + * raw_atomic_long_fetch_add_release() - atomic add with release ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_add_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +raw_atomic_long_fetch_add_release(long i, atomic_long_t *v) { - return arch_atomic_fetch_add_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_add_release(i, v); +#else + return raw_atomic_fetch_add_release(i, v); +#endif } +/** + * raw_atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_add_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline long -arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { - return arch_atomic_fetch_add_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_add_relaxed(i, v); +#else + return raw_atomic_fetch_add_relaxed(i, v); +#endif } +/** + * raw_atomic_long_sub() - atomic subtract with relaxed ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_sub() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_sub(long i, atomic_long_t *v) +raw_atomic_long_sub(long i, atomic_long_t *v) { - arch_atomic_sub(i, v); +#ifdef CONFIG_64BIT + raw_atomic64_sub(i, v); +#else + raw_atomic_sub(i, v); +#endif } +/** + * raw_atomic_long_sub_return() - atomic subtract with full ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_sub_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_sub_return(long i, atomic_long_t *v) +raw_atomic_long_sub_return(long i, atomic_long_t *v) { - return arch_atomic_sub_return(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_sub_return(i, v); +#else + return raw_atomic_sub_return(i, v); +#endif } +/** + * raw_atomic_long_sub_return_acquire() - atomic subtract with acquire ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_sub_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v) { - return arch_atomic_sub_return_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_sub_return_acquire(i, v); +#else + return raw_atomic_sub_return_acquire(i, v); +#endif } +/** + * raw_atomic_long_sub_return_release() - atomic subtract with release ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_sub_return_release() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +raw_atomic_long_sub_return_release(long i, atomic_long_t *v) { - return arch_atomic_sub_return_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_sub_return_release(i, v); +#else + return raw_atomic_sub_return_release(i, v); +#endif } +/** + * raw_atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_sub_return_relaxed() elsewhere. + * + * Return: The updated value of @v. 
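The plain, non-value-returning ops such as inc() are documented above as relaxed, which is all a pure statistics counter needs; ordinary (non-noinstr) code would use the instrumented wrapper, per the "prefer atomic_long_inc() elsewhere" note. A tiny illustrative sketch with an invented counter:

#include <linux/atomic.h>

static atomic_long_t demo_rx_packets;   /* hypothetical statistics counter */

static void demo_account_rx(void)
{
        /* no ordering requirement for a pure statistic; the relaxed void op suffices */
        atomic_long_inc(&demo_rx_packets);
}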
+ */ static __always_inline long -arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { - return arch_atomic_sub_return_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_sub_return_relaxed(i, v); +#else + return raw_atomic_sub_return_relaxed(i, v); +#endif } +/** + * raw_atomic_long_fetch_sub() - atomic subtract with full ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_sub() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +raw_atomic_long_fetch_sub(long i, atomic_long_t *v) { - return arch_atomic_fetch_sub(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_sub(i, v); +#else + return raw_atomic_fetch_sub(i, v); +#endif } +/** + * raw_atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_sub_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { - return arch_atomic_fetch_sub_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_sub_acquire(i, v); +#else + return raw_atomic_fetch_sub_acquire(i, v); +#endif } +/** + * raw_atomic_long_fetch_sub_release() - atomic subtract with release ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_sub_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v) { - return arch_atomic_fetch_sub_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_sub_release(i, v); +#else + return raw_atomic_fetch_sub_release(i, v); +#endif } +/** + * raw_atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering + * @i: long value to subtract + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_sub_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { - return arch_atomic_fetch_sub_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_sub_relaxed(i, v); +#else + return raw_atomic_fetch_sub_relaxed(i, v); +#endif } +/** + * raw_atomic_long_inc() - atomic increment with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc() elsewhere. + * + * Return: Nothing. 
+ */ static __always_inline void -arch_atomic_long_inc(atomic_long_t *v) +raw_atomic_long_inc(atomic_long_t *v) { - arch_atomic_inc(v); +#ifdef CONFIG_64BIT + raw_atomic64_inc(v); +#else + raw_atomic_inc(v); +#endif } +/** + * raw_atomic_long_inc_return() - atomic increment with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_inc_return(atomic_long_t *v) +raw_atomic_long_inc_return(atomic_long_t *v) { - return arch_atomic_inc_return(v); +#ifdef CONFIG_64BIT + return raw_atomic64_inc_return(v); +#else + return raw_atomic_inc_return(v); +#endif } +/** + * raw_atomic_long_inc_return_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_inc_return_acquire(atomic_long_t *v) +raw_atomic_long_inc_return_acquire(atomic_long_t *v) { - return arch_atomic_inc_return_acquire(v); +#ifdef CONFIG_64BIT + return raw_atomic64_inc_return_acquire(v); +#else + return raw_atomic_inc_return_acquire(v); +#endif } +/** + * raw_atomic_long_inc_return_release() - atomic increment with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc_return_release() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_inc_return_release(atomic_long_t *v) +raw_atomic_long_inc_return_release(atomic_long_t *v) { - return arch_atomic_inc_return_release(v); +#ifdef CONFIG_64BIT + return raw_atomic64_inc_return_release(v); +#else + return raw_atomic_inc_return_release(v); +#endif } +/** + * raw_atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +raw_atomic_long_inc_return_relaxed(atomic_long_t *v) { - return arch_atomic_inc_return_relaxed(v); +#ifdef CONFIG_64BIT + return raw_atomic64_inc_return_relaxed(v); +#else + return raw_atomic_inc_return_relaxed(v); +#endif } +/** + * raw_atomic_long_fetch_inc() - atomic increment with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_inc() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_inc(atomic_long_t *v) +raw_atomic_long_fetch_inc(atomic_long_t *v) { - return arch_atomic_fetch_inc(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_inc(v); +#else + return raw_atomic_fetch_inc(v); +#endif } +/** + * raw_atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_inc_acquire() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline long -arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +raw_atomic_long_fetch_inc_acquire(atomic_long_t *v) { - return arch_atomic_fetch_inc_acquire(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_inc_acquire(v); +#else + return raw_atomic_fetch_inc_acquire(v); +#endif } +/** + * raw_atomic_long_fetch_inc_release() - atomic increment with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_inc_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_inc_release(atomic_long_t *v) +raw_atomic_long_fetch_inc_release(atomic_long_t *v) { - return arch_atomic_fetch_inc_release(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_inc_release(v); +#else + return raw_atomic_fetch_inc_release(v); +#endif } +/** + * raw_atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_inc_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v) { - return arch_atomic_fetch_inc_relaxed(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_inc_relaxed(v); +#else + return raw_atomic_fetch_inc_relaxed(v); +#endif } +/** + * raw_atomic_long_dec() - atomic decrement with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_dec(atomic_long_t *v) +raw_atomic_long_dec(atomic_long_t *v) { - arch_atomic_dec(v); +#ifdef CONFIG_64BIT + raw_atomic64_dec(v); +#else + raw_atomic_dec(v); +#endif } +/** + * raw_atomic_long_dec_return() - atomic decrement with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec_return() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_dec_return(atomic_long_t *v) +raw_atomic_long_dec_return(atomic_long_t *v) { - return arch_atomic_dec_return(v); +#ifdef CONFIG_64BIT + return raw_atomic64_dec_return(v); +#else + return raw_atomic_dec_return(v); +#endif } +/** + * raw_atomic_long_dec_return_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec_return_acquire() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_dec_return_acquire(atomic_long_t *v) +raw_atomic_long_dec_return_acquire(atomic_long_t *v) { - return arch_atomic_dec_return_acquire(v); +#ifdef CONFIG_64BIT + return raw_atomic64_dec_return_acquire(v); +#else + return raw_atomic_dec_return_acquire(v); +#endif } +/** + * raw_atomic_long_dec_return_release() - atomic decrement with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec_return_release() elsewhere. + * + * Return: The updated value of @v. 
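The _acquire/_release/_relaxed suffixes change only the ordering guarantee, never the arithmetic. A purely statistical counter can use the relaxed (or void) forms, while a sequence allocator that uses the returned value usually wants the fully ordered variant. A sketch with made-up names:

static atomic_long_t stat_hits;         /* nothing is ordered against this */
static atomic_long_t next_seq;          /* returned value is handed out as an ID */

static void note_hit(void)
{
        raw_atomic_long_inc(&stat_hits);                /* relaxed, no return value */
}

static long alloc_seq(void)
{
        return raw_atomic_long_inc_return(&next_seq);   /* full ordering, returns new value */
}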
+ */ static __always_inline long -arch_atomic_long_dec_return_release(atomic_long_t *v) +raw_atomic_long_dec_return_release(atomic_long_t *v) { - return arch_atomic_dec_return_release(v); +#ifdef CONFIG_64BIT + return raw_atomic64_dec_return_release(v); +#else + return raw_atomic_dec_return_release(v); +#endif } +/** + * raw_atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec_return_relaxed() elsewhere. + * + * Return: The updated value of @v. + */ static __always_inline long -arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +raw_atomic_long_dec_return_relaxed(atomic_long_t *v) { - return arch_atomic_dec_return_relaxed(v); +#ifdef CONFIG_64BIT + return raw_atomic64_dec_return_relaxed(v); +#else + return raw_atomic_dec_return_relaxed(v); +#endif } +/** + * raw_atomic_long_fetch_dec() - atomic decrement with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_dec() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_dec(atomic_long_t *v) +raw_atomic_long_fetch_dec(atomic_long_t *v) { - return arch_atomic_fetch_dec(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_dec(v); +#else + return raw_atomic_fetch_dec(v); +#endif } +/** + * raw_atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_dec_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +raw_atomic_long_fetch_dec_acquire(atomic_long_t *v) { - return arch_atomic_fetch_dec_acquire(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_dec_acquire(v); +#else + return raw_atomic_fetch_dec_acquire(v); +#endif } +/** + * raw_atomic_long_fetch_dec_release() - atomic decrement with release ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_dec_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_dec_release(atomic_long_t *v) +raw_atomic_long_fetch_dec_release(atomic_long_t *v) { - return arch_atomic_fetch_dec_release(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_dec_release(v); +#else + return raw_atomic_fetch_dec_release(v); +#endif } +/** + * raw_atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_dec_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline long -arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v) { - return arch_atomic_fetch_dec_relaxed(v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_dec_relaxed(v); +#else + return raw_atomic_fetch_dec_relaxed(v); +#endif } +/** + * raw_atomic_long_and() - atomic bitwise AND with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_and() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_and(long i, atomic_long_t *v) +raw_atomic_long_and(long i, atomic_long_t *v) { - arch_atomic_and(i, v); +#ifdef CONFIG_64BIT + raw_atomic64_and(i, v); +#else + raw_atomic_and(i, v); +#endif } +/** + * raw_atomic_long_fetch_and() - atomic bitwise AND with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_and() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_and(long i, atomic_long_t *v) +raw_atomic_long_fetch_and(long i, atomic_long_t *v) { - return arch_atomic_fetch_and(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_and(i, v); +#else + return raw_atomic_fetch_and(i, v); +#endif } +/** + * raw_atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_and_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { - return arch_atomic_fetch_and_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_and_acquire(i, v); +#else + return raw_atomic_fetch_and_acquire(i, v); +#endif } +/** + * raw_atomic_long_fetch_and_release() - atomic bitwise AND with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_and_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +raw_atomic_long_fetch_and_release(long i, atomic_long_t *v) { - return arch_atomic_fetch_and_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_and_release(i, v); +#else + return raw_atomic_fetch_and_release(i, v); +#endif } +/** + * raw_atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_and_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline long -arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { - return arch_atomic_fetch_and_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_and_relaxed(i, v); +#else + return raw_atomic_fetch_and_relaxed(i, v); +#endif } +/** + * raw_atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_andnot() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_andnot(long i, atomic_long_t *v) +raw_atomic_long_andnot(long i, atomic_long_t *v) { - arch_atomic_andnot(i, v); +#ifdef CONFIG_64BIT + raw_atomic64_andnot(i, v); +#else + raw_atomic_andnot(i, v); +#endif } +/** + * raw_atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_andnot() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +raw_atomic_long_fetch_andnot(long i, atomic_long_t *v) { - return arch_atomic_fetch_andnot(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_andnot(i, v); +#else + return raw_atomic_fetch_andnot(i, v); +#endif } +/** + * raw_atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { - return arch_atomic_fetch_andnot_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_andnot_acquire(i, v); +#else + return raw_atomic_fetch_andnot_acquire(i, v); +#endif } +/** + * raw_atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { - return arch_atomic_fetch_andnot_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_andnot_release(i, v); +#else + return raw_atomic_fetch_andnot_release(i, v); +#endif } +/** + * raw_atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v & ~@i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline long -arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { - return arch_atomic_fetch_andnot_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_andnot_relaxed(i, v); +#else + return raw_atomic_fetch_andnot_relaxed(i, v); +#endif } +/** + * raw_atomic_long_or() - atomic bitwise OR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_or() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_or(long i, atomic_long_t *v) +raw_atomic_long_or(long i, atomic_long_t *v) { - arch_atomic_or(i, v); +#ifdef CONFIG_64BIT + raw_atomic64_or(i, v); +#else + raw_atomic_or(i, v); +#endif } +/** + * raw_atomic_long_fetch_or() - atomic bitwise OR with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_or() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_or(long i, atomic_long_t *v) +raw_atomic_long_fetch_or(long i, atomic_long_t *v) { - return arch_atomic_fetch_or(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_or(i, v); +#else + return raw_atomic_fetch_or(i, v); +#endif } +/** + * raw_atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_or_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { - return arch_atomic_fetch_or_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_or_acquire(i, v); +#else + return raw_atomic_fetch_or_acquire(i, v); +#endif } +/** + * raw_atomic_long_fetch_or_release() - atomic bitwise OR with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_or_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +raw_atomic_long_fetch_or_release(long i, atomic_long_t *v) { - return arch_atomic_fetch_or_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_or_release(i, v); +#else + return raw_atomic_fetch_or_release(i, v); +#endif } +/** + * raw_atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v | @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_or_relaxed() elsewhere. + * + * Return: The original value of @v. 
+ */ static __always_inline long -arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { - return arch_atomic_fetch_or_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_or_relaxed(i, v); +#else + return raw_atomic_fetch_or_relaxed(i, v); +#endif } +/** + * raw_atomic_long_xor() - atomic bitwise XOR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_xor() elsewhere. + * + * Return: Nothing. + */ static __always_inline void -arch_atomic_long_xor(long i, atomic_long_t *v) +raw_atomic_long_xor(long i, atomic_long_t *v) { - arch_atomic_xor(i, v); +#ifdef CONFIG_64BIT + raw_atomic64_xor(i, v); +#else + raw_atomic_xor(i, v); +#endif } +/** + * raw_atomic_long_fetch_xor() - atomic bitwise XOR with full ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_xor() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +raw_atomic_long_fetch_xor(long i, atomic_long_t *v) { - return arch_atomic_fetch_xor(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_xor(i, v); +#else + return raw_atomic_fetch_xor(i, v); +#endif } +/** + * raw_atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_xor_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { - return arch_atomic_fetch_xor_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_xor_acquire(i, v); +#else + return raw_atomic_fetch_xor_acquire(i, v); +#endif } +/** + * raw_atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_xor_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v) { - return arch_atomic_fetch_xor_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_xor_release(i, v); +#else + return raw_atomic_fetch_xor_release(i, v); +#endif } +/** + * raw_atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering + * @i: long value + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v ^ @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_xor_relaxed() elsewhere. + * + * Return: The original value of @v. 
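The bitwise family is typically applied to a word of flag bits: or() sets bits, andnot() clears them, and the fetch_* forms report the mask as it was before the update. A sketch with hypothetical flag names:

#define OBJ_DIRTY       (1UL << 0)
#define OBJ_BUSY        (1UL << 1)

static atomic_long_t obj_flags;

static void obj_mark_dirty(void)
{
        raw_atomic_long_or(OBJ_DIRTY, &obj_flags);      /* set a bit, relaxed ordering */
}

static bool obj_clear_busy_was_dirty(void)
{
        long old = raw_atomic_long_fetch_andnot(OBJ_BUSY, &obj_flags);

        return old & OBJ_DIRTY;                         /* flag state before the clear */
}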
+ */ static __always_inline long -arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { - return arch_atomic_fetch_xor_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_xor_relaxed(i, v); +#else + return raw_atomic_fetch_xor_relaxed(i, v); +#endif } +/** + * raw_atomic_long_xchg() - atomic exchange with full ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_xchg() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_xchg(atomic_long_t *v, long i) +raw_atomic_long_xchg(atomic_long_t *v, long new) { - return arch_atomic_xchg(v, i); +#ifdef CONFIG_64BIT + return raw_atomic64_xchg(v, new); +#else + return raw_atomic_xchg(v, new); +#endif } +/** + * raw_atomic_long_xchg_acquire() - atomic exchange with acquire ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_xchg_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +raw_atomic_long_xchg_acquire(atomic_long_t *v, long new) { - return arch_atomic_xchg_acquire(v, i); +#ifdef CONFIG_64BIT + return raw_atomic64_xchg_acquire(v, new); +#else + return raw_atomic_xchg_acquire(v, new); +#endif } +/** + * raw_atomic_long_xchg_release() - atomic exchange with release ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_xchg_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_xchg_release(atomic_long_t *v, long i) +raw_atomic_long_xchg_release(atomic_long_t *v, long new) { - return arch_atomic_xchg_release(v, i); +#ifdef CONFIG_64BIT + return raw_atomic64_xchg_release(v, new); +#else + return raw_atomic_xchg_release(v, new); +#endif } +/** + * raw_atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering + * @v: pointer to atomic_long_t + * @new: long value to assign + * + * Atomically updates @v to @new with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_xchg_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new) { - return arch_atomic_xchg_relaxed(v, i); +#ifdef CONFIG_64BIT + return raw_atomic64_xchg_relaxed(v, new); +#else + return raw_atomic_xchg_relaxed(v, new); +#endif } +/** + * raw_atomic_long_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere. + * + * Return: The original value of @v. 
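cmpxchg with acquire ordering on the winning path, paired with a release store, is the classic shape of a test-and-set style lock. A toy sketch only (raw_atomic_long_set_release() comes from the same generated header, outside this hunk; real code should use the existing locking primitives rather than roll its own):

static atomic_long_t toy_lock;          /* 0 == unlocked, 1 == locked */

static bool toy_trylock(void)
{
        /* Acquire on success: the critical section cannot be hoisted above this. */
        return raw_atomic_long_cmpxchg_acquire(&toy_lock, 0, 1) == 0;
}

static void toy_unlock(void)
{
        /* Release: the critical section cannot sink below this store. */
        raw_atomic_long_set_release(&toy_lock, 0);
}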
+ */ static __always_inline long -arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { - return arch_atomic_cmpxchg(v, old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_cmpxchg(v, old, new); +#else + return raw_atomic_cmpxchg(v, old, new); +#endif } +/** + * raw_atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { - return arch_atomic_cmpxchg_acquire(v, old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_cmpxchg_acquire(v, old, new); +#else + return raw_atomic_cmpxchg_acquire(v, old, new); +#endif } +/** + * raw_atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { - return arch_atomic_cmpxchg_release(v, old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_cmpxchg_release(v, old, new); +#else + return raw_atomic_cmpxchg_release(v, old, new); +#endif } +/** + * raw_atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_long_t + * @old: long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { - return arch_atomic_cmpxchg_relaxed(v, old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_cmpxchg_relaxed(v, old, new); +#else + return raw_atomic_cmpxchg_relaxed(v, old, new); +#endif } +/** + * raw_atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with full ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. 
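Because try_cmpxchg() refreshes @old in place on failure, the retry loop does not need to re-read the variable by hand. The usual idiom looks roughly like this (hypothetical clamped-add helper):

static void add_clamped(atomic_long_t *v, long delta, long limit)
{
        long old = raw_atomic_long_read(v);
        long new;

        do {
                new = min(old + delta, limit);
                /* On failure, 'old' now holds the current value and we retry. */
        } while (!raw_atomic_long_try_cmpxchg(v, &old, new));
}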
+ */ static __always_inline bool -arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { - return arch_atomic_try_cmpxchg(v, (int *)old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_try_cmpxchg(v, (s64 *)old, new); +#else + return raw_atomic_try_cmpxchg(v, (int *)old, new); +#endif } +/** + * raw_atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with acquire ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { - return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +#else + return raw_atomic_try_cmpxchg_acquire(v, (int *)old, new); +#endif } +/** + * raw_atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with release ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { - return arch_atomic_try_cmpxchg_release(v, (int *)old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_try_cmpxchg_release(v, (s64 *)old, new); +#else + return raw_atomic_try_cmpxchg_release(v, (int *)old, new); +#endif } +/** + * raw_atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering + * @v: pointer to atomic_long_t + * @old: pointer to long value to compare with + * @new: long value to assign + * + * If (@v == @old), atomically updates @v to @new with relaxed ordering. + * Otherwise, updates @old to the current value of @v. + * + * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere. + * + * Return: @true if the exchange occured, @false otherwise. + */ static __always_inline bool -arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { - return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new); +#ifdef CONFIG_64BIT + return raw_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +#else + return raw_atomic_try_cmpxchg_relaxed(v, (int *)old, new); +#endif } +/** + * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_sub_and_test() elsewhere. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. 
+ */ static __always_inline bool -arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +raw_atomic_long_sub_and_test(long i, atomic_long_t *v) { - return arch_atomic_sub_and_test(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_sub_and_test(i, v); +#else + return raw_atomic_sub_and_test(i, v); +#endif } +/** + * raw_atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec_and_test() elsewhere. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool -arch_atomic_long_dec_and_test(atomic_long_t *v) +raw_atomic_long_dec_and_test(atomic_long_t *v) { - return arch_atomic_dec_and_test(v); +#ifdef CONFIG_64BIT + return raw_atomic64_dec_and_test(v); +#else + return raw_atomic_dec_and_test(v); +#endif } +/** + * raw_atomic_long_inc_and_test() - atomic increment and test if zero with full ordering + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc_and_test() elsewhere. + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ static __always_inline bool -arch_atomic_long_inc_and_test(atomic_long_t *v) +raw_atomic_long_inc_and_test(atomic_long_t *v) { - return arch_atomic_inc_and_test(v); +#ifdef CONFIG_64BIT + return raw_atomic64_inc_and_test(v); +#else + return raw_atomic_inc_and_test(v); +#endif } +/** + * raw_atomic_long_add_negative() - atomic add and test if negative with full ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_negative() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool -arch_atomic_long_add_negative(long i, atomic_long_t *v) +raw_atomic_long_add_negative(long i, atomic_long_t *v) { - return arch_atomic_add_negative(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_negative(i, v); +#else + return raw_atomic_add_negative(i, v); +#endif } +/** + * raw_atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with acquire ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_negative_acquire() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool -arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v) +raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v) { - return arch_atomic_add_negative_acquire(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_negative_acquire(i, v); +#else + return raw_atomic_add_negative_acquire(i, v); +#endif } +/** + * raw_atomic_long_add_negative_release() - atomic add and test if negative with release ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with release ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_negative_release() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. 
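dec_and_test() is the building block of a hand-rolled reference count: its full ordering guarantees that every access made while holding a reference completes before the object is freed. A minimal sketch (outside noinstr code, refcount_t remains the preferred tool):

struct obj {
        atomic_long_t   refs;
        /* ... payload ... */
};

static void obj_put(struct obj *o)
{
        if (raw_atomic_long_dec_and_test(&o->refs))
                kfree(o);
}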
+ */ static __always_inline bool -arch_atomic_long_add_negative_release(long i, atomic_long_t *v) +raw_atomic_long_add_negative_release(long i, atomic_long_t *v) { - return arch_atomic_add_negative_release(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_negative_release(i, v); +#else + return raw_atomic_add_negative_release(i, v); +#endif } +/** + * raw_atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering + * @i: long value to add + * @v: pointer to atomic_long_t + * + * Atomically updates @v to (@v + @i) with relaxed ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_negative_relaxed() elsewhere. + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ static __always_inline bool -arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) +raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v) { - return arch_atomic_add_negative_relaxed(i, v); +#ifdef CONFIG_64BIT + return raw_atomic64_add_negative_relaxed(i, v); +#else + return raw_atomic_add_negative_relaxed(i, v); +#endif } +/** + * raw_atomic_long_fetch_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_long_t + * @a: long value to add + * @u: long value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere. + * + * Return: The original value of @v. + */ static __always_inline long -arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { - return arch_atomic_fetch_add_unless(v, a, u); +#ifdef CONFIG_64BIT + return raw_atomic64_fetch_add_unless(v, a, u); +#else + return raw_atomic_fetch_add_unless(v, a, u); +#endif } +/** + * raw_atomic_long_add_unless() - atomic add unless value with full ordering + * @v: pointer to atomic_long_t + * @a: long value to add + * @u: long value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool -arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +raw_atomic_long_add_unless(atomic_long_t *v, long a, long u) { - return arch_atomic_add_unless(v, a, u); +#ifdef CONFIG_64BIT + return raw_atomic64_add_unless(v, a, u); +#else + return raw_atomic_add_unless(v, a, u); +#endif } +/** + * raw_atomic_long_inc_not_zero() - atomic increment unless zero with full ordering + * @v: pointer to atomic_long_t + * + * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool -arch_atomic_long_inc_not_zero(atomic_long_t *v) +raw_atomic_long_inc_not_zero(atomic_long_t *v) { - return arch_atomic_inc_not_zero(v); +#ifdef CONFIG_64BIT + return raw_atomic64_inc_not_zero(v); +#else + return raw_atomic_inc_not_zero(v); +#endif } +/** + * raw_atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering + * @v: pointer to atomic_long_t + * + * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. 
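inc_not_zero() is the natural companion to the obj_put() sketch above: it takes a new reference only if at least one is still held, so an object whose count has already hit zero is never resurrected.

static bool obj_tryget(struct obj *o)
{
        return raw_atomic_long_inc_not_zero(&o->refs);
}

A lookup path would call obj_tryget() under RCU or another mechanism that keeps *o from being freed while the count is being examined.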
+ */ static __always_inline bool -arch_atomic_long_inc_unless_negative(atomic_long_t *v) +raw_atomic_long_inc_unless_negative(atomic_long_t *v) { - return arch_atomic_inc_unless_negative(v); +#ifdef CONFIG_64BIT + return raw_atomic64_inc_unless_negative(v); +#else + return raw_atomic_inc_unless_negative(v); +#endif } +/** + * raw_atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering + * @v: pointer to atomic_long_t + * + * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere. + * + * Return: @true if @v was updated, @false otherwise. + */ static __always_inline bool -arch_atomic_long_dec_unless_positive(atomic_long_t *v) +raw_atomic_long_dec_unless_positive(atomic_long_t *v) { - return arch_atomic_dec_unless_positive(v); +#ifdef CONFIG_64BIT + return raw_atomic64_dec_unless_positive(v); +#else + return raw_atomic_dec_unless_positive(v); +#endif } +/** + * raw_atomic_long_dec_if_positive() - atomic decrement if positive with full ordering + * @v: pointer to atomic_long_t + * + * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. + * + * Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere. + * + * Return: The old value of (@v - 1), regardless of whether @v was updated. + */ static __always_inline long -arch_atomic_long_dec_if_positive(atomic_long_t *v) +raw_atomic_long_dec_if_positive(atomic_long_t *v) { - return arch_atomic_dec_if_positive(v); +#ifdef CONFIG_64BIT + return raw_atomic64_dec_if_positive(v); +#else + return raw_atomic_dec_if_positive(v); +#endif } -#endif /* CONFIG_64BIT */ #endif /* _LINUX_ATOMIC_LONG_H */ -// a194c07d7d2f4b0e178d3c118c919775d5d65f50 +// 4ef23f98c73cff96d239896175fd26b10b88899e diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index d3cbb6c16bab..6e76b9dba00e 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -119,7 +119,7 @@ extern void ct_idle_exit(void); */ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) { - return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX); + return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX); } /* @@ -128,7 +128,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) */ static __always_inline unsigned long ct_state_inc(int incby) { - return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state)); + return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state)); } static __always_inline bool warn_rcu_enter(void) diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index fdd537ea513f..bbff5f7f8803 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct context_tracking, context_tracking); #ifdef CONFIG_CONTEXT_TRACKING_USER static __always_inline int __ct_state(void) { - return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK; + return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK; } #endif diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index ca736b05ec7b..0d2e2a38b92d 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -1071,7 +1071,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) */ static __always_inline unsigned int num_online_cpus(void) { - return 
arch_atomic_read(&__num_online_cpus); + return raw_atomic_read(&__num_online_cpus); } #define num_possible_cpus() cpumask_weight(cpu_possible_mask) #define num_present_cpus() cpumask_weight(cpu_present_mask) diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 725d5e6acec0..27dbd4c64860 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -202,67 +202,74 @@ static inline void detect_intel_iommu(void) struct irte { union { - /* Shared between remapped and posted mode*/ struct { - __u64 present : 1, /* 0 */ - fpd : 1, /* 1 */ - __res0 : 6, /* 2 - 6 */ - avail : 4, /* 8 - 11 */ - __res1 : 3, /* 12 - 14 */ - pst : 1, /* 15 */ - vector : 8, /* 16 - 23 */ - __res2 : 40; /* 24 - 63 */ + union { + /* Shared between remapped and posted mode*/ + struct { + __u64 present : 1, /* 0 */ + fpd : 1, /* 1 */ + __res0 : 6, /* 2 - 6 */ + avail : 4, /* 8 - 11 */ + __res1 : 3, /* 12 - 14 */ + pst : 1, /* 15 */ + vector : 8, /* 16 - 23 */ + __res2 : 40; /* 24 - 63 */ + }; + + /* Remapped mode */ + struct { + __u64 r_present : 1, /* 0 */ + r_fpd : 1, /* 1 */ + dst_mode : 1, /* 2 */ + redir_hint : 1, /* 3 */ + trigger_mode : 1, /* 4 */ + dlvry_mode : 3, /* 5 - 7 */ + r_avail : 4, /* 8 - 11 */ + r_res0 : 4, /* 12 - 15 */ + r_vector : 8, /* 16 - 23 */ + r_res1 : 8, /* 24 - 31 */ + dest_id : 32; /* 32 - 63 */ + }; + + /* Posted mode */ + struct { + __u64 p_present : 1, /* 0 */ + p_fpd : 1, /* 1 */ + p_res0 : 6, /* 2 - 7 */ + p_avail : 4, /* 8 - 11 */ + p_res1 : 2, /* 12 - 13 */ + p_urgent : 1, /* 14 */ + p_pst : 1, /* 15 */ + p_vector : 8, /* 16 - 23 */ + p_res2 : 14, /* 24 - 37 */ + pda_l : 26; /* 38 - 63 */ + }; + __u64 low; + }; + + union { + /* Shared between remapped and posted mode*/ + struct { + __u64 sid : 16, /* 64 - 79 */ + sq : 2, /* 80 - 81 */ + svt : 2, /* 82 - 83 */ + __res3 : 44; /* 84 - 127 */ + }; + + /* Posted mode*/ + struct { + __u64 p_sid : 16, /* 64 - 79 */ + p_sq : 2, /* 80 - 81 */ + p_svt : 2, /* 82 - 83 */ + p_res3 : 12, /* 84 - 95 */ + pda_h : 32; /* 96 - 127 */ + }; + __u64 high; + }; }; - - /* Remapped mode */ - struct { - __u64 r_present : 1, /* 0 */ - r_fpd : 1, /* 1 */ - dst_mode : 1, /* 2 */ - redir_hint : 1, /* 3 */ - trigger_mode : 1, /* 4 */ - dlvry_mode : 3, /* 5 - 7 */ - r_avail : 4, /* 8 - 11 */ - r_res0 : 4, /* 12 - 15 */ - r_vector : 8, /* 16 - 23 */ - r_res1 : 8, /* 24 - 31 */ - dest_id : 32; /* 32 - 63 */ - }; - - /* Posted mode */ - struct { - __u64 p_present : 1, /* 0 */ - p_fpd : 1, /* 1 */ - p_res0 : 6, /* 2 - 7 */ - p_avail : 4, /* 8 - 11 */ - p_res1 : 2, /* 12 - 13 */ - p_urgent : 1, /* 14 */ - p_pst : 1, /* 15 */ - p_vector : 8, /* 16 - 23 */ - p_res2 : 14, /* 24 - 37 */ - pda_l : 26; /* 38 - 63 */ - }; - __u64 low; - }; - - union { - /* Shared between remapped and posted mode*/ - struct { - __u64 sid : 16, /* 64 - 79 */ - sq : 2, /* 80 - 81 */ - svt : 2, /* 82 - 83 */ - __res3 : 44; /* 84 - 127 */ - }; - - /* Posted mode*/ - struct { - __u64 p_sid : 16, /* 64 - 79 */ - p_sq : 2, /* 80 - 81 */ - p_svt : 2, /* 82 - 83 */ - p_res3 : 12, /* 84 - 95 */ - pda_h : 32; /* 96 - 127 */ - }; - __u64 high; +#ifdef CONFIG_IRQ_REMAP + __u128 irte; +#endif }; }; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 4e968ebadce6..f0a949b7c973 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -257,7 +257,7 @@ extern enum jump_label_type jump_label_init_type(struct jump_entry *entry); static __always_inline int static_key_count(struct static_key *key) { - return arch_atomic_read(&key->enabled); + return 
raw_atomic_read(&key->enabled); } static __always_inline void jump_label_init(void) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 74bd269a80a2..310f85903c91 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -447,6 +447,14 @@ extern int lockdep_is_held(const void *); #endif /* !LOCKDEP */ +#ifdef CONFIG_PROVE_LOCKING +void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn); + +#define lock_set_cmp_fn(lock, ...) lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__) +#else +#define lock_set_cmp_fn(lock, ...) do { } while (0) +#endif + enum xhlock_context_t { XHLOCK_HARD, XHLOCK_SOFT, diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 59f4fb1626ea..2ebc323d345a 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -85,6 +85,11 @@ struct lock_trace; #define LOCKSTAT_POINTS 4 +struct lockdep_map; +typedef int (*lock_cmp_fn)(const struct lockdep_map *a, + const struct lockdep_map *b); +typedef void (*lock_print_fn)(const struct lockdep_map *map); + /* * The lock-class itself. The order of the structure members matters. * reinit_class() zeroes the key member and all subsequent members. @@ -110,6 +115,9 @@ struct lock_class { struct list_head locks_after, locks_before; const struct lockdep_subclass_key *key; + lock_cmp_fn cmp_fn; + lock_print_fn print_fn; + unsigned int subclass; unsigned int dep_gen_id; diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index e60727be79c4..ec3573119923 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -343,31 +343,19 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { } pscr2_ret__; \ }) -/* - * Special handling for cmpxchg_double. cmpxchg_double is passed two - * percpu variables. The first has to be aligned to a double word - * boundary and the second has to follow directly thereafter. - * We enforce this on all architectures even if they don't support - * a double cmpxchg instruction, since it's a cheap requirement, and it - * avoids breaking the requirement for architectures with the instruction. - */ -#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ +#define __pcpu_size_call_return2bool(stem, variable, ...) \ ({ \ - bool pdcrb_ret__; \ - __verify_pcpu_ptr(&(pcp1)); \ - BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ - VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ - VM_BUG_ON((unsigned long)(&(pcp2)) != \ - (unsigned long)(&(pcp1)) + sizeof(pcp1)); \ - switch(sizeof(pcp1)) { \ - case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ - case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ - case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ - case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ + bool pscr2_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ + case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ + case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ + case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ default: \ __bad_size_call_parameter(); break; \ } \ - pdcrb_ret__; \ + pscr2_ret__; \ }) #define __pcpu_size_call(stem, variable, ...) 
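Returning to the lock_set_cmp_fn() declaration above: it lets a subsystem teach lockdep the intra-class ordering of locks that all share one class, instead of turning validation off. A hypothetical tree whose node locks are always taken parent before child (larger level first) might wire it up as below; this assumes a lockdep-enabled configuration (only then do the locks embed a dep_map), and it follows the comparator convention that a negative result means taking the second lock while holding the first is the expected order:

struct tnode {
        int             level;          /* the root has the largest level */
        struct mutex    lock;
};

#ifdef CONFIG_PROVE_LOCKING
static int tnode_lock_cmp_fn(const struct lockdep_map *a,
                             const struct lockdep_map *b)
{
        const struct tnode *na = container_of(a, struct tnode, lock.dep_map);
        const struct tnode *nb = container_of(b, struct tnode, lock.dep_map);

        /* Negative: holding 'na', it is valid to take 'nb' next. */
        return nb->level - na->level;
}

static void tnode_lock_print_fn(const struct lockdep_map *map)
{
        printk(KERN_CONT " level=%d",
               container_of(map, struct tnode, lock.dep_map)->level);
}
#endif

static void tnode_init(struct tnode *n, int level)
{
        n->level = level;
        mutex_init(&n->lock);
        /* Compiles away when CONFIG_PROVE_LOCKING is not set. */
        lock_set_cmp_fn(&n->lock, tnode_lock_cmp_fn, tnode_lock_print_fn);
}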
\ @@ -426,9 +414,8 @@ do { \ #define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) #define raw_cpu_cmpxchg(pcp, oval, nval) \ __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) -#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) - +#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \ + __pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval) #define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) #define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) #define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) @@ -488,11 +475,6 @@ do { \ raw_cpu_cmpxchg(pcp, oval, nval); \ }) -#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ -({ __this_cpu_preempt_check("cmpxchg_double"); \ - raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ -}) - #define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) #define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) #define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) @@ -513,9 +495,8 @@ do { \ #define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) #define this_cpu_cmpxchg(pcp, oval, nval) \ __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) -#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) - +#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \ + __pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval) #define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) #define this_cpu_inc(pcp) this_cpu_add(pcp, 1) #define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index f6df03f934e5..deb90cf4bffb 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -39,7 +39,8 @@ enum stat_item { CPU_PARTIAL_FREE, /* Refill cpu partial on free */ CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */ CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ - NR_SLUB_STAT_ITEMS }; + NR_SLUB_STAT_ITEMS +}; #ifndef CONFIG_SLUB_TINY /* @@ -47,8 +48,13 @@ enum stat_item { * with this_cpu_cmpxchg_double() alignment requirements. 
*/ struct kmem_cache_cpu { - void **freelist; /* Pointer to next available object */ - unsigned long tid; /* Globally unique transaction id */ + union { + struct { + void **freelist; /* Pointer to next available object */ + unsigned long tid; /* Globally unique transaction id */ + }; + freelist_aba_t freelist_tid; + }; struct slab *slab; /* The slab from which we are allocating */ #ifdef CONFIG_SLUB_CPU_PARTIAL struct slab *partial; /* Partially allocated frozen slabs */ diff --git a/include/linux/types.h b/include/linux/types.h index 688fb943556a..becb8cd5916f 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -10,6 +10,11 @@ #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] +#ifdef __SIZEOF_INT128__ +typedef __s128 s128; +typedef __u128 u128; +#endif + typedef u32 __kernel_dev_t; typedef __kernel_fd_set fd_set; diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index 308433be33c2..6375a0684052 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -13,6 +13,10 @@ #include <linux/posix_types.h> +#ifdef __SIZEOF_INT128__ +typedef __signed__ __int128 __s128 __attribute__((aligned(16))); +typedef unsigned __int128 __u128 __attribute__((aligned(16))); +#endif /* * Below are truly Linux-specific types that should never collide with diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c index a09f1c19336a..6ef0b35fc28c 100644 --- a/kernel/context_tracking.c +++ b/kernel/context_tracking.c @@ -510,7 +510,7 @@ void noinstr __ct_user_enter(enum ctx_state state) * In this we case we don't care about any concurrency/ordering. */ if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) - arch_atomic_set(&ct->state, state); + raw_atomic_set(&ct->state, state); } else { /* * Even if context tracking is disabled on this CPU, because it's outside @@ -527,7 +527,7 @@ void noinstr __ct_user_enter(enum ctx_state state) */ if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) { /* Tracking for vtime only, no concurrent RCU EQS accounting */ - arch_atomic_set(&ct->state, state); + raw_atomic_set(&ct->state, state); } else { /* * Tracking for vtime and RCU EQS. Make sure we don't race @@ -535,7 +535,7 @@ void noinstr __ct_user_enter(enum ctx_state state) * RCU only requires RCU_DYNTICKS_IDX increments to be fully * ordered. */ - arch_atomic_add(state, &ct->state); + raw_atomic_add(state, &ct->state); } } } @@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state) * In this we case we don't care about any concurrency/ordering. */ if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) - arch_atomic_set(&ct->state, CONTEXT_KERNEL); + raw_atomic_set(&ct->state, CONTEXT_KERNEL); } else { if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) { /* Tracking for vtime only, no concurrent RCU EQS accounting */ - arch_atomic_set(&ct->state, CONTEXT_KERNEL); + raw_atomic_set(&ct->state, CONTEXT_KERNEL); } else { /* * Tracking for vtime and RCU EQS. Make sure we don't race @@ -643,7 +643,7 @@ void noinstr __ct_user_exit(enum ctx_state state) * RCU only requires RCU_DYNTICKS_IDX increments to be fully * ordered. 
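The new u128/s128 types (16-byte aligned) exist so that a pointer and a generation counter can be read and updated as a single unit, which is what the kmem_cache_cpu freelist/tid union above relies on to close ABA windows without a lock. A rough, 64-bit-only sketch of the pattern using the cmpxchg128() introduced by this series and hypothetical names (the real SLUB code drives this through the per-CPU cmpxchg machinery instead, and only on architectures where system_has_cmpxchg128() is true):

struct tagged_ptr {
        union {
                struct {
                        void            *ptr;
                        unsigned long   tag;    /* bumped on every update */
                };
                u128 full;      /* aligned(16), so a 128-bit cmpxchg is legal */
        };
};

/* Swap in a new pointer only if nobody changed the pair since it was sampled. */
static bool tagged_ptr_replace(struct tagged_ptr *tp, struct tagged_ptr old,
                               void *new_ptr)
{
        struct tagged_ptr new = {
                .ptr = new_ptr,
                .tag = old.tag + 1,
        };

        return cmpxchg128(&tp->full, old.full, new.full) == old.full;
}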
*/ - arch_atomic_sub(state, &ct->state); + raw_atomic_sub(state, &ct->state); } } } diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 4dfd2f3e09b2..111607d91489 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -709,7 +709,7 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) usage[i] = '\0'; } -static void __print_lock_name(struct lock_class *class) +static void __print_lock_name(struct held_lock *hlock, struct lock_class *class) { char str[KSYM_NAME_LEN]; const char *name; @@ -724,17 +724,19 @@ static void __print_lock_name(struct lock_class *class) printk(KERN_CONT "#%d", class->name_version); if (class->subclass) printk(KERN_CONT "/%d", class->subclass); + if (hlock && class->print_fn) + class->print_fn(hlock->instance); } } -static void print_lock_name(struct lock_class *class) +static void print_lock_name(struct held_lock *hlock, struct lock_class *class) { char usage[LOCK_USAGE_CHARS]; get_usage_chars(class, usage); printk(KERN_CONT " ("); - __print_lock_name(class); + __print_lock_name(hlock, class); printk(KERN_CONT "){%s}-{%d:%d}", usage, class->wait_type_outer ?: class->wait_type_inner, class->wait_type_inner); @@ -772,7 +774,7 @@ static void print_lock(struct held_lock *hlock) } printk(KERN_CONT "%px", hlock->instance); - print_lock_name(lock); + print_lock_name(hlock, lock); printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip); } @@ -1868,7 +1870,7 @@ print_circular_bug_entry(struct lock_list *target, int depth) if (debug_locks_silent) return; printk("\n-> #%u", depth); - print_lock_name(target->class); + print_lock_name(NULL, target->class); printk(KERN_CONT ":\n"); print_lock_trace(target->trace, 6); } @@ -1899,11 +1901,11 @@ print_circular_lock_scenario(struct held_lock *src, */ if (parent != source) { printk("Chain exists of:\n "); - __print_lock_name(source); + __print_lock_name(src, source); printk(KERN_CONT " --> "); - __print_lock_name(parent); + __print_lock_name(NULL, parent); printk(KERN_CONT " --> "); - __print_lock_name(target); + __print_lock_name(tgt, target); printk(KERN_CONT "\n\n"); } @@ -1914,13 +1916,13 @@ print_circular_lock_scenario(struct held_lock *src, printk(" rlock("); else printk(" lock("); - __print_lock_name(target); + __print_lock_name(tgt, target); printk(KERN_CONT ");\n"); printk(" lock("); - __print_lock_name(parent); + __print_lock_name(NULL, parent); printk(KERN_CONT ");\n"); printk(" lock("); - __print_lock_name(target); + __print_lock_name(tgt, target); printk(KERN_CONT ");\n"); if (src_read != 0) printk(" rlock("); @@ -1928,7 +1930,7 @@ print_circular_lock_scenario(struct held_lock *src, printk(" sync("); else printk(" lock("); - __print_lock_name(source); + __print_lock_name(src, source); printk(KERN_CONT ");\n"); printk("\n *** DEADLOCK ***\n\n"); } @@ -2154,6 +2156,8 @@ check_path(struct held_lock *target, struct lock_list *src_entry, return ret; } +static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *); + /* * Prove that the dependency graph starting at <src> can not * lead to <target>. 
If it can, there is a circle when adding @@ -2185,7 +2189,10 @@ check_noncircular(struct held_lock *src, struct held_lock *target, *trace = save_trace(); } - print_circular_bug(&src_entry, target_entry, src, target); + if (src->class_idx == target->class_idx) + print_deadlock_bug(current, src, target); + else + print_circular_bug(&src_entry, target_entry, src, target); } return ret; @@ -2346,7 +2353,7 @@ static void print_lock_class_header(struct lock_class *class, int depth) int bit; printk("%*s->", depth, ""); - print_lock_name(class); + print_lock_name(NULL, class); #ifdef CONFIG_DEBUG_LOCKDEP printk(KERN_CONT " ops: %lu", debug_class_ops_read(class)); #endif @@ -2528,11 +2535,11 @@ print_irq_lock_scenario(struct lock_list *safe_entry, */ if (middle_class != unsafe_class) { printk("Chain exists of:\n "); - __print_lock_name(safe_class); + __print_lock_name(NULL, safe_class); printk(KERN_CONT " --> "); - __print_lock_name(middle_class); + __print_lock_name(NULL, middle_class); printk(KERN_CONT " --> "); - __print_lock_name(unsafe_class); + __print_lock_name(NULL, unsafe_class); printk(KERN_CONT "\n\n"); } @@ -2540,18 +2547,18 @@ print_irq_lock_scenario(struct lock_list *safe_entry, printk(" CPU0 CPU1\n"); printk(" ---- ----\n"); printk(" lock("); - __print_lock_name(unsafe_class); + __print_lock_name(NULL, unsafe_class); printk(KERN_CONT ");\n"); printk(" local_irq_disable();\n"); printk(" lock("); - __print_lock_name(safe_class); + __print_lock_name(NULL, safe_class); printk(KERN_CONT ");\n"); printk(" lock("); - __print_lock_name(middle_class); + __print_lock_name(NULL, middle_class); printk(KERN_CONT ");\n"); printk(" <Interrupt>\n"); printk(" lock("); - __print_lock_name(safe_class); + __print_lock_name(NULL, safe_class); printk(KERN_CONT ");\n"); printk("\n *** DEADLOCK ***\n\n"); } @@ -2588,20 +2595,20 @@ print_bad_irq_dependency(struct task_struct *curr, pr_warn("\nand this task is already holding:\n"); print_lock(prev); pr_warn("which would create a new lock dependency:\n"); - print_lock_name(hlock_class(prev)); + print_lock_name(prev, hlock_class(prev)); pr_cont(" ->"); - print_lock_name(hlock_class(next)); + print_lock_name(next, hlock_class(next)); pr_cont("\n"); pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n", irqclass); - print_lock_name(backwards_entry->class); + print_lock_name(NULL, backwards_entry->class); pr_warn("\n... which became %s-irq-safe at:\n", irqclass); print_lock_trace(backwards_entry->class->usage_traces[bit1], 1); pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); - print_lock_name(forwards_entry->class); + print_lock_name(NULL, forwards_entry->class); pr_warn("\n... 
which became %s-irq-unsafe at:\n", irqclass); pr_warn("..."); @@ -2971,10 +2978,10 @@ print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) printk(" CPU0\n"); printk(" ----\n"); printk(" lock("); - __print_lock_name(prev); + __print_lock_name(prv, prev); printk(KERN_CONT ");\n"); printk(" lock("); - __print_lock_name(next); + __print_lock_name(nxt, next); printk(KERN_CONT ");\n"); printk("\n *** DEADLOCK ***\n\n"); printk(" May be due to missing lock nesting notation\n\n"); @@ -2984,6 +2991,8 @@ static void print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, struct held_lock *next) { + struct lock_class *class = hlock_class(prev); + if (!debug_locks_off_graph_unlock() || debug_locks_silent) return; @@ -2998,6 +3007,11 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, pr_warn("\nbut task is already holding lock:\n"); print_lock(prev); + if (class->cmp_fn) { + pr_warn("and the lock comparison function returns %i:\n", + class->cmp_fn(prev->instance, next->instance)); + } + pr_warn("\nother info that might help us debug this:\n"); print_deadlock_scenario(next, prev); lockdep_print_held_locks(curr); @@ -3019,6 +3033,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, static int check_deadlock(struct task_struct *curr, struct held_lock *next) { + struct lock_class *class; struct held_lock *prev; struct held_lock *nest = NULL; int i; @@ -3039,6 +3054,12 @@ check_deadlock(struct task_struct *curr, struct held_lock *next) if ((next->read == 2) && prev->read) continue; + class = hlock_class(prev); + + if (class->cmp_fn && + class->cmp_fn(prev->instance, next->instance) < 0) + continue; + /* * We're holding the nest_lock, which serializes this lock's * nesting behaviour. @@ -3100,6 +3121,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, return 2; } + if (prev->class_idx == next->class_idx) { + struct lock_class *class = hlock_class(prev); + + if (class->cmp_fn && + class->cmp_fn(prev->instance, next->instance) < 0) + return 2; + } + /* * Prove that the new <prev> -> <next> dependency would not * create a circular dependency in the graph. 
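To illustrate the new per-class hooks (hypothetical names, not from the patch): once a comparison function is registered, taking two locks of the same class in the order it blesses (cmp_fn(prev, next) < 0, as checked in check_deadlock() and check_prev_add() above) is no longer reported, and the optional print function lets reports identify the instance involved. A sketch assuming CONFIG_DEBUG_LOCK_ALLOC so the mutex carries a dep_map; exact prototypes live in the lockdep headers:

        struct chan {
                struct mutex lock;
                int          id;
        };

        static int chan_lock_cmp_fn(const struct lockdep_map *a,
                                    const struct lockdep_map *b)
        {
                const struct chan *ca = container_of(a, struct chan, lock.dep_map);
                const struct chan *cb = container_of(b, struct chan, lock.dep_map);

                /* negative result: locks must be taken in increasing ->id order */
                return ca->id - cb->id;
        }

        static void chan_lock_print_fn(const struct lockdep_map *map)
        {
                const struct chan *c = container_of(map, struct chan, lock.dep_map);

                printk(KERN_CONT " chan=%d", c->id);
        }

        /* at init time, after mutex_init(&c->lock): */
        lockdep_set_lock_cmp_fn(&c->lock.dep_map, chan_lock_cmp_fn, chan_lock_print_fn);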
(We do this by @@ -3576,7 +3605,7 @@ static void print_chain_keys_chain(struct lock_chain *chain) hlock_id = chain_hlocks[chain->base + i]; chain_key = print_chain_key_iteration(hlock_id, chain_key); - print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id)); + print_lock_name(NULL, lock_classes + chain_hlock_class_idx(hlock_id)); printk("\n"); } } @@ -3933,11 +3962,11 @@ static void print_usage_bug_scenario(struct held_lock *lock) printk(" CPU0\n"); printk(" ----\n"); printk(" lock("); - __print_lock_name(class); + __print_lock_name(lock, class); printk(KERN_CONT ");\n"); printk(" <Interrupt>\n"); printk(" lock("); - __print_lock_name(class); + __print_lock_name(lock, class); printk(KERN_CONT ");\n"); printk("\n *** DEADLOCK ***\n\n"); } @@ -4023,7 +4052,7 @@ print_irq_inversion_bug(struct task_struct *curr, pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass); else pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); - print_lock_name(other->class); + print_lock_name(NULL, other->class); pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n"); pr_warn("\nother info that might help us debug this:\n"); @@ -4896,6 +4925,33 @@ EXPORT_SYMBOL_GPL(lockdep_init_map_type); struct lock_class_key __lockdep_no_validate__; EXPORT_SYMBOL_GPL(__lockdep_no_validate__); +#ifdef CONFIG_PROVE_LOCKING +void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn, + lock_print_fn print_fn) +{ + struct lock_class *class = lock->class_cache[0]; + unsigned long flags; + + raw_local_irq_save(flags); + lockdep_recursion_inc(); + + if (!class) + class = register_lock_class(lock, 0, 0); + + if (class) { + WARN_ON(class->cmp_fn && class->cmp_fn != cmp_fn); + WARN_ON(class->print_fn && class->print_fn != print_fn); + + class->cmp_fn = cmp_fn; + class->print_fn = print_fn; + } + + lockdep_recursion_finish(); + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn); +#endif + static void print_lock_nested_lock_not_held(struct task_struct *curr, struct held_lock *hlock) diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 5a575a0ba4e6..3c6193de9cde 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -287,7 +287,7 @@ again: clock = wrap_max(clock, min_clock); clock = wrap_min(clock, max_clock); - if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock)) + if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock)) goto again; return clock; diff --git a/lib/crypto/curve25519-hacl64.c b/lib/crypto/curve25519-hacl64.c index 771d82dc5f14..c40e5d913234 100644 --- a/lib/crypto/curve25519-hacl64.c +++ b/lib/crypto/curve25519-hacl64.c @@ -14,8 +14,6 @@ #include <crypto/curve25519.h> #include <linux/string.h> -typedef __uint128_t u128; - static __always_inline u64 u64_eq_mask(u64 a, u64 b) { u64 x = a ^ b; diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c index d34cf4053668..988702c9b3b2 100644 --- a/lib/crypto/poly1305-donna64.c +++ b/lib/crypto/poly1305-donna64.c @@ -10,8 +10,6 @@ #include <asm/unaligned.h> #include <crypto/internal/poly1305.h> -typedef __uint128_t u128; - void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[POLY1305_BLOCK_SIZE]) { diff --git a/mm/slab.h b/mm/slab.h index f01ac256a8f5..bc36edd5ba4f 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -6,6 +6,38 @@ */ void __init kmem_cache_init(void); +#ifdef CONFIG_64BIT +# ifdef system_has_cmpxchg128 +# define system_has_freelist_aba() system_has_cmpxchg128() +# define 
try_cmpxchg_freelist try_cmpxchg128 +# endif +#define this_cpu_try_cmpxchg_freelist this_cpu_try_cmpxchg128 +typedef u128 freelist_full_t; +#else /* CONFIG_64BIT */ +# ifdef system_has_cmpxchg64 +# define system_has_freelist_aba() system_has_cmpxchg64() +# define try_cmpxchg_freelist try_cmpxchg64 +# endif +#define this_cpu_try_cmpxchg_freelist this_cpu_try_cmpxchg64 +typedef u64 freelist_full_t; +#endif /* CONFIG_64BIT */ + +#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) +#undef system_has_freelist_aba +#endif + +/* + * Freelist pointer and counter to cmpxchg together, avoids the typical ABA + * problems with cmpxchg of just a pointer. + */ +typedef union { + struct { + void *freelist; + unsigned long counter; + }; + freelist_full_t full; +} freelist_aba_t; + /* Reuses the bits in struct page */ struct slab { unsigned long __page_flags; @@ -38,14 +70,21 @@ struct slab { #endif }; /* Double-word boundary */ - void *freelist; /* first free object */ union { - unsigned long counters; struct { - unsigned inuse:16; - unsigned objects:15; - unsigned frozen:1; + void *freelist; /* first free object */ + union { + unsigned long counters; + struct { + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + }; }; +#ifdef system_has_freelist_aba + freelist_aba_t freelist_counter; +#endif }; }; struct rcu_head rcu_head; @@ -72,8 +111,8 @@ SLAB_MATCH(memcg_data, memcg_data); #endif #undef SLAB_MATCH static_assert(sizeof(struct slab) <= sizeof(struct page)); -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB) -static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *))); +#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB) +static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t))); #endif /** diff --git a/mm/slub.c b/mm/slub.c index c87628cd8a9a..7529626bbec2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -292,7 +292,12 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) /* Poison object */ #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U) /* Use cmpxchg_double */ + +#ifdef system_has_freelist_aba #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U) +#else +#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0U) +#endif /* * Tracking user of a slab. @@ -512,6 +517,40 @@ static __always_inline void slab_unlock(struct slab *slab) __bit_spin_unlock(PG_locked, &page->flags); } +static inline bool +__update_freelist_fast(struct slab *slab, + void *freelist_old, unsigned long counters_old, + void *freelist_new, unsigned long counters_new) +{ +#ifdef system_has_freelist_aba + freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old }; + freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new }; + + return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); +#else + return false; +#endif +} + +static inline bool +__update_freelist_slow(struct slab *slab, + void *freelist_old, unsigned long counters_old, + void *freelist_new, unsigned long counters_new) +{ + bool ret = false; + + slab_lock(slab); + if (slab->freelist == freelist_old && + slab->counters == counters_old) { + slab->freelist = freelist_new; + slab->counters = counters_new; + ret = true; + } + slab_unlock(slab); + + return ret; +} + /* * Interrupts must be disabled (for the fallback code to work right), typically * by an _irqsave() lock variant. 
On PREEMPT_RT the preempt_disable(), which is @@ -519,33 +558,25 @@ static __always_inline void slab_unlock(struct slab *slab) * allocation/ free operation in hardirq context. Therefore nothing can * interrupt the operation. */ -static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, +static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, void *freelist_old, unsigned long counters_old, void *freelist_new, unsigned long counters_new, const char *n) { + bool ret; + if (USE_LOCKLESS_FAST_PATH()) lockdep_assert_irqs_disabled(); -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ - defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) + if (s->flags & __CMPXCHG_DOUBLE) { - if (cmpxchg_double(&slab->freelist, &slab->counters, - freelist_old, counters_old, - freelist_new, counters_new)) - return true; - } else -#endif - { - slab_lock(slab); - if (slab->freelist == freelist_old && - slab->counters == counters_old) { - slab->freelist = freelist_new; - slab->counters = counters_new; - slab_unlock(slab); - return true; - } - slab_unlock(slab); + ret = __update_freelist_fast(slab, freelist_old, counters_old, + freelist_new, counters_new); + } else { + ret = __update_freelist_slow(slab, freelist_old, counters_old, + freelist_new, counters_new); } + if (likely(ret)) + return true; cpu_relax(); stat(s, CMPXCHG_DOUBLE_FAIL); @@ -557,36 +588,26 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab return false; } -static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, +static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, void *freelist_old, unsigned long counters_old, void *freelist_new, unsigned long counters_new, const char *n) { -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ - defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) + bool ret; + if (s->flags & __CMPXCHG_DOUBLE) { - if (cmpxchg_double(&slab->freelist, &slab->counters, - freelist_old, counters_old, - freelist_new, counters_new)) - return true; - } else -#endif - { + ret = __update_freelist_fast(slab, freelist_old, counters_old, + freelist_new, counters_new); + } else { unsigned long flags; local_irq_save(flags); - slab_lock(slab); - if (slab->freelist == freelist_old && - slab->counters == counters_old) { - slab->freelist = freelist_new; - slab->counters = counters_new; - slab_unlock(slab); - local_irq_restore(flags); - return true; - } - slab_unlock(slab); + ret = __update_freelist_slow(slab, freelist_old, counters_old, + freelist_new, counters_new); local_irq_restore(flags); } + if (likely(ret)) + return true; cpu_relax(); stat(s, CMPXCHG_DOUBLE_FAIL); @@ -2228,7 +2249,7 @@ static inline void *acquire_slab(struct kmem_cache *s, VM_BUG_ON(new.frozen); new.frozen = 1; - if (!__cmpxchg_double_slab(s, slab, + if (!__slab_update_freelist(s, slab, freelist, counters, new.freelist, new.counters, "acquire_slab")) @@ -2554,7 +2575,7 @@ redo: } - if (!cmpxchg_double_slab(s, slab, + if (!slab_update_freelist(s, slab, old.freelist, old.counters, new.freelist, new.counters, "unfreezing slab")) { @@ -2611,7 +2632,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab) new.frozen = 0; - } while (!__cmpxchg_double_slab(s, slab, + } while (!__slab_update_freelist(s, slab, old.freelist, old.counters, new.freelist, new.counters, "unfreezing slab")); @@ -3008,6 +3029,18 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags) } #ifndef CONFIG_SLUB_TINY +static inline bool +__update_cpu_freelist_fast(struct 
kmem_cache *s, + void *freelist_old, void *freelist_new, + unsigned long tid) +{ + freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; + freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; + + return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, + &old.full, new.full); +} + /* * Check the slab->freelist and either transfer the freelist to the * per cpu freelist or deactivate the slab. @@ -3034,7 +3067,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) new.inuse = slab->objects; new.frozen = freelist != NULL; - } while (!__cmpxchg_double_slab(s, slab, + } while (!__slab_update_freelist(s, slab, freelist, counters, NULL, new.counters, "get_freelist")); @@ -3359,11 +3392,7 @@ redo: * against code executing on this cpu *not* from access by * other cpus. */ - if (unlikely(!this_cpu_cmpxchg_double( - s->cpu_slab->freelist, s->cpu_slab->tid, - object, tid, - next_object, next_tid(tid)))) { - + if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { note_cmpxchg_failure("slab_alloc", s, tid); goto redo; } @@ -3631,7 +3660,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab, } } - } while (!cmpxchg_double_slab(s, slab, + } while (!slab_update_freelist(s, slab, prior, counters, head, new.counters, "__slab_free")); @@ -3736,11 +3765,7 @@ redo: set_freepointer(s, tail_obj, freelist); - if (unlikely(!this_cpu_cmpxchg_double( - s->cpu_slab->freelist, s->cpu_slab->tid, - freelist, tid, - head, next_tid(tid)))) { - + if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { note_cmpxchg_failure("slab_free", s, tid); goto redo; } @@ -4505,11 +4530,11 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) } } -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ - defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) - if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) +#ifdef system_has_freelist_aba + if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { /* Enable fast mode */ s->flags |= __CMPXCHG_DOUBLE; + } #endif /* diff --git a/scripts/atomic/atomic-tbl.sh b/scripts/atomic/atomic-tbl.sh index 81d5c32039dd..608ff39ebd8c 100755 --- a/scripts/atomic/atomic-tbl.sh +++ b/scripts/atomic/atomic-tbl.sh @@ -36,9 +36,16 @@ meta_has_relaxed() meta_in "$1" "BFIR" } -#find_fallback_template(pfx, name, sfx, order) -find_fallback_template() +#meta_is_implicitly_relaxed(meta) +meta_is_implicitly_relaxed() { + meta_in "$1" "vls" +} + +#find_template(tmpltype, pfx, name, sfx, order) +find_template() +{ + local tmpltype="$1"; shift local pfx="$1"; shift local name="$1"; shift local sfx="$1"; shift @@ -52,8 +59,8 @@ find_fallback_template() # # Start at the most specific, and fall back to the most general. Once # we find a specific fallback, don't bother looking for more. 
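Recapping the SLUB fast path converted above, as a hedged sketch rather than the exact code (the real snapshotting of tid and freelist is more careful than this): __update_cpu_freelist_fast() compares and replaces the freelist pointer and the transaction id as one full-width quantity, and the tid advances on every successful update, so a pointer that was freed and recycled to the same address no longer matches -- the classic ABA case:

        do {
                object = READ_ONCE(c->freelist);
                tid    = READ_ONCE(c->tid);
                next   = get_freepointer(s, object);
                /* install (next, next_tid(tid)) iff (freelist, tid) is unchanged */
        } while (!__update_cpu_freelist_fast(s, object, next, tid));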
- for base in "${pfx}${name}${sfx}${order}" "${name}"; do - file="${ATOMICDIR}/fallbacks/${base}" + for base in "${pfx}${name}${sfx}${order}" "${pfx}${name}${sfx}" "${name}"; do + file="${ATOMICDIR}/${tmpltype}/${base}" if [ -f "${file}" ]; then printf "${file}" @@ -62,6 +69,18 @@ find_fallback_template() done } +#find_fallback_template(pfx, name, sfx, order) +find_fallback_template() +{ + find_template "fallbacks" "$@" +} + +#find_kerneldoc_template(pfx, name, sfx, order) +find_kerneldoc_template() +{ + find_template "kerneldoc" "$@" +} + #gen_ret_type(meta, int) gen_ret_type() { local meta="$1"; shift @@ -142,6 +161,91 @@ gen_args() done } +#gen_desc_return(meta) +gen_desc_return() +{ + local meta="$1"; shift + + case "${meta}" in + [v]) + printf "Return: Nothing." + ;; + [Ff]) + printf "Return: The original value of @v." + ;; + [R]) + printf "Return: The updated value of @v." + ;; + [l]) + printf "Return: The value of @v." + ;; + esac +} + +#gen_template_kerneldoc(template, class, meta, pfx, name, sfx, order, atomic, int, args...) +gen_template_kerneldoc() +{ + local template="$1"; shift + local class="$1"; shift + local meta="$1"; shift + local pfx="$1"; shift + local name="$1"; shift + local sfx="$1"; shift + local order="$1"; shift + local atomic="$1"; shift + local int="$1"; shift + + local atomicname="${atomic}_${pfx}${name}${sfx}${order}" + + local ret="$(gen_ret_type "${meta}" "${int}")" + local retstmt="$(gen_ret_stmt "${meta}")" + local params="$(gen_params "${int}" "${atomic}" "$@")" + local args="$(gen_args "$@")" + local desc_order="" + local desc_instrumentation="" + local desc_return="" + + if [ ! -z "${order}" ]; then + desc_order="${order##_}" + elif meta_is_implicitly_relaxed "${meta}"; then + desc_order="relaxed" + else + desc_order="full" + fi + + if [ -z "${class}" ]; then + desc_noinstr="Unsafe to use in noinstr code; use raw_${atomicname}() there." + else + desc_noinstr="Safe to use in noinstr code; prefer ${atomicname}() elsewhere." + fi + + desc_return="$(gen_desc_return "${meta}")" + + . ${template} +} + +#gen_kerneldoc(class, meta, pfx, name, sfx, order, atomic, int, args...) +gen_kerneldoc() +{ + local class="$1"; shift + local meta="$1"; shift + local pfx="$1"; shift + local name="$1"; shift + local sfx="$1"; shift + local order="$1"; shift + + local atomicname="${atomic}_${pfx}${name}${sfx}${order}" + + local tmpl="$(find_kerneldoc_template "${pfx}" "${name}" "${sfx}" "${order}")" + if [ -z "${tmpl}" ]; then + printf "/*\n" + printf " * No kerneldoc available for ${class}${atomicname}\n" + printf " */\n" + else + gen_template_kerneldoc "${tmpl}" "${class}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" + fi +} + #gen_proto_order_variants(meta, pfx, name, sfx, ...) 
gen_proto_order_variants() { diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl index 85ca8d9b5c27..903946cbf1b3 100644 --- a/scripts/atomic/atomics.tbl +++ b/scripts/atomic/atomics.tbl @@ -27,7 +27,7 @@ and vF i v andnot vF i v or vF i v xor vF i v -xchg I v i +xchg I v i:new cmpxchg I v i:old i:new try_cmpxchg B v p:old i:new sub_and_test b i v diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire index ef764085c79a..4da0cab3604e 100755 --- a/scripts/atomic/fallbacks/acquire +++ b/scripts/atomic/fallbacks/acquire @@ -1,9 +1,5 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_${pfx}${name}${sfx}_acquire(${params}) -{ ${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args}); __atomic_acquire_fence(); return ret; -} EOF diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative index e5980abf5904..1d3d4ab3a9d2 100755 --- a/scripts/atomic/fallbacks/add_negative +++ b/scripts/atomic/fallbacks/add_negative @@ -1,15 +1,3 @@ cat <<EOF -/** - * arch_${atomic}_add_negative${order} - Add and test if negative - * @i: integer value to add - * @v: pointer of type ${atomic}_t - * - * Atomically adds @i to @v and returns true if the result is negative, - * or false when the result is greater than or equal to zero. - */ -static __always_inline bool -arch_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v) -{ - return arch_${atomic}_add_return${order}(i, v) < 0; -} + return raw_${atomic}_add_return${order}(i, v) < 0; EOF diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless index 9e5159c2ccfc..95ecb2b7405b 100755 --- a/scripts/atomic/fallbacks/add_unless +++ b/scripts/atomic/fallbacks/add_unless @@ -1,16 +1,3 @@ cat << EOF -/** - * arch_${atomic}_add_unless - add unless the number is already a given value - * @v: pointer of type ${atomic}_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, if @v was not already @u. - * Returns true if the addition was done. 
- */ -static __always_inline bool -arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u) -{ - return arch_${atomic}_fetch_add_unless(v, a, u) != u; -} + return raw_${atomic}_fetch_add_unless(v, a, u) != u; EOF diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot index 5a42f54a3595..66760457e67a 100755 --- a/scripts/atomic/fallbacks/andnot +++ b/scripts/atomic/fallbacks/andnot @@ -1,7 +1,3 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v) -{ - ${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v); -} + ${retstmt}raw_${atomic}_${pfx}and${sfx}${order}(~i, v); EOF diff --git a/scripts/atomic/fallbacks/cmpxchg b/scripts/atomic/fallbacks/cmpxchg new file mode 100644 index 000000000000..1c8507f62e04 --- /dev/null +++ b/scripts/atomic/fallbacks/cmpxchg @@ -0,0 +1,3 @@ +cat <<EOF + return raw_cmpxchg${order}(&v->counter, old, new); +EOF diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec index 8c144c818e9e..60d286d40300 100755 --- a/scripts/atomic/fallbacks/dec +++ b/scripts/atomic/fallbacks/dec @@ -1,7 +1,3 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v) -{ - ${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v); -} + ${retstmt}raw_${atomic}_${pfx}sub${sfx}${order}(1, v); EOF diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test index 8549f359bd0e..3a0278e0ddd7 100755 --- a/scripts/atomic/fallbacks/dec_and_test +++ b/scripts/atomic/fallbacks/dec_and_test @@ -1,15 +1,3 @@ cat <<EOF -/** - * arch_${atomic}_dec_and_test - decrement and test - * @v: pointer of type ${atomic}_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. 
- */ -static __always_inline bool -arch_${atomic}_dec_and_test(${atomic}_t *v) -{ - return arch_${atomic}_dec_return(v) == 0; -} + return raw_${atomic}_dec_return(v) == 0; EOF diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive index 86bdced3428d..f65c11b4b85b 100755 --- a/scripts/atomic/fallbacks/dec_if_positive +++ b/scripts/atomic/fallbacks/dec_if_positive @@ -1,15 +1,11 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_dec_if_positive(${atomic}_t *v) -{ - ${int} dec, c = arch_${atomic}_read(v); + ${int} dec, c = raw_${atomic}_read(v); do { dec = c - 1; if (unlikely(dec < 0)) break; - } while (!arch_${atomic}_try_cmpxchg(v, &c, dec)); + } while (!raw_${atomic}_try_cmpxchg(v, &c, dec)); return dec; -} EOF diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive index c531d5afecc4..d025361d7b85 100755 --- a/scripts/atomic/fallbacks/dec_unless_positive +++ b/scripts/atomic/fallbacks/dec_unless_positive @@ -1,14 +1,10 @@ cat <<EOF -static __always_inline bool -arch_${atomic}_dec_unless_positive(${atomic}_t *v) -{ - ${int} c = arch_${atomic}_read(v); + ${int} c = raw_${atomic}_read(v); do { if (unlikely(c > 0)) return false; - } while (!arch_${atomic}_try_cmpxchg(v, &c, c - 1)); + } while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1)); return true; -} EOF diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence index 07757d8e338e..40d5b397658f 100755 --- a/scripts/atomic/fallbacks/fence +++ b/scripts/atomic/fallbacks/fence @@ -1,11 +1,7 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_${pfx}${name}${sfx}(${params}) -{ ${ret} ret; __atomic_pre_full_fence(); ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args}); __atomic_post_full_fence(); return ret; -} EOF diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless index 68ce13c8b9da..8db7e9e17fac 100755 --- a/scripts/atomic/fallbacks/fetch_add_unless +++ b/scripts/atomic/fallbacks/fetch_add_unless @@ -1,23 +1,10 @@ cat << EOF -/** - * arch_${atomic}_fetch_add_unless - add unless the number is already a given value - * @v: pointer of type ${atomic}_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as @v was not already @u. 
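As the hunks above show, the fallback templates are being reduced to bare function bodies; the enclosing raw_*() definition and its arch_*() ifdeffery are emitted by gen-atomic-fallback.sh, reworked later in this diff. As a rough illustration, the dec_if_positive template should expand to something like the following in the generated header (kerneldoc omitted):

        static __always_inline int
        raw_atomic_dec_if_positive(atomic_t *v)
        {
        #if defined(arch_atomic_dec_if_positive)
                return arch_atomic_dec_if_positive(v);
        #else
                int dec, c = raw_atomic_read(v);

                do {
                        dec = c - 1;
                        if (unlikely(dec < 0))
                                break;
                } while (!raw_atomic_try_cmpxchg(v, &c, dec));

                return dec;
        #endif
        }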
- * Returns original value of @v - */ -static __always_inline ${int} -arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u) -{ - ${int} c = arch_${atomic}_read(v); + ${int} c = raw_${atomic}_read(v); do { if (unlikely(c == u)) break; - } while (!arch_${atomic}_try_cmpxchg(v, &c, c + a)); + } while (!raw_${atomic}_try_cmpxchg(v, &c, c + a)); return c; -} EOF diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc index 3c2c3739169e..56c770f5919c 100755 --- a/scripts/atomic/fallbacks/inc +++ b/scripts/atomic/fallbacks/inc @@ -1,7 +1,3 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v) -{ - ${retstmt}arch_${atomic}_${pfx}add${sfx}${order}(1, v); -} + ${retstmt}raw_${atomic}_${pfx}add${sfx}${order}(1, v); EOF diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test index 0cf23fe1efb8..7d16a10f2257 100755 --- a/scripts/atomic/fallbacks/inc_and_test +++ b/scripts/atomic/fallbacks/inc_and_test @@ -1,15 +1,3 @@ cat <<EOF -/** - * arch_${atomic}_inc_and_test - increment and test - * @v: pointer of type ${atomic}_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __always_inline bool -arch_${atomic}_inc_and_test(${atomic}_t *v) -{ - return arch_${atomic}_inc_return(v) == 0; -} + return raw_${atomic}_inc_return(v) == 0; EOF diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero index ed8a1f562667..1fcef1e55bc9 100755 --- a/scripts/atomic/fallbacks/inc_not_zero +++ b/scripts/atomic/fallbacks/inc_not_zero @@ -1,14 +1,3 @@ cat <<EOF -/** - * arch_${atomic}_inc_not_zero - increment unless the number is zero - * @v: pointer of type ${atomic}_t - * - * Atomically increments @v by 1, if @v is non-zero. - * Returns true if the increment was done. 
- */ -static __always_inline bool -arch_${atomic}_inc_not_zero(${atomic}_t *v) -{ - return arch_${atomic}_add_unless(v, 1, 0); -} + return raw_${atomic}_add_unless(v, 1, 0); EOF diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative index 95d8ce48233f..7b4b09868842 100755 --- a/scripts/atomic/fallbacks/inc_unless_negative +++ b/scripts/atomic/fallbacks/inc_unless_negative @@ -1,14 +1,10 @@ cat <<EOF -static __always_inline bool -arch_${atomic}_inc_unless_negative(${atomic}_t *v) -{ - ${int} c = arch_${atomic}_read(v); + ${int} c = raw_${atomic}_read(v); do { if (unlikely(c < 0)) return false; - } while (!arch_${atomic}_try_cmpxchg(v, &c, c + 1)); + } while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1)); return true; -} EOF diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire index a0ea1d26e6b2..e319862d2f1a 100755 --- a/scripts/atomic/fallbacks/read_acquire +++ b/scripts/atomic/fallbacks/read_acquire @@ -1,16 +1,12 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_read_acquire(const ${atomic}_t *v) -{ ${int} ret; if (__native_word(${atomic}_t)) { ret = smp_load_acquire(&(v)->counter); } else { - ret = arch_${atomic}_read(v); + ret = raw_${atomic}_read(v); __atomic_acquire_fence(); } return ret; -} EOF diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release index b46feb56d69c..1e6daf57b4ba 100755 --- a/scripts/atomic/fallbacks/release +++ b/scripts/atomic/fallbacks/release @@ -1,8 +1,4 @@ cat <<EOF -static __always_inline ${ret} -arch_${atomic}_${pfx}${name}${sfx}_release(${params}) -{ __atomic_release_fence(); ${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args}); -} EOF diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release index 05cdb7f42477..16a374ae6bb1 100755 --- a/scripts/atomic/fallbacks/set_release +++ b/scripts/atomic/fallbacks/set_release @@ -1,12 +1,8 @@ cat <<EOF -static __always_inline void -arch_${atomic}_set_release(${atomic}_t *v, ${int} i) -{ if (__native_word(${atomic}_t)) { smp_store_release(&(v)->counter, i); } else { __atomic_release_fence(); - arch_${atomic}_set(v, i); + raw_${atomic}_set(v, i); } -} EOF diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test index 260f37341c88..d1f746fe0ca4 100755 --- a/scripts/atomic/fallbacks/sub_and_test +++ b/scripts/atomic/fallbacks/sub_and_test @@ -1,16 +1,3 @@ cat <<EOF -/** - * arch_${atomic}_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type ${atomic}_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. 
- */ -static __always_inline bool -arch_${atomic}_sub_and_test(${int} i, ${atomic}_t *v) -{ - return arch_${atomic}_sub_return(i, v) == 0; -} + return raw_${atomic}_sub_return(i, v) == 0; EOF diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg index 890f850ede37..d4da82092baf 100755 --- a/scripts/atomic/fallbacks/try_cmpxchg +++ b/scripts/atomic/fallbacks/try_cmpxchg @@ -1,11 +1,7 @@ cat <<EOF -static __always_inline bool -arch_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new) -{ ${int} r, o = *old; - r = arch_${atomic}_cmpxchg${order}(v, o, new); + r = raw_${atomic}_cmpxchg${order}(v, o, new); if (unlikely(r != o)) *old = r; return likely(r == o); -} EOF diff --git a/scripts/atomic/fallbacks/xchg b/scripts/atomic/fallbacks/xchg new file mode 100644 index 000000000000..e4def1e0d092 --- /dev/null +++ b/scripts/atomic/fallbacks/xchg @@ -0,0 +1,3 @@ +cat <<EOF + return raw_xchg${order}(&v->counter, new); +EOF diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh index 6e853f0dad8d..c0c8a85d7c81 100755 --- a/scripts/atomic/gen-atomic-fallback.sh +++ b/scripts/atomic/gen-atomic-fallback.sh @@ -17,23 +17,16 @@ gen_template_fallback() local atomic="$1"; shift local int="$1"; shift - local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}" - local ret="$(gen_ret_type "${meta}" "${int}")" local retstmt="$(gen_ret_stmt "${meta}")" local params="$(gen_params "${int}" "${atomic}" "$@")" local args="$(gen_args "$@")" - if [ ! -z "${template}" ]; then - printf "#ifndef ${atomicname}\n" - . ${template} - printf "#define ${atomicname} ${atomicname}\n" - printf "#endif\n\n" - fi + . ${template} } -#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...) -gen_proto_fallback() +#gen_order_fallback(meta, pfx, name, sfx, order, atomic, int, args...) +gen_order_fallback() { local meta="$1"; shift local pfx="$1"; shift @@ -41,87 +34,124 @@ gen_proto_fallback() local sfx="$1"; shift local order="$1"; shift - local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" + local tmpl_order=${order#_} + local tmpl="${ATOMICDIR}/fallbacks/${tmpl_order:-fence}" gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" } -#gen_basic_fallbacks(basename) -gen_basic_fallbacks() -{ - local basename="$1"; shift -cat << EOF -#define ${basename}_acquire ${basename} -#define ${basename}_release ${basename} -#define ${basename}_relaxed ${basename} -EOF -} - -gen_proto_order_variant() +#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...) +gen_proto_fallback() { local meta="$1"; shift local pfx="$1"; shift local name="$1"; shift local sfx="$1"; shift local order="$1"; shift - local atomic="$1" - local basename="arch_${atomic}_${pfx}${name}${sfx}" - - printf "#define ${basename}${order} ${basename}${order}\n" + local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" + gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@" } -#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...) -gen_proto_order_variants() +#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, args...) 
+gen_proto_order_variant() { local meta="$1"; shift local pfx="$1"; shift local name="$1"; shift local sfx="$1"; shift - local atomic="$1" + local order="$1"; shift + local atomic="$1"; shift + local int="$1"; shift - local basename="arch_${atomic}_${pfx}${name}${sfx}" + local atomicname="${atomic}_${pfx}${name}${sfx}${order}" + local basename="${atomic}_${pfx}${name}${sfx}" local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" - # If we don't have relaxed atomics, then we don't bother with ordering fallbacks - # read_acquire and set_release need to be templated, though - if ! meta_has_relaxed "${meta}"; then - gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" + local ret="$(gen_ret_type "${meta}" "${int}")" + local retstmt="$(gen_ret_stmt "${meta}")" + local params="$(gen_params "${int}" "${atomic}" "$@")" + local args="$(gen_args "$@")" - if meta_has_acquire "${meta}"; then - gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" - fi + gen_kerneldoc "raw_" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@" + + printf "static __always_inline ${ret}\n" + printf "raw_${atomicname}(${params})\n" + printf "{\n" + + # Where there is no possible fallback, this order variant is mandatory + # and must be provided by arch code. Add a comment to the header to + # make this obvious. + # + # Ideally we'd error on a missing definition, but arch code might + # define this order variant as a C function without a preprocessor + # symbol. + if [ -z ${template} ] && [ -z "${order}" ] && ! meta_has_relaxed "${meta}"; then + printf "\t${retstmt}arch_${atomicname}(${args});\n" + printf "}\n\n" + return + fi - if meta_has_release "${meta}"; then - gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" - fi + printf "#if defined(arch_${atomicname})\n" + printf "\t${retstmt}arch_${atomicname}(${args});\n" - return + # Allow FULL/ACQUIRE/RELEASE ops to be defined in terms of RELAXED ops + if [ "${order}" != "_relaxed" ] && meta_has_relaxed "${meta}"; then + printf "#elif defined(arch_${basename}_relaxed)\n" + gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@" fi - printf "#ifndef ${basename}_relaxed\n" + # Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops + if [ ! -z "${order}" ]; then + printf "#elif defined(arch_${basename})\n" + printf "\t${retstmt}arch_${basename}(${args});\n" + fi + printf "#else\n" if [ ! -z "${template}" ]; then - printf "#ifdef ${basename}\n" + gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@" + else + printf "#error \"Unable to define raw_${atomicname}\"\n" fi - gen_basic_fallbacks "${basename}" + printf "#endif\n" + printf "}\n\n" +} - if [ ! -z "${template}" ]; then - printf "#endif /* ${basename} */\n\n" - gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" - gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" - gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" - gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@" + +#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...) 
+gen_proto_order_variants() +{ + local meta="$1"; shift + local pfx="$1"; shift + local name="$1"; shift + local sfx="$1"; shift + local atomic="$1" + + gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" + + if meta_has_acquire "${meta}"; then + gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" fi - printf "#else /* ${basename}_relaxed */\n\n" + if meta_has_release "${meta}"; then + gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" + fi - gen_template_fallback "${ATOMICDIR}/fallbacks/acquire" "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" - gen_template_fallback "${ATOMICDIR}/fallbacks/release" "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" - gen_template_fallback "${ATOMICDIR}/fallbacks/fence" "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" + if meta_has_relaxed "${meta}"; then + gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@" + fi +} - printf "#endif /* ${basename}_relaxed */\n\n" +#gen_basic_fallbacks(basename) +gen_basic_fallbacks() +{ + local basename="$1"; shift +cat << EOF +#define raw_${basename}_acquire arch_${basename} +#define raw_${basename}_release arch_${basename} +#define raw_${basename}_relaxed arch_${basename} +EOF } gen_order_fallbacks() @@ -130,36 +160,65 @@ gen_order_fallbacks() cat <<EOF -#ifndef ${xchg}_acquire -#define ${xchg}_acquire(...) \\ - __atomic_op_acquire(${xchg}, __VA_ARGS__) +#define raw_${xchg}_relaxed arch_${xchg}_relaxed + +#ifdef arch_${xchg}_acquire +#define raw_${xchg}_acquire arch_${xchg}_acquire +#else +#define raw_${xchg}_acquire(...) \\ + __atomic_op_acquire(arch_${xchg}, __VA_ARGS__) #endif -#ifndef ${xchg}_release -#define ${xchg}_release(...) \\ - __atomic_op_release(${xchg}, __VA_ARGS__) +#ifdef arch_${xchg}_release +#define raw_${xchg}_release arch_${xchg}_release +#else +#define raw_${xchg}_release(...) \\ + __atomic_op_release(arch_${xchg}, __VA_ARGS__) #endif -#ifndef ${xchg} -#define ${xchg}(...) \\ - __atomic_op_fence(${xchg}, __VA_ARGS__) +#ifdef arch_${xchg} +#define raw_${xchg} arch_${xchg} +#else +#define raw_${xchg}(...) \\ + __atomic_op_fence(arch_${xchg}, __VA_ARGS__) #endif EOF } -gen_xchg_fallbacks() +gen_xchg_order_fallback() { local xchg="$1"; shift - printf "#ifndef ${xchg}_relaxed\n" + local order="$1"; shift + local forder="${order:-_fence}" - gen_basic_fallbacks ${xchg} + printf "#if defined(arch_${xchg}${order})\n" + printf "#define raw_${xchg}${order} arch_${xchg}${order}\n" - printf "#else /* ${xchg}_relaxed */\n" + if [ "${order}" != "_relaxed" ]; then + printf "#elif defined(arch_${xchg}_relaxed)\n" + printf "#define raw_${xchg}${order}(...) \\\\\n" + printf " __atomic_op${forder}(arch_${xchg}, __VA_ARGS__)\n" + fi + + if [ ! -z "${order}" ]; then + printf "#elif defined(arch_${xchg})\n" + printf "#define raw_${xchg}${order} arch_${xchg}\n" + fi - gen_order_fallbacks ${xchg} + printf "#else\n" + printf "extern void raw_${xchg}${order}_not_implemented(void);\n" + printf "#define raw_${xchg}${order}(...) 
raw_${xchg}${order}_not_implemented()\n" + printf "#endif\n\n" +} + +gen_xchg_fallbacks() +{ + local xchg="$1"; shift - printf "#endif /* ${xchg}_relaxed */\n\n" + for order in "" "_acquire" "_release" "_relaxed"; do + gen_xchg_order_fallback "${xchg}" "${order}" + done } gen_try_cmpxchg_fallback() @@ -168,40 +227,61 @@ gen_try_cmpxchg_fallback() local order="$1"; shift; cat <<EOF -#ifndef arch_try_${cmpxchg}${order} -#define arch_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\ +#define raw_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\ ({ \\ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\ - ___r = arch_${cmpxchg}${order}((_ptr), ___o, (_new)); \\ + ___r = raw_${cmpxchg}${order}((_ptr), ___o, (_new)); \\ if (unlikely(___r != ___o)) \\ *___op = ___r; \\ likely(___r == ___o); \\ }) -#endif /* arch_try_${cmpxchg}${order} */ - EOF } -gen_try_cmpxchg_fallbacks() +gen_try_cmpxchg_order_fallback() { - local cmpxchg="$1"; shift; + local cmpxchg="$1"; shift + local order="$1"; shift + local forder="${order:-_fence}" - printf "#ifndef arch_try_${cmpxchg}_relaxed\n" - printf "#ifdef arch_try_${cmpxchg}\n" + printf "#if defined(arch_try_${cmpxchg}${order})\n" + printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}${order}\n" + + if [ "${order}" != "_relaxed" ]; then + printf "#elif defined(arch_try_${cmpxchg}_relaxed)\n" + printf "#define raw_try_${cmpxchg}${order}(...) \\\\\n" + printf " __atomic_op${forder}(arch_try_${cmpxchg}, __VA_ARGS__)\n" + fi - gen_basic_fallbacks "arch_try_${cmpxchg}" + if [ ! -z "${order}" ]; then + printf "#elif defined(arch_try_${cmpxchg})\n" + printf "#define raw_try_${cmpxchg}${order} arch_try_${cmpxchg}\n" + fi - printf "#endif /* arch_try_${cmpxchg} */\n\n" + printf "#else\n" + gen_try_cmpxchg_fallback "${cmpxchg}" "${order}" + printf "#endif\n\n" +} + +gen_try_cmpxchg_fallbacks() +{ + local cmpxchg="$1"; shift; for order in "" "_acquire" "_release" "_relaxed"; do - gen_try_cmpxchg_fallback "${cmpxchg}" "${order}" + gen_try_cmpxchg_order_fallback "${cmpxchg}" "${order}" done +} - printf "#else /* arch_try_${cmpxchg}_relaxed */\n" - - gen_order_fallbacks "arch_try_${cmpxchg}" +gen_cmpxchg_local_fallbacks() +{ + local cmpxchg="$1"; shift - printf "#endif /* arch_try_${cmpxchg}_relaxed */\n\n" + printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n" + printf "#ifdef arch_try_${cmpxchg}\n" + printf "#define raw_try_${cmpxchg} arch_try_${cmpxchg}\n" + printf "#else\n" + gen_try_cmpxchg_fallback "${cmpxchg}" "" + printf "#endif\n\n" } cat << EOF @@ -217,16 +297,20 @@ cat << EOF EOF -for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do +for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128"; do gen_xchg_fallbacks "${xchg}" done -for cmpxchg in "cmpxchg" "cmpxchg64"; do +for cmpxchg in "cmpxchg" "cmpxchg64" "cmpxchg128"; do gen_try_cmpxchg_fallbacks "${cmpxchg}" done -for cmpxchg in "cmpxchg_local" "cmpxchg64_local"; do - gen_try_cmpxchg_fallback "${cmpxchg}" "" +for cmpxchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local"; do + gen_cmpxchg_local_fallbacks "${cmpxchg}" "" +done + +for cmpxchg in "sync_cmpxchg"; do + printf "#define raw_${cmpxchg} arch_${cmpxchg}\n\n" done grep '^[a-z]' "$1" | while read name meta args; do diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh index d9ffd74f73ca..8f8f8e3b20f9 100755 --- a/scripts/atomic/gen-atomic-instrumented.sh +++ b/scripts/atomic/gen-atomic-instrumented.sh @@ -68,12 +68,14 @@ gen_proto_order_variant() local args="$(gen_args "$@")" local 
retstmt="$(gen_ret_stmt "${meta}")" + gen_kerneldoc "" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@" + cat <<EOF static __always_inline ${ret} ${atomicname}(${params}) { ${checks} - ${retstmt}arch_${atomicname}(${args}); + ${retstmt}raw_${atomicname}(${args}); } EOF @@ -84,7 +86,6 @@ gen_xchg() { local xchg="$1"; shift local order="$1"; shift - local mult="$1"; shift kcsan_barrier="" if [ "${xchg%_local}" = "${xchg}" ]; then @@ -104,9 +105,9 @@ cat <<EOF EOF [ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n" cat <<EOF - instrument_atomic_read_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\ - instrument_read_write(__ai_oldp, ${mult}sizeof(*__ai_oldp)); \\ - arch_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \\ + instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \\ + raw_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\ }) EOF @@ -119,8 +120,8 @@ cat <<EOF EOF [ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n" cat <<EOF - instrument_atomic_read_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\ - arch_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\ + instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \\ + raw_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\ }) EOF @@ -134,15 +135,10 @@ cat << EOF // DO NOT MODIFY THIS FILE DIRECTLY /* - * This file provides wrappers with KASAN instrumentation for atomic operations. - * To use this functionality an arch's atomic.h file needs to define all - * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include - * this file at the end. This file provides atomic_read() that forwards to - * arch_atomic_read() for actual atomic operation. - * Note: if an arch atomic operation is implemented by means of other atomic - * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use - * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid - * double instrumentation. + * This file provoides atomic operations with explicit instrumentation (e.g. + * KASAN, KCSAN), which should be used unless it is necessary to avoid + * instrumentation. Where it is necessary to aovid instrumenation, the + * raw_atomic*() operations should be used. */ #ifndef _LINUX_ATOMIC_INSTRUMENTED_H #define _LINUX_ATOMIC_INSTRUMENTED_H @@ -166,24 +162,18 @@ grep '^[a-z]' "$1" | while read name meta args; do done -for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg" "try_cmpxchg64"; do +for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do for order in "" "_acquire" "_release" "_relaxed"; do - gen_xchg "${xchg}" "${order}" "" + gen_xchg "${xchg}" "${order}" printf "\n" done done -for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" ; do - gen_xchg "${xchg}" "" "" +for xchg in "cmpxchg_local" "cmpxchg64_local" "cmpxchg128_local" "sync_cmpxchg" "try_cmpxchg_local" "try_cmpxchg64_local" "try_cmpxchg128_local"; do + gen_xchg "${xchg}" "" printf "\n" done -gen_xchg "cmpxchg_double" "" "2 * " - -printf "\n\n" - -gen_xchg "cmpxchg_double_local" "" "2 * " - cat <<EOF #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh index eda89cea6e1d..9826be3ba986 100755 --- a/scripts/atomic/gen-atomic-long.sh +++ b/scripts/atomic/gen-atomic-long.sh @@ -32,24 +32,34 @@ gen_args_cast() done } -#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...) 
+#gen_proto_order_variant(meta, pfx, name, sfx, order, arg...) gen_proto_order_variant() { local meta="$1"; shift - local name="$1$2$3$4"; shift; shift; shift; shift - local atomic="$1"; shift - local int="$1"; shift + local pfx="$1"; shift + local name="$1"; shift + local sfx="$1"; shift + local order="$1"; shift + + local atomicname="${pfx}${name}${sfx}${order}" local ret="$(gen_ret_type "${meta}" "long")" local params="$(gen_params "long" "atomic_long" "$@")" - local argscast="$(gen_args_cast "${int}" "${atomic}" "$@")" + local argscast_32="$(gen_args_cast "int" "atomic" "$@")" + local argscast_64="$(gen_args_cast "s64" "atomic64" "$@")" local retstmt="$(gen_ret_stmt "${meta}")" + gen_kerneldoc "raw_" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "atomic_long" "long" "$@" + cat <<EOF static __always_inline ${ret} -arch_atomic_long_${name}(${params}) +raw_atomic_long_${atomicname}(${params}) { - ${retstmt}arch_${atomic}_${name}(${argscast}); +#ifdef CONFIG_64BIT + ${retstmt}raw_atomic64_${atomicname}(${argscast_64}); +#else + ${retstmt}raw_atomic_${atomicname}(${argscast_32}); +#endif } EOF @@ -79,24 +89,12 @@ typedef atomic_t atomic_long_t; #define atomic_long_cond_read_relaxed atomic_cond_read_relaxed #endif -#ifdef CONFIG_64BIT - -EOF - -grep '^[a-z]' "$1" | while read name meta args; do - gen_proto "${meta}" "${name}" "atomic64" "s64" ${args} -done - -cat <<EOF -#else /* CONFIG_64BIT */ - EOF grep '^[a-z]' "$1" | while read name meta args; do - gen_proto "${meta}" "${name}" "atomic" "int" ${args} + gen_proto "${meta}" "${name}" ${args} done cat <<EOF -#endif /* CONFIG_64BIT */ #endif /* _LINUX_ATOMIC_LONG_H */ EOF diff --git a/scripts/atomic/kerneldoc/add b/scripts/atomic/kerneldoc/add new file mode 100644 index 000000000000..991f3dafceea --- /dev/null +++ b/scripts/atomic/kerneldoc/add @@ -0,0 +1,13 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic add with ${desc_order} ordering + * @i: ${int} value to add + * @v: pointer to ${atomic}_t + * + * Atomically updates @v to (@v + @i) with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * ${desc_return} + */ +EOF diff --git a/scripts/atomic/kerneldoc/add_negative b/scripts/atomic/kerneldoc/add_negative new file mode 100644 index 000000000000..f4ca1f05d1d8 --- /dev/null +++ b/scripts/atomic/kerneldoc/add_negative @@ -0,0 +1,13 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic add and test if negative with ${desc_order} ordering + * @i: ${int} value to add + * @v: pointer to ${atomic}_t + * + * Atomically updates @v to (@v + @i) with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * Return: @true if the resulting value of @v is negative, @false otherwise. + */ +EOF diff --git a/scripts/atomic/kerneldoc/add_unless b/scripts/atomic/kerneldoc/add_unless new file mode 100644 index 000000000000..f828e5f6750c --- /dev/null +++ b/scripts/atomic/kerneldoc/add_unless @@ -0,0 +1,18 @@ +if [ -z "${pfx}" ]; then + desc_return="Return: @true if @v was updated, @false otherwise." +fi + +cat <<EOF +/** + * ${class}${atomicname}() - atomic add unless value with ${desc_order} ordering + * @v: pointer to ${atomic}_t + * @a: ${int} value to add + * @u: ${int} value to compare with + * + * If (@v != @u), atomically updates @v to (@v + @a) with ${desc_order} ordering. 
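With gen-atomic-long.sh now emitting a single definition per operation that selects the 32-bit or 64-bit implementation at preprocessor time, rather than generating the whole header twice, each raw_atomic_long_*() should come out roughly as below; the argument casts produced by gen_args_cast() are elided in this sketch:

        static __always_inline long
        raw_atomic_long_add_return(long i, atomic_long_t *v)
        {
        #ifdef CONFIG_64BIT
                return raw_atomic64_add_return(i, v);
        #else
                return raw_atomic_add_return(i, v);
        #endif
        }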
+ * + * ${desc_noinstr} + * + * ${desc_return} + */ +EOF diff --git a/scripts/atomic/kerneldoc/and b/scripts/atomic/kerneldoc/and new file mode 100644 index 000000000000..a923574351fc --- /dev/null +++ b/scripts/atomic/kerneldoc/and @@ -0,0 +1,13 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic bitwise AND with ${desc_order} ordering + * @i: ${int} value + * @v: pointer to ${atomic}_t + * + * Atomically updates @v to (@v & @i) with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * ${desc_return} + */ +EOF diff --git a/scripts/atomic/kerneldoc/andnot b/scripts/atomic/kerneldoc/andnot new file mode 100644 index 000000000000..64bb509f866b --- /dev/null +++ b/scripts/atomic/kerneldoc/andnot @@ -0,0 +1,13 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic bitwise AND NOT with ${desc_order} ordering + * @i: ${int} value + * @v: pointer to ${atomic}_t + * + * Atomically updates @v to (@v & ~@i) with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * ${desc_return} + */ +EOF diff --git a/scripts/atomic/kerneldoc/cmpxchg b/scripts/atomic/kerneldoc/cmpxchg new file mode 100644 index 000000000000..3bce328f50cf --- /dev/null +++ b/scripts/atomic/kerneldoc/cmpxchg @@ -0,0 +1,14 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic compare and exchange with ${desc_order} ordering + * @v: pointer to ${atomic}_t + * @old: ${int} value to compare with + * @new: ${int} value to assign + * + * If (@v == @old), atomically updates @v to @new with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * Return: The original value of @v. + */ +EOF diff --git a/scripts/atomic/kerneldoc/dec b/scripts/atomic/kerneldoc/dec new file mode 100644 index 000000000000..bbeecbc4c20a --- /dev/null +++ b/scripts/atomic/kerneldoc/dec @@ -0,0 +1,12 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic decrement with ${desc_order} ordering + * @v: pointer to ${atomic}_t + * + * Atomically updates @v to (@v - 1) with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * ${desc_return} + */ +EOF diff --git a/scripts/atomic/kerneldoc/dec_and_test b/scripts/atomic/kerneldoc/dec_and_test new file mode 100644 index 000000000000..71bbd23ce4bc --- /dev/null +++ b/scripts/atomic/kerneldoc/dec_and_test @@ -0,0 +1,12 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic decrement and test if zero with ${desc_order} ordering + * @v: pointer to ${atomic}_t + * + * Atomically updates @v to (@v - 1) with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * Return: @true if the resulting value of @v is zero, @false otherwise. + */ +EOF diff --git a/scripts/atomic/kerneldoc/dec_if_positive b/scripts/atomic/kerneldoc/dec_if_positive new file mode 100644 index 000000000000..04f1aed3cf83 --- /dev/null +++ b/scripts/atomic/kerneldoc/dec_if_positive @@ -0,0 +1,12 @@ +cat <<EOF +/** + * ${class}${atomicname}() - atomic decrement if positive with ${desc_order} ordering + * @v: pointer to ${atomic}_t + * + * If (@v > 0), atomically updates @v to (@v - 1) with ${desc_order} ordering. + * + * ${desc_noinstr} + * + * Return: The old value of (@v - 1), regardless of whether @v was updated. 
diff --git a/scripts/atomic/kerneldoc/add_negative b/scripts/atomic/kerneldoc/add_negative
new file mode 100644
index 000000000000..f4ca1f05d1d8
--- /dev/null
+++ b/scripts/atomic/kerneldoc/add_negative
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic add and test if negative with ${desc_order} ordering
+ * @i: ${int} value to add
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v + @i) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/add_unless b/scripts/atomic/kerneldoc/add_unless
new file mode 100644
index 000000000000..f828e5f6750c
--- /dev/null
+++ b/scripts/atomic/kerneldoc/add_unless
@@ -0,0 +1,18 @@
+if [ -z "${pfx}" ]; then
+	desc_return="Return: @true if @v was updated, @false otherwise."
+fi
+
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic add unless value with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ * @a: ${int} value to add
+ * @u: ${int} value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/and b/scripts/atomic/kerneldoc/and
new file mode 100644
index 000000000000..a923574351fc
--- /dev/null
+++ b/scripts/atomic/kerneldoc/and
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic bitwise AND with ${desc_order} ordering
+ * @i: ${int} value
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v & @i) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/andnot b/scripts/atomic/kerneldoc/andnot
new file mode 100644
index 000000000000..64bb509f866b
--- /dev/null
+++ b/scripts/atomic/kerneldoc/andnot
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic bitwise AND NOT with ${desc_order} ordering
+ * @i: ${int} value
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v & ~@i) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/cmpxchg b/scripts/atomic/kerneldoc/cmpxchg
new file mode 100644
index 000000000000..3bce328f50cf
--- /dev/null
+++ b/scripts/atomic/kerneldoc/cmpxchg
@@ -0,0 +1,14 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic compare and exchange with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ * @old: ${int} value to compare with
+ * @new: ${int} value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: The original value of @v.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/dec b/scripts/atomic/kerneldoc/dec
new file mode 100644
index 000000000000..bbeecbc4c20a
--- /dev/null
+++ b/scripts/atomic/kerneldoc/dec
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic decrement with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v - 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/dec_and_test b/scripts/atomic/kerneldoc/dec_and_test
new file mode 100644
index 000000000000..71bbd23ce4bc
--- /dev/null
+++ b/scripts/atomic/kerneldoc/dec_and_test
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic decrement and test if zero with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v - 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/dec_if_positive b/scripts/atomic/kerneldoc/dec_if_positive
new file mode 100644
index 000000000000..04f1aed3cf83
--- /dev/null
+++ b/scripts/atomic/kerneldoc/dec_if_positive
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic decrement if positive with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: The old value of (@v - 1), regardless of whether @v was updated.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/dec_unless_positive b/scripts/atomic/kerneldoc/dec_unless_positive
new file mode 100644
index 000000000000..ee73612f0354
--- /dev/null
+++ b/scripts/atomic/kerneldoc/dec_unless_positive
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic decrement unless positive with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/inc b/scripts/atomic/kerneldoc/inc
new file mode 100644
index 000000000000..9f14f1b3d2ef
--- /dev/null
+++ b/scripts/atomic/kerneldoc/inc
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic increment with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v + 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/inc_and_test b/scripts/atomic/kerneldoc/inc_and_test
new file mode 100644
index 000000000000..971694d59bbd
--- /dev/null
+++ b/scripts/atomic/kerneldoc/inc_and_test
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic increment and test if zero with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v + 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/inc_not_zero b/scripts/atomic/kerneldoc/inc_not_zero
new file mode 100644
index 000000000000..618be08e653e
--- /dev/null
+++ b/scripts/atomic/kerneldoc/inc_not_zero
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic increment unless zero with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/inc_unless_negative b/scripts/atomic/kerneldoc/inc_unless_negative
new file mode 100644
index 000000000000..597f23d4dc8d
--- /dev/null
+++ b/scripts/atomic/kerneldoc/inc_unless_negative
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic increment unless negative with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/or b/scripts/atomic/kerneldoc/or
new file mode 100644
index 000000000000..55b33de50416
--- /dev/null
+++ b/scripts/atomic/kerneldoc/or
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic bitwise OR with ${desc_order} ordering
+ * @i: ${int} value
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v | @i) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/read b/scripts/atomic/kerneldoc/read
new file mode 100644
index 000000000000..89fe6147c964
--- /dev/null
+++ b/scripts/atomic/kerneldoc/read
@@ -0,0 +1,12 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic load with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically loads the value of @v with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: The value loaded from @v.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/set b/scripts/atomic/kerneldoc/set
new file mode 100644
index 000000000000..e82cb9ebbc42
--- /dev/null
+++ b/scripts/atomic/kerneldoc/set
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic set with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ * @i: ${int} value to assign
+ *
+ * Atomically sets @v to @i with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: Nothing.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/sub b/scripts/atomic/kerneldoc/sub
new file mode 100644
index 000000000000..3ba642d04407
--- /dev/null
+++ b/scripts/atomic/kerneldoc/sub
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic subtract with ${desc_order} ordering
+ * @i: ${int} value to subtract
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v - @i) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/sub_and_test b/scripts/atomic/kerneldoc/sub_and_test
new file mode 100644
index 000000000000..d3760f7749d4
--- /dev/null
+++ b/scripts/atomic/kerneldoc/sub_and_test
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic subtract and test if zero with ${desc_order} ordering
+ * @i: ${int} value to subtract
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v - @i) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/try_cmpxchg b/scripts/atomic/kerneldoc/try_cmpxchg
new file mode 100644
index 000000000000..296553206c06
--- /dev/null
+++ b/scripts/atomic/kerneldoc/try_cmpxchg
@@ -0,0 +1,15 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic compare and exchange with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ * @old: pointer to ${int} value to compare with
+ * @new: ${int} value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with ${desc_order} ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+EOF
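The cmpxchg and try_cmpxchg templates above spell out the difference in calling convention: try_cmpxchg() returns a boolean and, on failure, writes the observed value back through @old. A minimal usage sketch against those documented semantics (counter_inc_below() is a hypothetical helper used only for illustration, not part of this patch):

#include <linux/atomic.h>
#include <linux/types.h>

/* Bump @v by one unless it has already reached @max. */
static bool counter_inc_below(atomic_t *v, int max)
{
	int old = atomic_read(v);

	do {
		if (old >= max)
			return false;
		/* On failure, try_cmpxchg() reloads the current value into 'old'. */
	} while (!atomic_try_cmpxchg(v, &old, old + 1));

	return true;
}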
diff --git a/scripts/atomic/kerneldoc/xchg b/scripts/atomic/kerneldoc/xchg
new file mode 100644
index 000000000000..75f04c085f25
--- /dev/null
+++ b/scripts/atomic/kerneldoc/xchg
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic exchange with ${desc_order} ordering
+ * @v: pointer to ${atomic}_t
+ * @new: ${int} value to assign
+ *
+ * Atomically updates @v to @new with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * Return: The original value of @v.
+ */
+EOF
diff --git a/scripts/atomic/kerneldoc/xor b/scripts/atomic/kerneldoc/xor
new file mode 100644
index 000000000000..8837270f2806
--- /dev/null
+++ b/scripts/atomic/kerneldoc/xor
@@ -0,0 +1,13 @@
+cat <<EOF
+/**
+ * ${class}${atomicname}() - atomic bitwise XOR with ${desc_order} ordering
+ * @i: ${int} value
+ * @v: pointer to ${atomic}_t
+ *
+ * Atomically updates @v to (@v ^ @i) with ${desc_order} ordering.
+ *
+ * ${desc_noinstr}
+ *
+ * ${desc_return}
+ */
+EOF
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 2486689ffc7b..eb70c1fd4e86 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -64,7 +64,7 @@ my $type_constant = '\b``([^\`]+)``\b';
 my $type_constant2 = '\%([-_\w]+)';
 my $type_func = '(\w+)\(\)';
 my $type_param = '\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
-my $type_param_ref = '([\!]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
+my $type_param_ref = '([\!~]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
 my $type_fp_param = '\@(\w+)\(\)';	# Special RST handling for func ptr params
 my $type_fp_param2 = '\@(\w+->\S+)\(\)';	# Special RST handling for structs with func ptr params
 my $type_env = '(\$\w+)';
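The scripts/kernel-doc change above extends $type_param_ref so that a parameter reference prefixed with '~' (as the andnot template writes '~@i') is still recognized as a parameter reference instead of being left as plain text. A hypothetical kerneldoc comment that benefits from this (clear_flags() is an illustrative example, not part of this patch):

/**
 * clear_flags() - clear flag bits (hypothetical example)
 * @flags: pointer to the flag word
 * @mask: bits to clear
 *
 * Updates @flags to (@flags & ~@mask); with the regex change above,
 * kernel-doc treats ~@mask as a reference to the @mask parameter.
 */
static inline void clear_flags(unsigned long *flags, unsigned long mask)
{
	*flags &= ~mask;
}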
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index 131be76d2130..dfd186372f63 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -17,7 +17,11 @@ binutils)
 	echo 2.25.0
 	;;
 gcc)
-	echo 5.1.0
+	if [ "$SRCARCH" = parisc ]; then
+		echo 11.0.0
+	else
+		echo 5.1.0
+	fi
 	;;
 llvm)
 	if [ "$SRCARCH" = s390 ]; then