From 8f8a55f49cda5fee914bbea1ab5af8df3a6ba8af Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Fri, 19 Dec 2025 16:40:00 +0100
Subject: locking/seqlock: Support Clang's context analysis

Add support for Clang's context analysis for seqlock_t.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-12-elver@google.com
---
 include/linux/seqlock.h | 38 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

(limited to 'include/linux/seqlock.h')

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 221123660e71..113320911a09 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -816,6 +816,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
 	do {								\
 		spin_lock_init(&(sl)->lock);				\
 		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
+		__assume_ctx_lock(sl);					\
 	} while (0)

 /**
@@ -832,6 +833,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
  * Return: count, to be passed to read_seqretry()
  */
 static inline unsigned read_seqbegin(const seqlock_t *sl)
+	__acquires_shared(sl) __no_context_analysis
 {
 	return read_seqcount_begin(&sl->seqcount);
 }
@@ -848,6 +850,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
  * Return: true if a read section retry is required, else false
  */
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+	__releases_shared(sl) __no_context_analysis
 {
 	return read_seqcount_retry(&sl->seqcount, start);
 }
@@ -872,6 +875,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
  * _irqsave or _bh variants of this function instead.
  */
 static inline void write_seqlock(seqlock_t *sl)
+	__acquires(sl) __no_context_analysis
 {
 	spin_lock(&sl->lock);
 	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
@@ -885,6 +889,7 @@ static inline void write_seqlock(seqlock_t *sl)
  * critical section of given seqlock_t.
  */
 static inline void write_sequnlock(seqlock_t *sl)
+	__releases(sl) __no_context_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock(&sl->lock);
 }
@@ -898,6 +903,7 @@ static inline void write_sequnlock(seqlock_t *sl)
  * other write side sections, can be invoked from softirq contexts.
  */
 static inline void write_seqlock_bh(seqlock_t *sl)
+	__acquires(sl) __no_context_analysis
 {
 	spin_lock_bh(&sl->lock);
 	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
@@ -912,6 +918,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
  * write_seqlock_bh().
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
+	__releases(sl) __no_context_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_bh(&sl->lock);
 }
@@ -925,6 +932,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
  * other write sections, can be invoked from hardirq contexts.
  */
 static inline void write_seqlock_irq(seqlock_t *sl)
+	__acquires(sl) __no_context_analysis
 {
 	spin_lock_irq(&sl->lock);
 	do_write_seqcount_begin(&sl->seqcount.seqcount);
 }
@@ -938,12 +946,14 @@ static inline void write_seqlock_irq(seqlock_t *sl)
  * seqlock_t write side section opened with write_seqlock_irq().
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
+	__releases(sl) __no_context_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irq(&sl->lock);
 }

 static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+	__acquires(sl) __no_context_analysis
 {
 	unsigned long flags;

@@ -976,6 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
  */
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+	__releases(sl) __no_context_analysis
 {
 	do_write_seqcount_end(&sl->seqcount.seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
@@ -998,6 +1009,7 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
  * The opened read section must be closed with read_sequnlock_excl().
  */
 static inline void read_seqlock_excl(seqlock_t *sl)
+	__acquires_shared(sl) __no_context_analysis
 {
 	spin_lock(&sl->lock);
 }
@@ -1007,6 +1019,7 @@ static inline void read_seqlock_excl(seqlock_t *sl)
  * @sl: Pointer to seqlock_t
  */
 static inline void read_sequnlock_excl(seqlock_t *sl)
+	__releases_shared(sl) __no_context_analysis
 {
 	spin_unlock(&sl->lock);
 }
@@ -1021,6 +1034,7 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
  * from softirq contexts.
  */
 static inline void read_seqlock_excl_bh(seqlock_t *sl)
+	__acquires_shared(sl) __no_context_analysis
 {
 	spin_lock_bh(&sl->lock);
 }
@@ -1031,6 +1045,7 @@ static inline void read_seqlock_excl_bh(seqlock_t *sl)
  * @sl: Pointer to seqlock_t
  */
 static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+	__releases_shared(sl) __no_context_analysis
 {
 	spin_unlock_bh(&sl->lock);
 }
@@ -1045,6 +1060,7 @@ static inline void read_sequnlock_excl_bh(seqlock_t *sl)
  * hardirq context.
  */
 static inline void read_seqlock_excl_irq(seqlock_t *sl)
+	__acquires_shared(sl) __no_context_analysis
 {
 	spin_lock_irq(&sl->lock);
 }
@@ -1055,11 +1071,13 @@ static inline void read_seqlock_excl_irq(seqlock_t *sl)
  * @sl: Pointer to seqlock_t
  */
 static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+	__releases_shared(sl) __no_context_analysis
 {
 	spin_unlock_irq(&sl->lock);
 }

 static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+	__acquires_shared(sl) __no_context_analysis
 {
 	unsigned long flags;

@@ -1089,6 +1107,7 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
  */
 static inline void
 read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+	__releases_shared(sl) __no_context_analysis
 {
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
@@ -1125,6 +1144,7 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
  * parameter of the next read_seqbegin_or_lock() iteration.
  */
 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+	__acquires_shared(lock) __no_context_analysis
 {
 	if (!(*seq & 1))	/* Even */
 		*seq = read_seqbegin(lock);
@@ -1140,6 +1160,7 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
  * Return: true if a read section retry is required, false otherwise
  */
 static inline int need_seqretry(seqlock_t *lock, int seq)
+	__releases_shared(lock) __no_context_analysis
 {
 	return !(seq & 1) && read_seqretry(lock, seq);
 }
@@ -1153,6 +1174,7 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
  * with read_seqbegin_or_lock() and validated by need_seqretry().
  */
 static inline void done_seqretry(seqlock_t *lock, int seq)
+	__no_context_analysis
 {
 	if (seq & 1)
 		read_sequnlock_excl(lock);
@@ -1180,6 +1202,7 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
  */
 static inline unsigned long
 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+	__acquires_shared(lock) __no_context_analysis
 {
 	unsigned long flags = 0;

@@ -1205,6 +1228,7 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
  */
 static inline void
 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+	__no_context_analysis
 {
 	if (seq & 1)
 		read_sequnlock_excl_irqrestore(lock, flags);
@@ -1225,6 +1249,7 @@ struct ss_tmp {
 };

 static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+	__no_context_analysis
 {
 	if (sst->lock)
 		spin_unlock(sst->lock);
@@ -1254,6 +1279,7 @@ extern void __scoped_seqlock_bug(void);

 static __always_inline void
 __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+	__no_context_analysis
 {
 	switch (sst->state) {
 	case ss_done:
@@ -1296,9 +1322,19 @@ __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
 	}
 }

+/*
+ * Context analysis no-op helper to release the seqlock at the end of the
+ * for-scope; the compiler's alias analysis will recognize that the pointer @s
+ * is an alias of @_seqlock passed to read_seqbegin(_seqlock) below.
+ */
+static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
+	__releases_shared(*((seqlock_t **)s)) __no_context_analysis {}
+
 #define __scoped_seqlock_read(_seqlock, _target, _s)			\
 	for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) =	\
-	     { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+	     { .state = ss_lockless, .data = read_seqbegin(_seqlock) }, \
+	     *__UNIQUE_ID(ctx) __cleanup(__scoped_seqlock_cleanup_ctx) =\
+	     (struct ss_tmp *)_seqlock;					\
 	     _s.state != ss_done;					\
 	     __scoped_seqlock_next(&_s, _seqlock, _target))
--
cgit v1.2.3


From f88a31308db6a856229150039b0f56d59696ed31 Mon Sep 17 00:00:00 2001
From: Randy Dunlap <rdunlap@infradead.org>
Date: Fri, 23 Jan 2026 10:37:49 -0800
Subject: seqlock: fix scoped_seqlock_read kernel-doc

Eliminate all kernel-doc warnings in seqlock.h:

- correct the macro to have "()" immediately following the macro name
- don't include the macro parameters in the short description (first line)
- make the parameter names in the comments match the actual macro
  parameter names
- use "::" for the Example

WARNING: include/linux/seqlock.h:1341 This comment starts with '/**', but isn't a kernel-doc comment.
 * scoped_seqlock_read (lock, ss_state) - execute the read side critical

Documentation/locking/seqlock:242: include/linux/seqlock.h:1351: WARNING: Definition list ends without a blank line; unexpected unindent.
[docutils] Warning: include/linux/seqlock.h:1357 function parameter '_seqlock' not described in 'scoped_seqlock_read'
Warning: include/linux/seqlock.h:1357 function parameter '_target' not described in 'scoped_seqlock_read'

Fixes: cc39f3872c08 ("seqlock: Introduce scoped_seqlock_read()")
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260123183749.3997533-1-rdunlap@infradead.org
---
 include/linux/seqlock.h | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

(limited to 'include/linux/seqlock.h')

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 113320911a09..c00063dffba3 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1339,15 +1339,14 @@ static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
 	     __scoped_seqlock_next(&_s, _seqlock, _target))

 /**
- * scoped_seqlock_read (lock, ss_state) - execute the read side critical
- *                                        section without manual sequence
- *                                        counter handling or calls to other
- *                                        helpers
- * @lock: pointer to seqlock_t protecting the data
- * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
- *            the type of critical read section
- *
- * Example:
+ * scoped_seqlock_read() - execute the read-side critical section
+ *                         without manual sequence counter handling
+ *                         or calls to other helpers
+ * @_seqlock: pointer to seqlock_t protecting the data
+ * @_target: an enum ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless}
+ *           indicating the type of critical read section
+ *
+ * Example::
  *
  * scoped_seqlock_read (&lock, ss_lock) {
  *	// read-side critical section
--
cgit v1.2.3


From d084a73714f818ce509022e1aa9483cabf797c16 Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Mon, 19 Jan 2026 10:05:52 +0100
Subject: compiler-context-analysis: Introduce scoped init guards

Add scoped init guard definitions for common synchronization primitives
supported by context analysis.

The scoped init guards treat the context as active within the
initialization scope of the underlying context lock, given that
initialization implies exclusive access to the underlying object. This
allows initialization of guarded members without disabling context
analysis, while distinguishing initialization from subsequent usage.
The documentation is updated with the new recommendation.

Where scoped init guards are not provided or cannot be implemented
(ww_mutex omitted for lack of multi-arg guard initializers), the
alternative is to just disable context analysis where guarded members
are initialized.
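
For example, with the spinlock_init guard class added below, a guarded
member can be initialized without any escape hatch (a sketch mirroring
the documentation update; the struct and member names are illustrative):

	struct my_data {
		spinlock_t lock;
		int counter __guarded_by(&lock);
	};

	void init_my_data(struct my_data *d)
	{
		/* Initializes &d->lock; the analysis assumes it held here. */
		scoped_guard(spinlock_init, &d->lock)
			d->counter = 0;
	}
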
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20251212095943.GM3911114@noisy.programming.kicks-ass.net/
Link: https://patch.msgid.link/20260119094029.1344361-3-elver@google.com
---
 Documentation/dev-tools/context-analysis.rst | 30 +++++++++++++++++++++++++---
 include/linux/compiler-context-analysis.h    |  9 ++-------
 include/linux/local_lock.h                   |  8 ++++++++
 include/linux/local_lock_internal.h          |  1 +
 include/linux/mutex.h                        |  3 +++
 include/linux/rwsem.h                        |  4 ++++
 include/linux/seqlock.h                      |  5 +++++
 include/linux/spinlock.h                     | 12 +++++++++++
 lib/test_context-analysis.c                  | 16 +++++++--------
 9 files changed, 70 insertions(+), 18 deletions(-)

(limited to 'include/linux/seqlock.h')

diff --git a/Documentation/dev-tools/context-analysis.rst b/Documentation/dev-tools/context-analysis.rst
index e69896e597b6..54d9ee28de98 100644
--- a/Documentation/dev-tools/context-analysis.rst
+++ b/Documentation/dev-tools/context-analysis.rst
@@ -83,9 +83,33 @@ Currently the following synchronization primitives are supported:
 `bit_spinlock`, RCU, SRCU (`srcu_struct`), `rw_semaphore`, `local_lock_t`,
 `ww_mutex`.

-For context locks with an initialization function (e.g., `spin_lock_init()`),
-calling this function before initializing any guarded members or globals
-prevents the compiler from issuing warnings about unguarded initialization.
+To initialize variables guarded by a context lock with an initialization
+function (``type_init(&lock)``), prefer using ``guard(type_init)(&lock)`` or
+``scoped_guard(type_init, &lock) { ... }`` to initialize such guarded members
+or globals in the enclosing scope. This initializes the context lock and treats
+the context as active within the initialization scope (initialization implies
+exclusive access to the underlying object).
+
+For example::
+
+	struct my_data {
+		spinlock_t lock;
+		int counter __guarded_by(&lock);
+	};
+
+	void init_my_data(struct my_data *d)
+	{
+		...
+		guard(spinlock_init)(&d->lock);
+		d->counter = 0;
+		...
+	}
+
+Alternatively, initializing guarded variables can be done with context analysis
+disabled, preferably in the smallest possible scope (due to lack of any other
+checking): either with a ``context_unsafe(var = init)`` expression, or by
+marking small initialization functions with the ``__context_unsafe(init)``
+attribute.

 Lockdep assertions, such as `lockdep_assert_held()`, inform the compiler's
 context analysis that the associated synchronization primitive is held after
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
index e86b8a3c2f89..00c074a2ccb0 100644
--- a/include/linux/compiler-context-analysis.h
+++ b/include/linux/compiler-context-analysis.h
@@ -32,13 +32,8 @@
 /*
  * The "assert_capability" attribute is a bit confusingly named. It does not
  * generate a check. Instead, it tells the analysis to *assume* the capability
- * is held. This is used for:
- *
- * 1. Augmenting runtime assertions, that can then help with patterns beyond the
- *    compiler's static reasoning abilities.
- *
- * 2. Initialization of context locks, so we can access guarded variables right
- *    after initialization (nothing else should access the same object yet).
+ * is held. This is used for augmenting runtime assertions, that can then help
+ * with patterns beyond the compiler's static reasoning abilities.
  */
 # define __assumes_ctx_lock(...)	__attribute__((assert_capability(__VA_ARGS__)))
 # define __assumes_shared_ctx_lock(...)	__attribute__((assert_shared_capability(__VA_ARGS__)))
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 99c06e499375..b8830148a859 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -104,6 +104,8 @@ DEFINE_LOCK_GUARD_1(local_lock_nested_bh, local_lock_t __percpu,
 		    local_lock_nested_bh(_T->lock),
 		    local_unlock_nested_bh(_T->lock))

+DEFINE_LOCK_GUARD_1(local_lock_init, local_lock_t, local_lock_init(_T->lock), /* */)
+
 DECLARE_LOCK_GUARD_1_ATTRS(local_lock, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
 #define class_local_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock, _T)
 DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irq, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
@@ -112,5 +114,11 @@ DECLARE_LOCK_GUARD_1_ATTRS(local_lock_irqsave, __acquires(_T), __releases(*(loca
 #define class_local_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_irqsave, _T)
 DECLARE_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, __acquires(_T), __releases(*(local_lock_t __percpu **)_T))
 #define class_local_lock_nested_bh_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_nested_bh, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(local_lock_init, __acquires(_T), __releases(*(local_lock_t **)_T))
+#define class_local_lock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_lock_init, _T)
+
+DEFINE_LOCK_GUARD_1(local_trylock_init, local_trylock_t, local_trylock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(local_trylock_init, __acquires(_T), __releases(*(local_trylock_t **)_T))
+#define class_local_trylock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(local_trylock_init, _T)

 #endif
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 7843ab9059c2..ed2f3fb4c360 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include
 #include

 #ifndef CONFIG_PREEMPT_RT
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 89977c215cbd..6b12009351d2 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -254,6 +254,7 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_a
 DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
 DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
 DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex_init, struct mutex, mutex_init(_T->lock), /* */)

 DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
 #define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
@@ -261,6 +262,8 @@ DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex
 #define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
 DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
 #define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_init, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_init, _T)

 extern unsigned long mutex_get_owner(struct mutex *lock);

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8da14a08a4e1..ea1bbdb57a47 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -280,6 +280,10 @@ DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_try, __acquires(_T), __releases(*(struct
 DECLARE_LOCK_GUARD_1_ATTRS(rwsem_write_kill,
 			   __acquires(_T), __releases(*(struct rw_semaphore **)_T))
 #define class_rwsem_write_kill_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_write_kill, _T)
+DEFINE_LOCK_GUARD_1(rwsem_init, struct rw_semaphore, init_rwsem(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwsem_init, __acquires(_T), __releases(*(struct rw_semaphore **)_T))
+#define class_rwsem_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwsem_init, _T)
+
 /*
  * downgrade write lock to read lock
  */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index c00063dffba3..077c8d5b2afd 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -14,6 +14,7 @@
  */
 #include
+#include
 #include
 #include
 #include
@@ -1358,4 +1359,8 @@ static __always_inline void __scoped_seqlock_cleanup_ctx(struct ss_tmp **s)
 #define scoped_seqlock_read(_seqlock, _target) \
 	__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))

+DEFINE_LOCK_GUARD_1(seqlock_init, seqlock_t, seqlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(seqlock_init, __acquires(_T), __releases(*(seqlock_t **)_T))
+#define class_seqlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(seqlock_init, _T)
+
 #endif /* __LINUX_SEQLOCK_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 396b8c5d6c1b..7b11991c742a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -582,6 +582,10 @@ DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
 DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
 #define class_raw_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_irqsave_try, _T)

+DEFINE_LOCK_GUARD_1(raw_spinlock_init, raw_spinlock_t, raw_spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(raw_spinlock_init, __acquires(_T), __releases(*(raw_spinlock_t **)_T))
+#define class_raw_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(raw_spinlock_init, _T)
+
 DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 		    spin_lock(_T->lock),
 		    spin_unlock(_T->lock))
@@ -626,6 +630,10 @@ DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
 DECLARE_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, __acquires(_T), __releases(*(spinlock_t **)_T))
 #define class_spinlock_irqsave_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_irqsave_try, _T)

+DEFINE_LOCK_GUARD_1(spinlock_init, spinlock_t, spin_lock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(spinlock_init, __acquires(_T), __releases(*(spinlock_t **)_T))
+#define class_spinlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(spinlock_init, _T)
+
 DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
 		    read_lock(_T->lock),
 		    read_unlock(_T->lock))
@@ -664,5 +672,9 @@ DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
 DECLARE_LOCK_GUARD_1_ATTRS(write_lock_irqsave, __acquires(_T), __releases(*(rwlock_t **)_T))
 #define class_write_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(write_lock_irqsave, _T)

+DEFINE_LOCK_GUARD_1(rwlock_init, rwlock_t, rwlock_init(_T->lock), /* */)
+DECLARE_LOCK_GUARD_1_ATTRS(rwlock_init, __acquires(_T), __releases(*(rwlock_t **)_T))
+#define class_rwlock_init_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rwlock_init, _T)
+
 #undef __LINUX_INSIDE_SPINLOCK_H
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
index 1c5a381461fc..0f05943d957f 100644
--- a/lib/test_context-analysis.c
+++ b/lib/test_context-analysis.c
@@ -35,7 +35,7 @@ static void __used test_common_helpers(void)
 	};								\
 	static void __used test_##class##_init(struct test_##class##_data *d)	\
 	{								\
-		type_init(&d->lock);					\
+		guard(type_init)(&d->lock);				\
 		d->counter = 0;						\
 	}								\
 	static void __used test_##class(struct test_##class##_data *d)	\
@@ -83,7 +83,7 @@ static void __used test_common_helpers(void)

 TEST_SPINLOCK_COMMON(raw_spinlock,
 		     raw_spinlock_t,
-		     raw_spin_lock_init,
+		     raw_spinlock_init,
 		     raw_spin_lock,
 		     raw_spin_unlock,
 		     raw_spin_trylock,
@@ -109,7 +109,7 @@ static void __used test_raw_spinlock_trylock_extra(struct test_raw_spinlock_data

 TEST_SPINLOCK_COMMON(spinlock,
 		     spinlock_t,
-		     spin_lock_init,
+		     spinlock_init,
 		     spin_lock,
 		     spin_unlock,
 		     spin_trylock,
@@ -163,7 +163,7 @@ struct test_mutex_data {

 static void __used test_mutex_init(struct test_mutex_data *d)
 {
-	mutex_init(&d->mtx);
+	guard(mutex_init)(&d->mtx);
 	d->counter = 0;
 }

@@ -226,7 +226,7 @@ struct test_seqlock_data {

 static void __used test_seqlock_init(struct test_seqlock_data *d)
 {
-	seqlock_init(&d->sl);
+	guard(seqlock_init)(&d->sl);
 	d->counter = 0;
 }

@@ -275,7 +275,7 @@ struct test_rwsem_data {

 static void __used test_rwsem_init(struct test_rwsem_data *d)
 {
-	init_rwsem(&d->sem);
+	guard(rwsem_init)(&d->sem);
 	d->counter = 0;
 }

@@ -475,7 +475,7 @@ static DEFINE_PER_CPU(struct test_local_lock_data, test_local_lock_data) = {

 static void __used test_local_lock_init(struct test_local_lock_data *d)
 {
-	local_lock_init(&d->lock);
+	guard(local_lock_init)(&d->lock);
 	d->counter = 0;
 }

@@ -519,7 +519,7 @@ static DEFINE_PER_CPU(struct test_local_trylock_data, test_local_trylock_data) =

 static void __used test_local_trylock_init(struct test_local_trylock_data *d)
 {
-	local_trylock_init(&d->lock);
+	guard(local_trylock_init)(&d->lock);
 	d->counter = 0;
 }
--
cgit v1.2.3


From b682b70d016f6aee20d91dcbaa319a932008a83a Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Mon, 19 Jan 2026 10:05:56 +0100
Subject: compiler-context-analysis: Remove __assume_ctx_lock from initializers

Remove __assume_ctx_lock() from lock initializers. Implicitly asserting
an active context during initialization caused false-positive
double-lock errors when acquiring a lock immediately after its
initialization.

Moving forward, guarded member initialization must either:

 1. Use guard(type_init)(&lock) or scoped_guard(type_init, ...).

 2. Use context_unsafe() for simple initialization.
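
For example, the removed assertion made the analysis treat a lock as
held directly after its initializer, so a subsequent acquisition looked
like a double-lock (a sketch; names are illustrative):

	spin_lock_init(&d->lock);	/* old: implicitly assumed held */
	spin_lock(&d->lock);		/* false-positive double-lock */

With the assumption gone, guarded members are initialized per option 1:

	scoped_guard(spinlock_init, &d->lock)
		d->counter = 0;

or, for simple cases, per option 2:

	spin_lock_init(&d->lock);
	context_unsafe(d->counter = 0);
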
Reported-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/57062131-e79e-42c2-aa0b-8f931cb8cac2@acm.org/
Link: https://patch.msgid.link/20260119094029.1344361-7-elver@google.com
---
 include/linux/local_lock_internal.h | 3 ---
 include/linux/mutex.h               | 1 -
 include/linux/rwlock.h              | 3 +--
 include/linux/rwlock_rt.h           | 1 -
 include/linux/rwsem.h               | 2 --
 include/linux/seqlock.h             | 1 -
 include/linux/spinlock.h            | 5 +----
 include/linux/spinlock_rt.h         | 1 -
 include/linux/ww_mutex.h            | 1 -
 lib/test_context-analysis.c         | 6 ------
 10 files changed, 2 insertions(+), 22 deletions(-)

(limited to 'include/linux/seqlock.h')

diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index ed2f3fb4c360..eff711bf973f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -87,13 +87,11 @@ do { \
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_PERCPU);			\
 	local_lock_debug_init(lock);				\
-	__assume_ctx_lock(lock);				\
 } while (0)

 #define __local_trylock_init(lock)				\
 do {								\
 	__local_lock_init((local_lock_t *)lock);		\
-	__assume_ctx_lock(lock);				\
 } while (0)

 #define __spinlock_nested_bh_init(lock)			\
 do {								\
@@ -105,7 +103,6 @@ do { \
 			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
 			      LD_LOCK_NORMAL);			\
 	local_lock_debug_init(lock);				\
-	__assume_ctx_lock(lock);				\
 } while (0)

 #define __local_lock_acquire(lock)				\
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 6b12009351d2..ecaa0440f6ec 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -62,7 +62,6 @@ do { \
 	static struct lock_class_key __key;		\
 							\
 	__mutex_init((mutex), #mutex, &__key);		\
-	__assume_ctx_lock(mutex);			\
 } while (0)

 /**
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 65a5b55e1bcd..3390d21c95dd 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -22,11 +22,10 @@ do { \
 	static struct lock_class_key __key;			\
 								\
 	__rwlock_init((lock), #lock, &__key);			\
-	__assume_ctx_lock(lock);				\
 } while (0)
 #else
 # define rwlock_init(lock)					\
-	do { *(lock) = __RW_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0)
+	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
 #endif

 #ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 37b387dcab21..5353abbfdc0b 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -22,7 +22,6 @@ do { \
 						\
 	init_rwbase_rt(&(rwl)->rwbase);		\
 	__rt_rwlock_init(rwl, #rwl, &__key);	\
-	__assume_ctx_lock(rwl);			\
 } while (0)

 extern void rt_read_lock(rwlock_t *rwlock)	__acquires_shared(rwlock);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ea1bbdb57a47..9bf1d93d3d7b 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -121,7 +121,6 @@ do { \
 	static struct lock_class_key __key;			\
 								\
 	__init_rwsem((sem), #sem, &__key);			\
-	__assume_ctx_lock(sem);					\
 } while (0)

 /*
@@ -175,7 +174,6 @@ do { \
 	static struct lock_class_key __key;			\
 								\
 	__init_rwsem((sem), #sem, &__key);			\
-	__assume_ctx_lock(sem);					\
 } while (0)

 static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 077c8d5b2afd..5a40252b8334 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -817,7 +817,6 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
 	do {							\
 		spin_lock_init(&(sl)->lock);			\
 		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
-		__assume_ctx_lock(sl);				\
 	} while (0)

 /**
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7b11991c742a..e1e2f144af9b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -106,12 +106,11 @@ do { \
 	static struct lock_class_key __key;				\
 									\
 	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
-	__assume_ctx_lock(lock);					\
 } while (0)
 #else
 # define raw_spin_lock_init(lock)					\
-	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0)
+	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif

 #define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
@@ -324,7 +323,6 @@ do { \
 									\
 	__raw_spin_lock_init(spinlock_check(lock),			\
 			     #lock, &__key, LD_WAIT_CONFIG);		\
-	__assume_ctx_lock(lock);					\
 } while (0)

 #else
@@ -333,7 +331,6 @@ do { \
 do {									\
 	spinlock_check(_lock);						\
 	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);				\
-	__assume_ctx_lock(_lock);					\
 } while (0)

 #endif
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 0a585768358f..373618a4243c 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -20,7 +20,6 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
 do {							\
 	rt_mutex_base_init(&(slock)->lock);		\
 	__rt_spin_lock_init(slock, name, key, percpu);	\
-	__assume_ctx_lock(slock);			\
 } while (0)

 #define _spin_lock_init(slock, percpu)			\
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 58e959ee10e9..c47d4b9b88b3 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -107,7 +107,6 @@ context_lock_struct(ww_acquire_ctx) {
  */
 static inline void ww_mutex_init(struct ww_mutex *lock,
 				 struct ww_class *ww_class)
-	__assumes_ctx_lock(lock)
 {
 	ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
 	lock->ctx = NULL;
diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
index 0f05943d957f..140efa8a9763 100644
--- a/lib/test_context-analysis.c
+++ b/lib/test_context-analysis.c
@@ -542,12 +542,6 @@ struct test_ww_mutex_data {
 	int counter __guarded_by(&mtx);
 };

-static void __used test_ww_mutex_init(struct test_ww_mutex_data *d)
-{
-	ww_mutex_init(&d->mtx, &ww_class);
-	d->counter = 0;
-}
-
 static void __used test_ww_mutex_lock_noctx(struct test_ww_mutex_data *d)
 {
 	if (!ww_mutex_lock(&d->mtx, NULL)) {
--
cgit v1.2.3