author     Linus Torvalds <torvalds@linux-foundation.org>  2025-01-21 21:10:24 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-01-21 21:10:24 +0300
commit     8838a1a2d219a86ab05e679c73f68dd75a25aca5 (patch)
tree       ec0d60fb3d798111ff88feffb4a2c23c227988f1 /rust/kernel
parent     b9d8a295ed6bc3cee846f8e3f27c9c6e9ebe43f2 (diff)
parent     cb4ccc70344c3dc29a5d0045361a4f0959bc5a6b (diff)
download   linux-8838a1a2d219a86ab05e679c73f68dd75a25aca5.tar.xz
Merge tag 'locking-core-2025-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Lockdep:
- Improve and fix lockdep bitsize limits, clarify the Kconfig
documentation (Carlos Llamas)
- Fix lockdep build warning on Clang related to
chain_hlock_class_idx() inlining (Andy Shevchenko)
- Relax the requirements of PROVE_RAW_LOCK_NESTING arch support by
not tying it to ARCH_SUPPORTS_RT unnecessarily (Waiman Long)
Rust integration:
- Support lock pointers managed by the C side (Lyude Paul)
- Support guard types (Lyude Paul); a usage sketch for these two
  items follows this message
- Update MAINTAINERS file filters to include the Rust locking code
(Boqun Feng)
Wake-queues:
- Add raw_spin_*wake() helpers to simplify locking code (John Stultz)
SMP cross-calls:
- Fix potential data update race by evaluating the local cond_func()
before IPI side-effects (Mathieu Desnoyers)
Guard primitives:
- Ease [c]tags based searches by including the cleanup/guard type
primitives (Peter Zijlstra)
ww_mutexes:
- Simplify the ww_mutex self-test code via swap() (Thorsten Blum)
Static calls:
- Update the static calls MAINTAINERS file-pattern (Jiri Slaby)"
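The two Rust locking items above land as `Lock::from_raw()`, a now-public `Guard::new()` backed by the new `Backend::assert_is_held()` lockdep check, and the `MutexGuard`/`SpinLockGuard` aliases (full diff below). The sketch that follows shows how a driver might adopt a mutex initialised on the C side; `CDevice`, `lock_device()`, and the `kernel::bindings` import path are assumptions made for illustration, not code from this merge:

```rust
use core::ptr::addr_of_mut;

use kernel::bindings; // import path assumed for this sketch
use kernel::sync::lock::{mutex::MutexBackend, Lock};
use kernel::sync::MutexGuard;

/// Hypothetical C struct with an embedded `struct mutex` that the C side
/// initialised via `mutex_init()`.
#[repr(C)]
struct CDevice {
    lock: bindings::mutex,
    busy: bool,
}

/// Wraps the C-initialised mutex as a Rust [`Lock`] and acquires it.
///
/// # Safety
///
/// `dev` must point to a `CDevice` whose `lock` has been initialised and
/// which stays alive for the whole lifetime `'a`.
unsafe fn lock_device<'a>(dev: *mut CDevice) -> MutexGuard<'a, ()> {
    // SAFETY: per this function's contract, `dev` is valid, so taking the
    // address of its `lock` field is sound.
    let state = unsafe { addr_of_mut!((*dev).lock) };

    // SAFETY: per this function's contract, the pointed-to mutex is
    // initialised and lives for the whole of `'a`.
    let lock: &'a Lock<(), MutexBackend> = unsafe { Lock::from_raw(state) };

    // An ordinary lock() call; the returned guard unlocks on drop.
    lock.lock()
}
```

Because `Guard::new()` is now public, code that acquired such a lock on the C side can also construct a guard for it directly, and the new `assert_is_held()` hook lets lockdep catch a guard being built for a lock that is not actually held.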
* tag 'locking-core-2025-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
MAINTAINERS: Add static_call_inline.c to STATIC BRANCH/CALL
cleanup, tags: Create tags for the cleanup primitives
sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled
rust: sync: Add lock::Backend::assert_is_held()
rust: sync: Add SpinLockGuard type alias
rust: sync: Add MutexGuard type alias
rust: sync: Make Guard::new() public
rust: sync: Add Lock::from_raw() for Lock<(), B>
locking: MAINTAINERS: Start watching Rust locking primitives
lockdep: Move lockdep_assert_locked() under #ifdef CONFIG_PROVE_LOCKING
lockdep: Mark chain_hlock_class_idx() with __maybe_unused
lockdep: Document MAX_LOCKDEP_CHAIN_HLOCKS calculation
lockdep: Clarify size for LOCKDEP_*_BITS configs
lockdep: Fix upper limit for LOCKDEP_*_BITS configs
locking/ww_mutex/test: Use swap() macro
smp/scf: Evaluate local cond_func() before IPI side-effects
locking/lockdep: Enforce PROVE_RAW_LOCK_NESTING only if ARCH_SUPPORTS_RT
Diffstat (limited to 'rust/kernel')
-rw-r--r--  rust/kernel/sync.rs               |  4
-rw-r--r--  rust/kernel/sync/lock.rs          | 35
-rw-r--r--  rust/kernel/sync/lock/mutex.rs    | 13
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs | 13

4 files changed, 62 insertions(+), 3 deletions(-)
```diff
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 1eab7ebf25fd..dffdaad972ce 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -16,8 +16,8 @@ pub mod poll;
 pub use arc::{Arc, ArcBorrow, UniqueArc};
 pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
 pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
-pub use lock::mutex::{new_mutex, Mutex};
-pub use lock::spinlock::{new_spinlock, SpinLock};
+pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
+pub use lock::spinlock::{new_spinlock, SpinLock, SpinLockGuard};
 pub use locked_by::LockedBy;
 
 /// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 41dcddac69e2..eb80048e0110 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -90,12 +90,20 @@ pub unsafe trait Backend {
         // SAFETY: The safety requirements ensure that the lock is initialised.
         *guard_state = unsafe { Self::lock(ptr) };
     }
+
+    /// Asserts that the lock is held using lockdep.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that [`Backend::init`] has been previously called.
+    unsafe fn assert_is_held(ptr: *mut Self::State);
 }
 
 /// A mutual exclusion primitive.
 ///
 /// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
 /// [`Backend`] specified as the generic parameter `B`.
+#[repr(C)]
 #[pin_data]
 pub struct Lock<T: ?Sized, B: Backend> {
     /// The kernel lock object.
@@ -134,6 +142,28 @@ impl<T, B: Backend> Lock<T, B> {
     }
 }
 
+impl<B: Backend> Lock<(), B> {
+    /// Constructs a [`Lock`] from a raw pointer.
+    ///
+    /// This can be useful for interacting with a lock which was initialised outside of Rust.
+    ///
+    /// # Safety
+    ///
+    /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
+    /// the whole lifetime of `'a`.
+    ///
+    /// [`State`]: Backend::State
+    pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
+        // SAFETY:
+        // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
+        // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
+        //   the struct
+        // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
+        //   `B::State`.
+        unsafe { &*ptr.cast() }
+    }
+}
+
 impl<T: ?Sized, B: Backend> Lock<T, B> {
     /// Acquires the lock and gives the caller access to the data protected by it.
     pub fn lock(&self) -> Guard<'_, T, B> {
@@ -211,7 +241,10 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
     /// # Safety
     ///
     /// The caller must ensure that it owns the lock.
-    pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+    pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+        // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+        unsafe { B::assert_is_held(lock.state.get()) };
+
         Self {
             lock,
             state,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 0e946ebefce1..70cadbc2e8e2 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -86,6 +86,14 @@ pub use new_mutex;
 /// [`struct mutex`]: srctree/include/linux/mutex.h
 pub type Mutex<T> = super::Lock<T, MutexBackend>;
 
+/// A [`Guard`] acquired from locking a [`Mutex`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`Mutex`]. It will unlock
+/// the [`Mutex`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
+pub type MutexGuard<'a, T> = super::Guard<'a, T, MutexBackend>;
+
 /// A kernel `struct mutex` lock backend.
 pub struct MutexBackend;
 
@@ -126,4 +134,9 @@ unsafe impl super::Backend for MutexBackend {
             None
         }
     }
+
+    unsafe fn assert_is_held(ptr: *mut Self::State) {
+        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+        unsafe { bindings::mutex_assert_is_held(ptr) }
+    }
 }
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 9f4d128bed98..ab2f8d075311 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -87,6 +87,14 @@ pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
 /// A kernel `spinlock_t` lock backend.
 pub struct SpinLockBackend;
 
+/// A [`Guard`] acquired from locking a [`SpinLock`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`SpinLock`]. It will unlock
+/// the [`SpinLock`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
+pub type SpinLockGuard<'a, T> = super::Guard<'a, T, SpinLockBackend>;
+
 // SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the
 // default implementation that always calls the same locking method.
 unsafe impl super::Backend for SpinLockBackend {
@@ -125,4 +133,9 @@ unsafe impl super::Backend for SpinLockBackend {
             None
         }
     }
+
+    unsafe fn assert_is_held(ptr: *mut Self::State) {
+        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+        unsafe { bindings::spin_assert_is_held(ptr) }
+    }
 }
```
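As the mutex and spinlock hunks show, `MutexGuard` and `SpinLockGuard` are plain type aliases for `Guard`, so their main payoff is in signatures that hand a guard across function boundaries. A minimal sketch of that use; `lock_stats()` and `bump()` are illustrative names, not code from this merge:

```rust
use kernel::sync::{Mutex, MutexGuard};

/// Returns the guard without spelling out `Guard<'_, u64, MutexBackend>`.
fn lock_stats(stats: &Mutex<u64>) -> MutexGuard<'_, u64> {
    stats.lock()
}

fn bump(stats: &Mutex<u64>) {
    let mut guard = lock_stats(stats);
    // The guard derefs to the protected data; the mutex unlocks on drop.
    *guard += 1;
}
```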