Diffstat (limited to 'rust')
 rust/Makefile                      |   2
 rust/kernel/alloc/kvec.rs          | 216
 rust/kernel/drm/device.rs          |  22
 rust/kernel/drm/gem/mod.rs         |  13
 rust/kernel/drm/gem/shmem.rs       |   6
 rust/kernel/sync/aref.rs           |  22
 rust/kernel/task.rs                |   9
 rust/kernel/uaccess.rs             |   2
 rust/pin-init/internal/src/init.rs | 184
 rust/pin-init/src/__internal.rs    |  28
10 files changed, 381 insertions, 123 deletions
diff --git a/rust/Makefile b/rust/Makefile
index b361bfedfdf0..b9e9f512cec3 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -403,6 +403,8 @@ BINDGEN_TARGET_x86 := x86_64-linux-gnu
 BINDGEN_TARGET_arm64 := aarch64-linux-gnu
 BINDGEN_TARGET_arm := arm-linux-gnueabi
 BINDGEN_TARGET_loongarch := loongarch64-linux-gnusf
+# This is only for i386 UM builds, which need the 32-bit target, not -m32.
+BINDGEN_TARGET_i386 := i386-linux-gnu
 BINDGEN_TARGET_um := $(BINDGEN_TARGET_$(SUBARCH))
 BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index ac8d6f763ae8..6438385e4322 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -9,7 +9,10 @@ use super::{
 };
 use crate::{
     fmt,
-    page::AsPageIter, //
+    page::{
+        AsPageIter,
+        PAGE_SIZE, //
+    },
 };
 use core::{
     borrow::{Borrow, BorrowMut},
@@ -734,6 +737,115 @@ where
         self.truncate(num_kept);
     }
 }
 
+// TODO: This is a temporary KVVec-specific implementation. It should be replaced with a generic
+// `shrink_to()` for `impl<T, A: Allocator> Vec<T, A>` that uses `A::realloc()` once the
+// underlying allocators properly support shrinking via realloc.
+impl<T> Vec<T, KVmalloc> {
+    /// Shrinks the capacity of the vector with a lower bound.
+    ///
+    /// The capacity will remain at least as large as both the length and the supplied value.
+    /// If the current capacity is less than the lower limit, this is a no-op.
+    ///
+    /// For `kmalloc` allocations, this delegates to `realloc()`, which decides whether
+    /// shrinking is worthwhile. For `vmalloc` allocations, shrinking only occurs if the
+    /// operation would free at least one page of memory, and performs a deep copy since
+    /// `vrealloc` does not yet support in-place shrinking.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// // Allocate enough capacity to span multiple pages.
+    /// let elements_per_page = kernel::page::PAGE_SIZE / core::mem::size_of::<u32>();
+    /// let mut v = KVVec::with_capacity(elements_per_page * 4, GFP_KERNEL)?;
+    /// v.push(1, GFP_KERNEL)?;
+    /// v.push(2, GFP_KERNEL)?;
+    ///
+    /// v.shrink_to(0, GFP_KERNEL)?;
+    /// # Ok::<(), Error>(())
+    /// ```
+    pub fn shrink_to(&mut self, min_capacity: usize, flags: Flags) -> Result<(), AllocError> {
+        let target_cap = core::cmp::max(self.len(), min_capacity);
+
+        if self.capacity() <= target_cap {
+            return Ok(());
+        }
+
+        if Self::is_zst() {
+            return Ok(());
+        }
+
+        // For kmalloc allocations, delegate to realloc() and let the allocator decide
+        // whether shrinking is worthwhile.
+        //
+        // SAFETY: `self.ptr` points to a valid `KVmalloc` allocation.
+        if !unsafe { bindings::is_vmalloc_addr(self.ptr.as_ptr().cast()) } {
+            let new_layout = ArrayLayout::<T>::new(target_cap).map_err(|_| AllocError)?;
+
+            // SAFETY:
+            // - `self.ptr` is valid and was previously allocated with `KVmalloc`.
+            // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+            let ptr = unsafe {
+                KVmalloc::realloc(
+                    Some(self.ptr.cast()),
+                    new_layout.into(),
+                    self.layout.into(),
+                    flags,
+                    NumaNode::NO_NODE,
+                )?
+            };
+
+            self.ptr = ptr.cast();
+            self.layout = new_layout;
+            return Ok(());
+        }
+
+        // Only shrink if we would free at least one page.
+        let current_size = self.capacity() * core::mem::size_of::<T>();
+        let target_size = target_cap * core::mem::size_of::<T>();
+        let current_pages = current_size.div_ceil(PAGE_SIZE);
+        let target_pages = target_size.div_ceil(PAGE_SIZE);
+
+        if current_pages <= target_pages {
+            return Ok(());
+        }
+
+        if target_cap == 0 {
+            if !self.layout.is_empty() {
+                // SAFETY:
+                // - `self.ptr` was previously allocated with `KVmalloc`.
+                // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+                unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) };
+            }
+            self.ptr = NonNull::dangling();
+            self.layout = ArrayLayout::empty();
+            return Ok(());
+        }
+
+        // SAFETY: `target_cap <= self.capacity()` and the original capacity was valid.
+        let new_layout = unsafe { ArrayLayout::<T>::new_unchecked(target_cap) };
+
+        let new_ptr = KVmalloc::alloc(new_layout.into(), flags, NumaNode::NO_NODE)?;
+
+        // SAFETY:
+        // - `self.as_ptr()` is valid for reads of `self.len()` elements of `T`.
+        // - `new_ptr` is valid for writes of at least `target_cap >= self.len()` elements.
+        // - The two allocations do not overlap since `new_ptr` is freshly allocated.
+        // - Both pointers are properly aligned for `T`.
+        unsafe {
+            ptr::copy_nonoverlapping(self.as_ptr(), new_ptr.as_ptr().cast::<T>(), self.len())
+        };
+
+        // SAFETY:
+        // - `self.ptr` was previously allocated with `KVmalloc`.
+        // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+        unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) };
+
+        self.ptr = new_ptr.cast::<T>();
+        self.layout = new_layout;
+
+        Ok(())
+    }
+}
+
 impl<T: Clone, A: Allocator> Vec<T, A> {
     /// Extend the vector by `n` clones of `value`.
@@ -1398,4 +1510,106 @@ mod tests {
             func.push_within_capacity(false).unwrap();
         }
     }
+
+    #[test]
+    fn test_kvvec_shrink_to() {
+        use crate::page::PAGE_SIZE;
+
+        // Create a vector with capacity spanning multiple pages.
+        let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap();
+
+        // Add a few elements.
+        v.push(1, GFP_KERNEL).unwrap();
+        v.push(2, GFP_KERNEL).unwrap();
+        v.push(3, GFP_KERNEL).unwrap();
+
+        let initial_capacity = v.capacity();
+        assert!(initial_capacity >= PAGE_SIZE * 4);
+
+        // Shrink to a capacity that would free at least one page.
+        v.shrink_to(PAGE_SIZE, GFP_KERNEL).unwrap();
+
+        // Capacity should have been reduced.
+        assert!(v.capacity() < initial_capacity);
+        assert!(v.capacity() >= PAGE_SIZE);
+
+        // Elements should be preserved.
+        assert_eq!(v.len(), 3);
+        assert_eq!(v[0], 1);
+        assert_eq!(v[1], 2);
+        assert_eq!(v[2], 3);
+
+        // Shrink to zero (this shrinks to the length).
+        v.shrink_to(0, GFP_KERNEL).unwrap();
+
+        // Capacity should be at least the length.
+        assert!(v.capacity() >= v.len());
+
+        // Elements should still be preserved.
+        assert_eq!(v.len(), 3);
+        assert_eq!(v[0], 1);
+        assert_eq!(v[1], 2);
+        assert_eq!(v[2], 3);
+    }
+
+    #[test]
+    fn test_kvvec_shrink_to_empty() {
+        use crate::page::PAGE_SIZE;
+
+        // Create a vector with large capacity but no elements.
+        let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap();
+
+        assert!(v.is_empty());
+
+        // Shrink the empty vector to zero.
+        v.shrink_to(0, GFP_KERNEL).unwrap();
+
+        // The allocation should have been freed.
+        assert_eq!(v.capacity(), 0);
+        assert!(v.is_empty());
+    }
+
+    #[test]
+    fn test_kvvec_shrink_to_no_op() {
+        use crate::page::PAGE_SIZE;
+
+        // Create a small vector.
+        let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE, GFP_KERNEL).unwrap();
+        v.push(1, GFP_KERNEL).unwrap();
+
+        let capacity_before = v.capacity();
+
+        // Try to shrink to a capacity larger than the current one; this must be a no-op.
+        v.shrink_to(capacity_before + 100, GFP_KERNEL).unwrap();
+
+        assert_eq!(v.capacity(), capacity_before);
+        assert_eq!(v.len(), 1);
+        assert_eq!(v[0], 1);
+    }
+
+    #[test]
+    fn test_kvvec_shrink_to_respects_min_capacity() {
+        use crate::page::PAGE_SIZE;
+
+        // Create a vector with large capacity.
+        let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap();
+
+        // Add some elements.
+        for i in 0..10u8 {
+            v.push(i, GFP_KERNEL).unwrap();
+        }
+
+        // Shrink to a `min_capacity` larger than the length.
+        let min_cap = PAGE_SIZE * 2;
+        v.shrink_to(min_cap, GFP_KERNEL).unwrap();
+
+        // Capacity should be at least `min_capacity`.
+        assert!(v.capacity() >= min_cap);
+
+        // All elements are preserved.
+        assert_eq!(v.len(), 10);
+        for i in 0..10u8 {
+            assert_eq!(v[i as usize], i);
+        }
+    }
 }
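
The interesting policy above is the page-granularity cutoff: for vmalloc-backed buffers, a shrink that cannot hand at least one whole page back to the system is not worth a deep copy. A minimal userspace sketch of that decision logic, using std's infallible `Vec` and an assumed 4 KiB page size (`PAGE_SIZE` and `shrink_if_page_freed` are illustrative names here, not the kernel API):

const PAGE_SIZE: usize = 4096; // assumption: 4 KiB pages, purely for illustration

/// Shrink `v` toward `min_capacity`, but only if at least one page is freed.
fn shrink_if_page_freed<T>(v: &mut Vec<T>, min_capacity: usize) {
    let target_cap = v.len().max(min_capacity);
    let elem = core::mem::size_of::<T>();
    let current_pages = (v.capacity() * elem).div_ceil(PAGE_SIZE);
    let target_pages = (target_cap * elem).div_ceil(PAGE_SIZE);
    // Mirrors the patch: a shrink that cannot return a whole page is a no-op.
    if current_pages > target_pages {
        v.shrink_to(target_cap);
    }
}

fn main() {
    // Four pages' worth of u32 capacity, but only three live elements.
    let mut v: Vec<u32> = Vec::with_capacity(4 * PAGE_SIZE / 4);
    v.extend([1, 2, 3]);

    shrink_if_page_freed(&mut v, 0);
    assert!(v.capacity() < 4 * PAGE_SIZE / 4); // at least one page was freed
    assert_eq!(v, [1, 2, 3]); // contents preserved
}

The kernel version additionally threads `flags` through and surfaces allocation failure as `Result`, which std's `shrink_to` hides.
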
diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
index adbafe8db54d..403fc35353c7 100644
--- a/rust/kernel/drm/device.rs
+++ b/rust/kernel/drm/device.rs
@@ -119,13 +119,20 @@ impl<T: drm::Driver> Device<T> {
         // compatible `Layout`.
         let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
 
+        // Use a temporary vtable without a `release` callback until `data` is initialized, so
+        // that init failure can release the DRM device without dropping uninitialized fields.
+        let alloc_vtable = bindings::drm_driver {
+            release: None,
+            ..Self::VTABLE
+        };
+
         // SAFETY:
-        // - `VTABLE`, as a `const` is pinned to the read-only section of the compilation,
+        // - `alloc_vtable` remains valid until it is no longer used,
         // - `dev` is valid by its type invarants,
         let raw_drm: *mut Self = unsafe {
             bindings::__drm_dev_alloc(
                 dev.as_raw(),
-                &Self::VTABLE,
+                &alloc_vtable,
                 layout.size(),
                 mem::offset_of!(Self, dev),
             )
@@ -133,6 +140,10 @@ impl<T: drm::Driver> Device<T> {
         .cast();
         let raw_drm = NonNull::new(from_err_ptr(raw_drm)?).ok_or(ENOMEM)?;
 
+        // SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
+        // successful.
+        let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
+
         // SAFETY: `raw_drm` is a valid pointer to `Self`.
         let raw_data = unsafe { ptr::addr_of_mut!((*raw_drm.as_ptr()).data) };
 
@@ -140,15 +151,14 @@ impl<T: drm::Driver> Device<T> {
         // - `raw_data` is a valid pointer to uninitialized memory.
         // - `raw_data` will not move until it is dropped.
         unsafe { data.__pinned_init(raw_data) }.inspect_err(|_| {
-            // SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
-            // successful.
-            let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
-
             // SAFETY: `__drm_dev_alloc()` was successful, hence `drm_dev` must be valid and the
             // refcount must be non-zero.
             unsafe { bindings::drm_dev_put(drm_dev) };
         })?;
 
+        // SAFETY: `drm_dev` is still private to this function.
+        unsafe { (*drm_dev).driver = const { &Self::VTABLE } };
+
         // SAFETY: The reference count is one, and now we take ownership of that reference as a
         // `drm::Device`.
         Ok(unsafe { ARef::from_raw(raw_drm) })
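
The device.rs change is a two-phase vtable setup: allocate with a `release`-less vtable so that an error during `data` initialization cannot trigger a callback that touches uninitialized fields, then install the full vtable once initialization has succeeded. A standalone sketch of the same shape, with made-up `Vtable` and `Device` types standing in for the DRM bindings:

#[derive(Clone, Copy)]
struct Vtable {
    release: Option<fn(&mut Device)>,
}

#[allow(dead_code)]
struct Device {
    data: String,
    vtable: Vtable,
}

// The "real" vtable, analogous to `Self::VTABLE` in the patch.
const FULL_VTABLE: Vtable = Vtable {
    release: Some(|dev| println!("releasing {}", dev.data)),
};

fn try_new(data: Result<String, ()>) -> Result<Device, ()> {
    // Phase 1: a vtable with `release` stubbed out, so an early failure can
    // never invoke a callback that assumes `data` is fully initialized.
    let mut dev = Device {
        data: String::new(), // placeholder, analogous to the uninitialized field
        vtable: Vtable { release: None, ..FULL_VTABLE },
    };

    dev.data = data?; // fallible initialization; on error, no `release` runs

    // Phase 2: every field is initialized, so the full vtable can go live.
    dev.vtable = FULL_VTABLE;
    Ok(dev)
}

fn main() {
    assert!(try_new(Err(())).is_err());
    assert!(try_new(Ok(String::from("card0"))).is_ok());
}
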
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
index 75acda7ba500..01b5bd47a333 100644
--- a/rust/kernel/drm/gem/mod.rs
+++ b/rust/kernel/drm/gem/mod.rs
@@ -277,8 +277,17 @@ impl<T: DriverObject> Object<T> {
         // SAFETY: `obj.as_raw()` is guaranteed to be valid by the initialization above.
         unsafe { (*obj.as_raw()).funcs = &Self::OBJECT_FUNCS };
 
-        // SAFETY: The arguments are all valid per the type invariants.
-        to_result(unsafe { bindings::drm_gem_object_init(dev.as_raw(), obj.obj.get(), size) })?;
+        if let Err(err) =
+            // SAFETY: The arguments are all valid per the type invariants.
+            to_result(unsafe {
+                bindings::drm_gem_object_init(dev.as_raw(), obj.obj.get(), size)
+            })
+        {
+            // SAFETY: `drm_gem_object_init()` initializes the private GEM object state before
+            // failing, so `drm_gem_private_object_fini()` is the matching cleanup.
+            unsafe { bindings::drm_gem_private_object_fini(obj.obj.get()) };
+            return Err(err);
+        }
 
         // SAFETY: We will never move out of `Self` as `ARef<Self>` is always treated as pinned.
         let ptr = KBox::into_raw(unsafe { Pin::into_inner_unchecked(obj) });
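
The gem/mod.rs hunk fixes an error-path leak: `drm_gem_object_init()` can fail after it has already set up private object state, and that state needs the matching `_fini()` before the error propagates. The shape of the fix, reduced to safe Rust with a hypothetical `init`/`fini` pair:

struct RawObj {
    initialized: bool,
}

// Hypothetical C-style pair: `init` may fail *after* partial setup,
// like `drm_gem_object_init()` in the patch.
fn obj_init(obj: &mut RawObj, size: usize) -> Result<(), i32> {
    obj.initialized = true; // private state is set up first...
    if size == 0 {
        return Err(-22); // ...and a later step may still fail (EINVAL)
    }
    Ok(())
}

fn obj_fini(obj: &mut RawObj) {
    obj.initialized = false;
}

fn create(size: usize) -> Result<RawObj, i32> {
    let mut obj = RawObj { initialized: false };
    if let Err(err) = obj_init(&mut obj, size) {
        // The fix: run the matching cleanup before propagating the error,
        // instead of leaking the partially initialized state.
        obj_fini(&mut obj);
        return Err(err);
    }
    Ok(obj)
}

fn main() {
    assert!(create(0).is_err());
    assert!(create(4096).unwrap().initialized);
}
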
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs
index d025fb035195..e1b648920d2f 100644
--- a/rust/kernel/drm/gem/shmem.rs
+++ b/rust/kernel/drm/gem/shmem.rs
@@ -19,10 +19,8 @@ use crate::{
     },
     error::to_result,
     prelude::*,
-    types::{
-        ARef,
-        Opaque, //
-    }, //
+    sync::aref::ARef,
+    types::Opaque, //
 };
 use core::{
     ops::{
diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs
index 0616c0353c2b..9989f56d0605 100644
--- a/rust/kernel/sync/aref.rs
+++ b/rust/kernel/sync/aref.rs
@@ -170,3 +170,25 @@ impl<T: AlwaysRefCounted> Drop for ARef<T> {
         unsafe { T::dec_ref(self.ptr) };
     }
 }
+
+impl<T, U> PartialEq<ARef<U>> for ARef<T>
+where
+    T: AlwaysRefCounted + PartialEq<U>,
+    U: AlwaysRefCounted,
+{
+    #[inline]
+    fn eq(&self, other: &ARef<U>) -> bool {
+        T::eq(&**self, &**other)
+    }
+}
+impl<T: AlwaysRefCounted + Eq> Eq for ARef<T> {}
+
+impl<T, U> PartialEq<&'_ U> for ARef<T>
+where
+    T: AlwaysRefCounted + PartialEq<U>,
+{
+    #[inline]
+    fn eq(&self, other: &&U) -> bool {
+        T::eq(&**self, other)
+    }
+}
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
index 049c8a4d45d8..38273f4eedb5 100644
--- a/rust/kernel/task.rs
+++ b/rust/kernel/task.rs
@@ -361,6 +361,15 @@ unsafe impl crate::sync::aref::AlwaysRefCounted for Task {
     }
 }
 
+impl PartialEq for Task {
+    #[inline]
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self.as_ptr(), other.as_ptr())
+    }
+}
+
+impl Eq for Task {}
+
 impl Kuid {
     /// Get the current euid.
     #[inline]
diff --git a/rust/kernel/uaccess.rs b/rust/kernel/uaccess.rs
index 6c9c1cce3c63..5f6c4d7a1a51 100644
--- a/rust/kernel/uaccess.rs
+++ b/rust/kernel/uaccess.rs
@@ -21,7 +21,7 @@ use core::mem::{size_of, MaybeUninit};
 ///
 /// This is the Rust equivalent to C pointers tagged with `__user`.
 #[repr(transparent)]
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Zeroable)]
 pub struct UserPtr(*mut c_void);
 
 impl UserPtr {
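
The aref.rs and task.rs hunks are designed to compose: `ARef<T>` forwards `PartialEq` to the pointee, and `Task` defines equality as pointer identity, so two `ARef<Task>`s compare equal exactly when they refer to the same task. The same layering sketched in userspace, with `Arc` standing in for `ARef` (`Arc` likewise delegates `==` to its pointee):

use std::{ptr, sync::Arc};

// Stand-in for `Task`: equality means identity, not field-by-field comparison.
struct Task {
    pid: i32,
}

impl PartialEq for Task {
    fn eq(&self, other: &Self) -> bool {
        ptr::eq(self, other)
    }
}
impl Eq for Task {}

fn main() {
    let a = Arc::new(Task { pid: 1 });
    let b = Arc::clone(&a);
    let c = Arc::new(Task { pid: 1 });

    // Like the patched `ARef`, `Arc` delegates `==` to the pointee.
    assert!(a == b); // same underlying task
    assert!(a != c); // equal fields, but a different task
    assert_eq!(a.pid, c.pid);
}
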
diff --git a/rust/pin-init/internal/src/init.rs b/rust/pin-init/internal/src/init.rs
index daa3f1c6466e..487ee0013faf 100644
--- a/rust/pin-init/internal/src/init.rs
+++ b/rust/pin-init/internal/src/init.rs
@@ -249,22 +249,6 @@ fn init_fields(
             });
             // Again span for better diagnostics
             let write = quote_spanned!(ident.span()=> ::core::ptr::write);
-            // NOTE: the field accessor ensures that the initialized field is properly aligned.
-            // Unaligned fields will cause the compiler to emit E0793. We do not support
-            // unaligned fields since `Init::__init` requires an aligned pointer; the call to
-            // `ptr::write` below has the same requirement.
-            let accessor = if pinned {
-                let project_ident = format_ident!("__project_{ident}");
-                quote! {
-                    // SAFETY: TODO
-                    unsafe { #data.#project_ident(&mut (*#slot).#ident) }
-                }
-            } else {
-                quote! {
-                    // SAFETY: TODO
-                    unsafe { &mut (*#slot).#ident }
-                }
-            };
             quote! {
                 #(#attrs)*
                 {
@@ -272,51 +256,31 @@ fn init_fields(
                     // SAFETY: TODO
                     unsafe { #write(&raw mut (*#slot).#ident, #value_ident) };
                 }
-                #(#cfgs)*
-                #[allow(unused_variables)]
-                let #ident = #accessor;
             }
         }
         InitializerKind::Init { ident, value, .. } => {
             // Again span for better diagnostics
             let init = format_ident!("init", span = value.span());
-            // NOTE: the field accessor ensures that the initialized field is properly aligned.
-            // Unaligned fields will cause the compiler to emit E0793. We do not support
-            // unaligned fields since `Init::__init` requires an aligned pointer; the call to
-            // `ptr::write` below has the same requirement.
-            let (value_init, accessor) = if pinned {
-                let project_ident = format_ident!("__project_{ident}");
-                (
-                    quote! {
-                        // SAFETY:
-                        // - `slot` is valid, because we are inside of an initializer closure, we
-                        //   return when an error/panic occurs.
-                        // - We also use `#data` to require the correct trait (`Init` or `PinInit`)
-                        //   for `#ident`.
-                        unsafe { #data.#ident(&raw mut (*#slot).#ident, #init)? };
-                    },
-                    quote! {
-                        // SAFETY: TODO
-                        unsafe { #data.#project_ident(&mut (*#slot).#ident) }
-                    },
-                )
+            let value_init = if pinned {
+                quote! {
+                    // SAFETY:
+                    // - `slot` is valid, because we are inside of an initializer closure, we
+                    //   return when an error/panic occurs.
+                    // - We also use `#data` to require the correct trait (`Init` or `PinInit`)
+                    //   for `#ident`.
+                    unsafe { #data.#ident(&raw mut (*#slot).#ident, #init)? };
+                }
             } else {
-                (
-                    quote! {
-                        // SAFETY: `slot` is valid, because we are inside of an initializer
-                        // closure, we return when an error/panic occurs.
-                        unsafe {
-                            ::pin_init::Init::__init(
-                                #init,
-                                &raw mut (*#slot).#ident,
-                            )?
-                        };
-                    },
-                    quote! {
-                        // SAFETY: TODO
-                        unsafe { &mut (*#slot).#ident }
-                    },
-                )
+                quote! {
+                    // SAFETY: `slot` is valid, because we are inside of an initializer
+                    // closure, we return when an error/panic occurs.
+                    unsafe {
+                        ::pin_init::Init::__init(
+                            #init,
+                            &raw mut (*#slot).#ident,
+                        )?
+                    };
+                }
             };
             quote! {
                 #(#attrs)*
                 {
                     let #init = #value;
                     #value_init
                 }
-                #(#cfgs)*
-                #[allow(unused_variables)]
-                let #ident = #accessor;
             }
         }
         InitializerKind::Code { block: value, .. } => quote! {
@@ -339,18 +300,41 @@ fn init_fields(
         if let Some(ident) = kind.ident() {
             // `mixed_site` ensures that the guard is not accessible to the user-controlled code.
             let guard = format_ident!("__{ident}_guard", span = Span::mixed_site());
+
+            // NOTE: The reference is derived from the guard so that it only lives as long as
+            // the guard does and cannot escape the scope. If it were created via
+            // `&mut (*#slot).#ident`, as the old accessor was, it would effectively become
+            // `'static`.
+            let accessor = if pinned {
+                let project_ident = format_ident!("__project_{ident}");
+                quote! {
+                    // SAFETY: the initialization is pinned.
+                    unsafe { #data.#project_ident(#guard.let_binding()) }
+                }
+            } else {
+                quote! {
+                    #guard.let_binding()
+                }
+            };
+
             res.extend(quote! {
                 #(#cfgs)*
-                // Create the drop guard:
+                // Create the drop guard.
                 //
-                // We rely on macro hygiene to make it impossible for users to access this local
-                // variable.
-                // SAFETY: We forget the guard later when initialization has succeeded.
-                let #guard = unsafe {
+                // SAFETY:
+                // - `&raw mut (*slot).#ident` is valid.
+                // - `make_field_check` checks that `&raw mut (*slot).#ident` is properly aligned.
+                // - `(*slot).#ident` has been initialized above.
+                // - We only take ownership of the pointee back when initialization has
+                //   succeeded, at which point we `forget` the guard.
+                let mut #guard = unsafe {
                     ::pin_init::__internal::DropGuard::new(
                         &raw mut (*slot).#ident
                     )
                 };
+
+                #(#cfgs)*
+                #[allow(unused_variables)]
+                let #ident = #accessor;
            });
             guards.push(guard);
             guard_attrs.push(cfgs);
@@ -367,49 +351,49 @@
     }
 }
 
-/// Generate the check for ensuring that every field has been initialized.
+/// Generate the check for ensuring that every field has been initialized and is properly aligned.
 fn make_field_check(
     fields: &Punctuated<InitializerField, Token![,]>,
     init_kind: InitKind,
     path: &Path,
 ) -> TokenStream {
-    let field_attrs = fields
+    let field_attrs: Vec<_> = fields
         .iter()
-        .filter_map(|f| f.kind.ident().map(|_| &f.attrs));
-    let field_name = fields.iter().filter_map(|f| f.kind.ident());
-    match init_kind {
-        InitKind::Normal => quote! {
-            // We use unreachable code to ensure that all fields have been mentioned exactly once,
-            // this struct initializer will still be type-checked and complain with a very natural
-            // error message if a field is forgotten/mentioned more than once.
-            #[allow(unreachable_code, clippy::diverging_sub_expression)]
-            // SAFETY: this code is never executed.
-            let _ = || unsafe {
-                ::core::ptr::write(slot, #path {
-                    #(
-                        #(#field_attrs)*
-                        #field_name: ::core::panic!(),
-                    )*
-                })
-            };
-        },
-        InitKind::Zeroing => quote! {
-            // We use unreachable code to ensure that all fields have been mentioned at most once.
-            // Since the user specified `..Zeroable::zeroed()` at the end, all missing fields will
-            // be zeroed. This struct initializer will still be type-checked and complain with a
-            // very natural error message if a field is mentioned more than once, or doesn't exist.
-            #[allow(unreachable_code, clippy::diverging_sub_expression, unused_assignments)]
-            // SAFETY: this code is never executed.
-            let _ = || unsafe {
-                ::core::ptr::write(slot, #path {
-                    #(
-                        #(#field_attrs)*
-                        #field_name: ::core::panic!(),
-                    )*
-                    ..::core::mem::zeroed()
-                })
-            };
-        },
+        .filter_map(|f| f.kind.ident().map(|_| &f.attrs))
+        .collect();
+    let field_name: Vec<_> = fields.iter().filter_map(|f| f.kind.ident()).collect();
+    let zeroing_trailer = match init_kind {
+        InitKind::Normal => None,
+        InitKind::Zeroing => Some(quote! {
+            ..::core::mem::zeroed()
+        }),
+    };
+    quote! {
+        #[allow(unreachable_code, clippy::diverging_sub_expression)]
+        // We use unreachable code to perform the field checks; it is still type-checked by
+        // the compiler.
+        // SAFETY: this code is never executed.
+        let _ = || unsafe {
+            // Create references to ensure that every initialized field is properly aligned.
+            // Unaligned fields will cause the compiler to emit E0793. We do not support
+            // unaligned fields, since `Init::__init` requires an aligned pointer and the call
+            // to `ptr::write` in the value-initialization case has the same requirement.
+            #(
+                #(#field_attrs)*
+                let _ = &(*slot).#field_name;
+            )*
+
+            // If the zeroing trailer is absent, this checks that all fields have been
+            // mentioned exactly once. If it is present, all missing fields will be zeroed, so
+            // this checks that each field has been mentioned at most once. The struct
+            // initializer still generates very natural error messages for any misuse.
+            ::core::ptr::write(slot, #path {
+                #(
+                    #(#field_attrs)*
+                    #field_name: ::core::panic!(),
+                )*
+                #zeroing_trailer
+            })
+        };
+    }
 }
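
The rewritten `make_field_check()` folds the alignment check (previously done by the per-field accessors) and the mentioned-exactly-once check into a single closure that is type-checked but never called. Expanded by hand for a two-field struct, the generated code looks roughly like the sketch below (a hand-written approximation, not the actual macro output):

#[allow(dead_code)]
struct Example {
    a: u8,
    b: u32,
}

fn check(slot: *mut Example) {
    #[allow(unreachable_code, clippy::diverging_sub_expression)]
    // SAFETY: the closure is never called; it exists only so that the compiler
    // type-checks the field list and the alignment of every field.
    let _ = || unsafe {
        // Taking a reference to each field makes rustc emit E0793 if a field
        // were unaligned (as in a `#[repr(packed)]` struct).
        let _ = &(*slot).a;
        let _ = &(*slot).b;
        // The struct literal proves each field is mentioned exactly once; a
        // forgotten or duplicated field is a plain type error.
        core::ptr::write(slot, Example { a: panic!(), b: panic!() });
    };
}

fn main() {
    check(core::ptr::null_mut()); // fine: nothing in `check` ever executes
}
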
diff --git a/rust/pin-init/src/__internal.rs b/rust/pin-init/src/__internal.rs
index 90adbdc1893b..5720a621aed7 100644
--- a/rust/pin-init/src/__internal.rs
+++ b/rust/pin-init/src/__internal.rs
@@ -238,32 +238,42 @@ fn stack_init_reuse() {
 /// When a value of this type is dropped, it drops a `T`.
 ///
 /// Can be forgotten to prevent the drop.
+///
+/// # Invariants
+///
+/// - `ptr` is valid and properly aligned.
+/// - `*ptr` is initialized and owned by this guard.
 pub struct DropGuard<T: ?Sized> {
     ptr: *mut T,
 }
 
 impl<T: ?Sized> DropGuard<T> {
-    /// Creates a new [`DropGuard<T>`]. It will [`ptr::drop_in_place`] `ptr` when it gets dropped.
+    /// Creates a drop guard and transfers ownership of the pointee to it.
     ///
-    /// # Safety
+    /// Ownership is only relinquished if the guard is forgotten via [`core::mem::forget`].
     ///
-    /// `ptr` must be a valid pointer.
+    /// # Safety
     ///
-    /// It is the callers responsibility that `self` will only get dropped if the pointee of `ptr`:
-    /// - has not been dropped,
-    /// - is not accessible by any other means,
-    /// - will not be dropped by any other means.
+    /// - `ptr` is valid and properly aligned.
+    /// - `*ptr` is initialized, and its ownership is transferred to this guard.
     #[inline]
     pub unsafe fn new(ptr: *mut T) -> Self {
+        // INVARIANT: By the safety requirements.
         Self { ptr }
     }
+
+    /// Creates a `let` binding for accessor use.
+    #[inline]
+    pub fn let_binding(&mut self) -> &mut T {
+        // SAFETY: Per the type invariants.
+        unsafe { &mut *self.ptr }
+    }
 }
 
 impl<T: ?Sized> Drop for DropGuard<T> {
     #[inline]
     fn drop(&mut self) {
-        // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
-        // ensuring that this operation is safe.
+        // SAFETY: `self.ptr` is valid, properly aligned, and `*self.ptr` is owned by this guard.
         unsafe { ptr::drop_in_place(self.ptr) }
     }
 }
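
With the `let_binding()` accessor, the `&mut T` handed to initializer code borrows from the guard itself, so it cannot outlive the guard the way a raw `&mut (*slot).field` reborrow could. A miniature userspace model of the `DropGuard` contract (simplified; the real type lives in `pin_init::__internal` and supports `T: ?Sized`):

use core::{mem, ptr};

struct DropGuard<T> {
    ptr: *mut T,
}

impl<T> DropGuard<T> {
    /// # Safety
    /// `ptr` must be valid, aligned, and point to an initialized `T` whose
    /// ownership is handed to the guard.
    unsafe fn new(ptr: *mut T) -> Self {
        Self { ptr }
    }

    /// The borrow is tied to `&mut self`, so it cannot outlive the guard.
    fn let_binding(&mut self) -> &mut T {
        // SAFETY: per `new()`'s contract, `ptr` is valid and initialized.
        unsafe { &mut *self.ptr }
    }
}

impl<T> Drop for DropGuard<T> {
    fn drop(&mut self) {
        // SAFETY: the guard still owns `*ptr`; `forget` skips this entirely.
        unsafe { ptr::drop_in_place(self.ptr) }
    }
}

fn main() {
    let mut slot = mem::MaybeUninit::<String>::uninit();
    slot.write(String::from("hello"));

    // SAFETY: `slot` was just initialized; the guard takes ownership.
    let mut guard = unsafe { DropGuard::new(slot.as_mut_ptr()) };
    guard.let_binding().push_str(", world");

    // Initialization succeeded: forget the guard so it does not drop the value.
    mem::forget(guard);

    // SAFETY: the value is initialized and the guard gave up ownership.
    let s = unsafe { slot.assume_init() };
    assert_eq!(s, "hello, world");
}
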
