Diffstat (limited to 'rust')
-rw-r--r--  rust/.gitignore                     |    2
-rw-r--r--  rust/Makefile                       |   63
-rw-r--r--  rust/alloc/README.md                |    3
-rw-r--r--  rust/alloc/alloc.rs                 |   75
-rw-r--r--  rust/alloc/boxed.rs                 |  567
-rw-r--r--  rust/alloc/collections/mod.rs       |    5
-rw-r--r--  rust/alloc/lib.rs                   |  105
-rw-r--r--  rust/alloc/raw_vec.rs               |   34
-rw-r--r--  rust/alloc/slice.rs                 |  480
-rw-r--r--  rust/alloc/vec/drain.rs             |   83
-rw-r--r--  rust/alloc/vec/drain_filter.rs      |   60
-rw-r--r--  rust/alloc/vec/into_iter.rs         |  160
-rw-r--r--  rust/alloc/vec/is_zero.rs           |   96
-rw-r--r--  rust/alloc/vec/mod.rs               |  538
-rw-r--r--  rust/alloc/vec/set_len_on_drop.rs   |    5
-rw-r--r--  rust/alloc/vec/spec_extend.rs       |   63
-rw-r--r--  rust/bindings/bindings_helper.h     |    3
-rw-r--r--  rust/bindings/lib.rs                |    1
-rw-r--r--  rust/compiler_builtins.rs           |    7
-rw-r--r--  rust/helpers.c                      |   35
-rw-r--r--  rust/kernel/allocator.rs            |   98
-rw-r--r--  rust/kernel/build_assert.rs         |    2
-rw-r--r--  rust/kernel/error.rs                |   61
-rw-r--r--  rust/kernel/init.rs                 |  675
-rw-r--r--  rust/kernel/init/__internal.rs      |   39
-rw-r--r--  rust/kernel/init/macros.rs          |  570
-rw-r--r--  rust/kernel/kunit.rs                |  163
-rw-r--r--  rust/kernel/lib.rs                  |    9
-rw-r--r--  rust/kernel/prelude.rs              |    2
-rw-r--r--  rust/kernel/std_vendor.rs           |    2
-rw-r--r--  rust/kernel/str.rs                  |   26
-rw-r--r--  rust/kernel/sync/arc.rs             |   37
-rw-r--r--  rust/kernel/sync/lock.rs            |    6
-rw-r--r--  rust/kernel/sync/lock/mutex.rs      |    1
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs   |    1
-rw-r--r--  rust/kernel/task.rs                 |   10
-rw-r--r--  rust/kernel/types.rs                |   62
-rw-r--r--  rust/macros/helpers.rs              |   86
-rw-r--r--  rust/macros/lib.rs                  |  117
-rw-r--r--  rust/macros/module.rs               |    2
-rw-r--r--  rust/macros/paste.rs                |   96
-rw-r--r--  rust/macros/pin_data.rs             |  168
-rw-r--r--  rust/macros/quote.rs                |   26
-rw-r--r--  rust/macros/vtable.rs               |    1
-rw-r--r--  rust/macros/zeroable.rs             |   72
-rw-r--r--  rust/uapi/lib.rs                    |    1
46 files changed, 3210 insertions, 1508 deletions
diff --git a/rust/.gitignore b/rust/.gitignore
index 21552992b401..d3829ffab80b 100644
--- a/rust/.gitignore
+++ b/rust/.gitignore
@@ -2,6 +2,8 @@
bindings_generated.rs
bindings_helpers_generated.rs
+doctests_kernel_generated.rs
+doctests_kernel_generated_kunit.c
uapi_generated.rs
exports_*_generated.h
doc/
diff --git a/rust/Makefile b/rust/Makefile
index 7c9d9f11aec5..87958e864be0 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+# Where to place rustdoc generated documentation
+rustdoc_output := $(objtree)/Documentation/output/rust/rustdoc
+
obj-$(CONFIG_RUST) += core.o compiler_builtins.o
always-$(CONFIG_RUST) += exports_core_generated.h
@@ -27,6 +30,12 @@ endif
obj-$(CONFIG_RUST) += exports.o
+always-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated.rs
+always-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.c
+
+obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated.o
+obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.o
+
# Avoids running `$(RUSTC)` for the sysroot when it may not be available.
ifdef CONFIG_RUST
@@ -39,9 +48,11 @@ ifeq ($(quiet),silent_)
cargo_quiet=-q
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
+rustdoc_test_kernel_quiet=>/dev/null
else ifeq ($(quiet),quiet_)
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
+rustdoc_test_kernel_quiet=>/dev/null
else
cargo_quiet=--verbose
endif
@@ -65,7 +76,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
$(rustc_target_flags) -L$(objtree)/$(obj) \
- --output $(objtree)/$(obj)/doc \
+ --output $(rustdoc_output) \
--crate-name $(subst rustdoc-,,$@) \
@$(objtree)/include/generated/rustc_cfg $<
@@ -82,15 +93,15 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
# and then retouch the generated files.
rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
rustdoc-alloc rustdoc-kernel
- $(Q)cp $(srctree)/Documentation/images/logo.svg $(objtree)/$(obj)/doc
- $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(objtree)/$(obj)/doc
- $(Q)find $(objtree)/$(obj)/doc -name '*.html' -type f -print0 | xargs -0 sed -Ei \
+ $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)
+ $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)
+ $(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \
-e 's:rust-logo\.svg:logo.svg:g' \
-e 's:rust-logo\.png:logo.svg:g' \
-e 's:favicon\.svg:logo.svg:g' \
-e 's:<link rel="alternate icon" type="image/png" href="[./]*favicon-(16x16|32x32)\.png">::g'
$(Q)echo '.logo-container > img { object-fit: contain; }' \
- >> $(objtree)/$(obj)/doc/rustdoc.css
+ >> $(rustdoc_output)/rustdoc.css
rustdoc-macros: private rustdoc_host = yes
rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
@@ -154,9 +165,30 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) $(rustdoc_test_target_flags) \
--sysroot $(objtree)/$(obj)/test/sysroot $(rustdoc_test_quiet) \
- -L$(objtree)/$(obj)/test --output $(objtree)/$(obj)/doc \
+ -L$(objtree)/$(obj)/test --output $(rustdoc_output) \
--crate-name $(subst rusttest-,,$@) $<
+quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
+ cmd_rustdoc_test_kernel = \
+ rm -rf $(objtree)/$(obj)/test/doctests/kernel; \
+ mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTDOC) --test $(rust_flags) \
+ @$(objtree)/include/generated/rustc_cfg \
+ -L$(objtree)/$(obj) --extern alloc --extern kernel \
+ --extern build_error --extern macros \
+ --extern bindings --extern uapi \
+ --no-run --crate-name kernel -Zunstable-options \
+ --test-builder $(objtree)/scripts/rustdoc_test_builder \
+ $< $(rustdoc_test_kernel_quiet); \
+ $(objtree)/scripts/rustdoc_test_gen
+
+%/doctests_kernel_generated.rs %/doctests_kernel_generated_kunit.c: \
+ $(src)/kernel/lib.rs $(obj)/kernel.o \
+ $(objtree)/scripts/rustdoc_test_builder \
+ $(objtree)/scripts/rustdoc_test_gen FORCE
+ $(call if_changed,rustdoc_test_kernel)
+
# We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
# so for the moment we skip `-Cpanic=abort`.
quiet_cmd_rustc_test = RUSTC T $<
@@ -257,7 +289,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \
-fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
-fzero-call-used-regs=% -fno-stack-clash-protection \
- -fno-inline-functions-called-once \
+ -fno-inline-functions-called-once -fsanitize=bounds-strict \
--param=% --param asan-%
# Derived from `scripts/Makefile.clang`.
@@ -300,7 +332,7 @@ quiet_cmd_bindgen = BINDGEN $@
$(BINDGEN) $< $(bindgen_target_flags) \
--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
--no-debug '.*' \
- --size_t-is-usize -o $@ -- $(bindgen_c_flags_final) -DMODULE \
+ -o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
$(obj)/bindings/bindings_generated.rs: private bindgen_target_flags = \
@@ -320,8 +352,8 @@ $(obj)/uapi/uapi_generated.rs: $(src)/uapi/uapi_helper.h \
# given it is `libclang`; but for consistency, future Clang changes and/or
# a potential future GCC backend for `bindgen`, we disable it too.
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_flags = \
- --blacklist-type '.*' --whitelist-var '' \
- --whitelist-function 'rust_helper_.*'
+ --blocklist-type '.*' --allowlist-var '' \
+ --allowlist-function 'rust_helper_.*'
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
@@ -373,12 +405,15 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
$(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
rust-analyzer:
- $(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) \
- $(RUST_LIB_SRC) > $(objtree)/rust-project.json
+ $(Q)$(srctree)/scripts/generate_rust_analyzer.py \
+ --cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \
+ $(abs_srctree) $(abs_objtree) \
+ $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \
+ $(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json
redirect-intrinsics = \
- __eqsf2 __gesf2 __lesf2 __nesf2 __unordsf2 \
- __unorddf2 \
+ __addsf3 __eqsf2 __gesf2 __lesf2 __ltsf2 __mulsf3 __nesf2 __unordsf2 \
+ __adddf3 __ledf2 __ltdf2 __muldf3 __unorddf2 \
__muloti4 __multi3 \
__udivmodti4 __udivti3 __umodti3
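The new `doctests_kernel_generated` rules above wire documentation tests into KUnit: rustdoc extracts each doc example via `--test-builder $(objtree)/scripts/rustdoc_test_builder`, and `scripts/rustdoc_test_gen` then emits the Rust and C sources that kbuild compiles when `CONFIG_RUST_KERNEL_DOCTESTS` is enabled. A minimal sketch of the input side, using a hypothetical `double` helper that is not part of this diff:

````rust
/// Doubles a value.
///
/// # Examples
///
/// The fenced block below is what rustdoc extracts; with
/// CONFIG_RUST_KERNEL_DOCTESTS=y it is rewritten into
/// doctests_kernel_generated.rs and runs as a KUnit test in kernel
/// space instead of as a host binary.
///
/// ```
/// assert_eq!(double(2), 4);
/// ```
pub fn double(x: i32) -> i32 {
    x * 2
}
````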
diff --git a/rust/alloc/README.md b/rust/alloc/README.md
index c89c753720b5..eb6f22e94ebf 100644
--- a/rust/alloc/README.md
+++ b/rust/alloc/README.md
@@ -10,6 +10,9 @@ upstream. In general, only additions should be performed (e.g. new
methods). Eventually, changes should make it into upstream so that,
at some point, this fork can be dropped from the kernel tree.
+The Rust upstream version on top of which these files are based matches
+the output of `scripts/min-tool-version.sh rustc`.
+
## Rationale
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
index ca224a541770..0b6bf5b6da43 100644
--- a/rust/alloc/alloc.rs
+++ b/rust/alloc/alloc.rs
@@ -16,28 +16,32 @@ use core::ptr::{self, NonNull};
#[doc(inline)]
pub use core::alloc::*;
-use core::marker::Destruct;
-
#[cfg(test)]
mod tests;
extern "Rust" {
- // These are the magic symbols to call the global allocator. rustc generates
+ // These are the magic symbols to call the global allocator. rustc generates
// them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
// (the code expanding that attribute macro generates those functions), or to call
- // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+ // the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
// otherwise.
- // The rustc fork of LLVM also special-cases these function names to be able to optimize them
+ // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
#[rustc_allocator]
- #[rustc_allocator_nounwind]
+ #[rustc_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
- #[rustc_allocator_nounwind]
+ #[rustc_deallocator]
+ #[rustc_nounwind]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
- #[rustc_allocator_nounwind]
+ #[rustc_reallocator]
+ #[rustc_nounwind]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
- #[rustc_allocator_nounwind]
+ #[rustc_allocator_zeroed]
+ #[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+
+ #[cfg(not(bootstrap))]
+ static __rust_no_alloc_shim_is_unstable: u8;
}
/// The global memory allocator.
@@ -72,11 +76,14 @@ pub use std::alloc::Global;
/// # Examples
///
/// ```
-/// use std::alloc::{alloc, dealloc, Layout};
+/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
///
/// unsafe {
/// let layout = Layout::new::<u16>();
/// let ptr = alloc(layout);
+/// if ptr.is_null() {
+/// handle_alloc_error(layout);
+/// }
///
/// *(ptr as *mut u16) = 42;
/// assert_eq!(*(ptr as *mut u16), 42);
@@ -88,7 +95,14 @@ pub use std::alloc::Global;
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
- unsafe { __rust_alloc(layout.size(), layout.align()) }
+ unsafe {
+ // Make sure we don't accidentally allow omitting the allocator shim in
+ // stable code until it is actually stabilized.
+ #[cfg(not(bootstrap))]
+ core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
+
+ __rust_alloc(layout.size(), layout.align())
+ }
}
/// Deallocate memory with the global allocator.
@@ -327,16 +341,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
- ptr: Unique<T>,
- alloc: A,
-) {
+pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
unsafe {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
@@ -349,7 +359,7 @@ pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Dest
#[cfg(not(no_global_oom_handling))]
extern "Rust" {
- // This is the magic symbol to call the global alloc error handler. rustc generates
+ // This is the magic symbol to call the global alloc error handler. rustc generates
// it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
// default implementations below (`__rdl_oom`) otherwise.
fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
@@ -394,25 +404,24 @@ pub use std::alloc::handle_alloc_error;
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
- use crate::alloc::Layout;
-
- // called via generated `__rust_alloc_error_handler`
-
- // if there is no `#[alloc_error_handler]`
- #[rustc_std_internal_symbol]
- pub unsafe extern "C-unwind" fn __rdl_oom(size: usize, _align: usize) -> ! {
- panic!("memory allocation of {size} bytes failed")
- }
-
- // if there is an `#[alloc_error_handler]`
+ // called via generated `__rust_alloc_error_handler` if there is no
+ // `#[alloc_error_handler]`.
#[rustc_std_internal_symbol]
- pub unsafe extern "C-unwind" fn __rg_oom(size: usize, align: usize) -> ! {
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+ pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
extern "Rust" {
- #[lang = "oom"]
- fn oom_impl(layout: Layout) -> !;
+ // This symbol is emitted by rustc next to __rust_alloc_error_handler.
+ // Its value depends on the -Zoom={panic,abort} compiler option.
+ static __rust_alloc_error_handler_should_panic: u8;
+ }
+
+ #[allow(unused_unsafe)]
+ if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+ panic!("memory allocation of {size} bytes failed")
+ } else {
+ core::panicking::panic_nounwind_fmt(format_args!(
+ "memory allocation of {size} bytes failed"
+ ))
}
- unsafe { oom_impl(layout) }
}
}
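For context on the `__rust_alloc` family discussed in the comments above: rustc emits those symbols whenever a crate defines a `#[global_allocator]`, and each one forwards to the corresponding `GlobalAlloc` method (the kernel's implementation lives in `rust/kernel/allocator.rs`, touched by this same commit). A minimal sketch, not the kernel's actual allocator:

```rust
use core::alloc::{GlobalAlloc, Layout};

// Toy allocator for illustration only: it always fails, so `alloc()`
// returns null and callers end up in `handle_alloc_error`.
struct FailingAllocator;

unsafe impl GlobalAlloc for FailingAllocator {
    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
        core::ptr::null_mut()
    }
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
}

// This attribute is what makes rustc generate `__rust_alloc`,
// `__rust_dealloc`, `__rust_realloc` and `__rust_alloc_zeroed` as
// forwarders to the methods above.
#[global_allocator]
static GLOBAL: FailingAllocator = FailingAllocator;
```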
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
index dcfe87b14f3a..c8173cea8317 100644
--- a/rust/alloc/boxed.rs
+++ b/rust/alloc/boxed.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
-//! A pointer type for heap allocation.
+//! The `Box<T>` type for heap allocation.
//!
//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
@@ -124,7 +124,21 @@
//! definition is just using `T*` can lead to undefined behavior, as
//! described in [rust-lang/unsafe-code-guidelines#198][ucg#198].
//!
+//! # Considerations for unsafe code
+//!
+//! **Warning: This section is not normative and is subject to change, possibly
+//! being relaxed in the future! It is a simplified summary of the rules
+//! currently implemented in the compiler.**
+//!
+//! The aliasing rules for `Box<T>` are the same as for `&mut T`. `Box<T>`
+//! asserts uniqueness over its content. Using raw pointers derived from a box
+//! after that box has been mutated through, moved or borrowed as `&mut T`
+//! is not allowed. For more guidance on working with box from unsafe code, see
+//! [rust-lang/unsafe-code-guidelines#326][ucg#326].
+//!
+//!
//! [ucg#198]: https://github.com/rust-lang/unsafe-code-guidelines/issues/198
+//! [ucg#326]: https://github.com/rust-lang/unsafe-code-guidelines/issues/326
//! [dereferencing]: core::ops::Deref
//! [`Box::<T>::from_raw(value)`]: Box::from_raw
//! [`Global`]: crate::alloc::Global
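A concrete instance of the aliasing rule stated in the new section, as a sketch that is not part of the diff: Miri rejects the commented-out read under Stacked Borrows because the raw pointer was derived before the box was moved.

```rust
fn main() {
    let b = Box::new(1u32);
    let p: *const u32 = &*b; // raw pointer derived from the box
    let b2 = b; // moving the box re-asserts uniqueness over the allocation
    // unsafe { println!("{}", *p); } // UB: `p` predates the move,
    // exactly the pattern ucg#326 discusses
    drop(b2);
    let _ = p;
}
```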
@@ -138,14 +152,13 @@ use core::any::Any;
use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
+use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
-use core::iter::{FusedIterator, Iterator};
-use core::marker::{Destruct, Unpin, Unsize};
+use core::iter::FusedIterator;
+use core::marker::Tuple;
+use core::marker::Unsize;
use core::mem;
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
@@ -163,6 +176,8 @@ use crate::raw_vec::RawVec;
#[cfg(not(no_global_oom_handling))]
use crate::str::from_boxed_utf8_unchecked;
#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;
#[cfg(not(no_thin))]
@@ -172,7 +187,7 @@ pub use thin::ThinBox;
#[cfg(not(no_thin))]
mod thin;
-/// A pointer type for heap allocation.
+/// A pointer type that uniquely owns a heap allocation of type `T`.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
@@ -196,12 +211,14 @@ impl<T> Box<T> {
/// ```
/// let five = Box::new(5);
/// ```
- #[cfg(not(no_global_oom_handling))]
+ #[cfg(all(not(no_global_oom_handling)))]
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
+ #[rustc_diagnostic_item = "box_new"]
pub fn new(x: T) -> Self {
- box x
+ #[rustc_box]
+ Box::new(x)
}
/// Constructs a new box with uninitialized contents.
@@ -256,14 +273,19 @@ impl<T> Box<T> {
Self::new_zeroed_in(Global)
}
- /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
+ /// Constructs a new `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
/// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin(x)`
+ /// does the same as <code>[Box::into_pin]\([Box::new]\(x))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new`].
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "pin", since = "1.33.0")]
#[must_use]
#[inline(always)]
pub fn pin(x: T) -> Pin<Box<T>> {
- (box x).into()
+ Box::new(x).into()
}
/// Allocates memory on the heap then places `x` into it,
@@ -355,12 +377,11 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline]
- pub const fn new_in(x: T, alloc: A) -> Self
+ pub fn new_in(x: T, alloc: A) -> Self
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::new_uninit_in(alloc);
unsafe {
@@ -385,12 +406,10 @@ impl<T, A: Allocator> Box<T, A> {
/// # Ok::<(), std::alloc::AllocError>(())
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
where
- T: ~const Destruct,
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::try_new_uninit_in(alloc)?;
unsafe {
@@ -420,13 +439,12 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
#[must_use]
// #[unstable(feature = "new_uninit", issue = "63291")]
- pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -461,10 +479,9 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate(layout)?.cast();
@@ -492,13 +509,12 @@ impl<T, A: Allocator> Box<T, A> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
- pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -533,26 +549,29 @@ impl<T, A: Allocator> Box<T, A> {
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate_zeroed(layout)?.cast();
unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
}
- /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement `Unpin`, then
+ /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then
/// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin_in(x, alloc)`
+ /// does the same as <code>[Box::into_pin]\([Box::new_in]\(x, alloc))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T, A>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline(always)]
- pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
+ pub fn pin_in(x: T, alloc: A) -> Pin<Self>
where
- A: 'static + ~const Allocator + ~const Destruct,
+ A: 'static + Allocator,
{
Self::into_pin(Self::new_in(x, alloc))
}
@@ -561,8 +580,7 @@ impl<T, A: Allocator> Box<T, A> {
///
/// This conversion does not allocate on the heap and happens in place.
#[unstable(feature = "box_into_boxed_slice", issue = "71582")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
+ pub fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
let (raw, alloc) = Box::into_raw_with_allocator(boxed);
unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
}
@@ -579,12 +597,8 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(Box::into_inner(c), 5);
/// ```
#[unstable(feature = "box_into_inner", issue = "80437")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_inner(boxed: Self) -> T
- where
- Self: ~const Destruct,
- {
+ pub fn into_inner(boxed: Self) -> T {
*boxed
}
}
@@ -798,9 +812,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const unsafe fn assume_init(self) -> Box<T, A> {
+ pub unsafe fn assume_init(self) -> Box<T, A> {
let (raw, alloc) = Box::into_raw_with_allocator(self);
unsafe { Box::from_raw_in(raw as *mut T, alloc) }
}
@@ -833,9 +846,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// }
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn write(mut boxed: Self, value: T) -> Box<T, A> {
+ pub fn write(mut boxed: Self, value: T) -> Box<T, A> {
unsafe {
(*boxed).write(value);
boxed.assume_init()
@@ -926,6 +938,7 @@ impl<T: ?Sized> Box<T> {
/// [`Layout`]: crate::Layout
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
+ #[must_use = "call `drop(Box::from_raw(ptr))` if you intend to drop the `Box`"]
pub unsafe fn from_raw(raw: *mut T) -> Self {
unsafe { Self::from_raw_in(raw, Global) }
}
@@ -1078,9 +1091,8 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
///
/// [memory layout]: self#memory-layout
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
+ pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
let (leaked, alloc) = Box::into_unique(b);
(leaked.as_ptr(), alloc)
}
@@ -1090,10 +1102,9 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
issue = "none",
reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
)]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
#[doc(hidden)]
- pub const fn into_unique(b: Self) -> (Unique<T>, A) {
+ pub fn into_unique(b: Self) -> (Unique<T>, A) {
// Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
// raw pointer for the type system. Turning it directly into a raw pointer would not be
// recognized as "releasing" the unique pointer to permit aliased raw accesses,
@@ -1151,28 +1162,52 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// assert_eq!(*static_ref, [4, 2, 3]);
/// ```
#[stable(feature = "box_leak", since = "1.26.0")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn leak<'a>(b: Self) -> &'a mut T
+ pub fn leak<'a>(b: Self) -> &'a mut T
where
A: 'a,
{
unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
}
- /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
///
/// This conversion does not allocate on the heap and happens in place.
///
/// This is also available via [`From`].
- #[unstable(feature = "box_into_pin", issue = "62370")]
+ ///
+ /// Constructing and pinning a `Box` with <code>Box::into_pin([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `into_pin` method is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
+ ///
+ /// # Notes
+ ///
+ /// It's not recommended that crates add an impl like `From<Box<T>> for Pin<T>`,
+ /// as it'll introduce an ambiguity when calling `Pin::from`.
+ /// A demonstration of such a poor impl is shown below.
+ ///
+ /// ```compile_fail
+ /// # use std::pin::Pin;
+ /// struct Foo; // A type defined in this crate.
+ /// impl From<Box<()>> for Pin<Foo> {
+ /// fn from(_: Box<()>) -> Pin<Foo> {
+ /// Pin::new(Foo)
+ /// }
+ /// }
+ ///
+ /// let foo = Box::new(());
+ /// let bar = Pin::from(foo);
+ /// ```
+ #[stable(feature = "box_into_pin", since = "1.63.0")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
pub const fn into_pin(boxed: Self) -> Pin<Self>
where
A: 'static,
{
// It's not possible to move or replace the insides of a `Pin<Box<T>>`
- // when `T: !Unpin`, so it's safe to pin it directly without any
+ // when `T: !Unpin`, so it's safe to pin it directly without any
// additional requirements.
unsafe { Pin::new_unchecked(boxed) }
}
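The equivalence the expanded documentation spells out, as a short usage sketch:

```rust
use std::pin::Pin;

fn main() {
    // `Box::pin(x)` is the concise spelling of
    // `Box::into_pin(Box::new(x))`; both pin the value in place
    // without an extra allocation.
    let a: Pin<Box<i32>> = Box::pin(5);
    let b: Pin<Box<i32>> = Box::into_pin(Box::new(5));
    assert_eq!(*a, *b);
}
```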
@@ -1189,15 +1224,16 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
/// Creates a `Box<T>`, with the `Default` value for T.
+ #[inline]
fn default() -> Self {
- box T::default()
+ Box::new(T::default())
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl<T> const Default for Box<[T]> {
+impl<T> Default for Box<[T]> {
+ #[inline]
fn default() -> Self {
let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling();
Box(ptr, Global)
@@ -1206,8 +1242,8 @@ impl<T> const Default for Box<[T]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "default_box_extra", since = "1.17.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl const Default for Box<str> {
+impl Default for Box<str> {
+ #[inline]
fn default() -> Self {
// SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`.
let ptr: Unique<str> = unsafe {
@@ -1403,26 +1439,60 @@ impl<T> From<T> for Box<T> {
}
#[stable(feature = "pin", since = "1.33.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const From<Box<T, A>> for Pin<Box<T, A>>
+impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Pin<Box<T, A>>
where
A: 'static,
{
- /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
///
/// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`Box::into_pin`].
+ ///
+ /// Constructing and pinning a `Box` with <code><Pin<Box\<T>>>::from([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `From` implementation is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
fn from(boxed: Box<T, A>) -> Self {
Box::into_pin(boxed)
}
}
+/// Specialization trait used for `From<&[T]>`.
+#[cfg(not(no_global_oom_handling))]
+trait BoxFromSlice<T> {
+ fn from_slice(slice: &[T]) -> Self;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> BoxFromSlice<T> for Box<[T]> {
+ #[inline]
+ default fn from_slice(slice: &[T]) -> Self {
+ slice.to_vec().into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> BoxFromSlice<T> for Box<[T]> {
+ #[inline]
+ fn from_slice(slice: &[T]) -> Self {
+ let len = slice.len();
+ let buf = RawVec::with_capacity(len);
+ unsafe {
+ ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+ buf.into_box(slice.len()).assume_init()
+ }
+ }
+}
+
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_slice", since = "1.17.0")]
-impl<T: Copy> From<&[T]> for Box<[T]> {
+impl<T: Clone> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
- /// and performs a copy of `slice`.
+ /// and performs a copy of `slice` and its contents.
///
/// # Examples
/// ```rust
@@ -1432,19 +1502,15 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
///
/// println!("{boxed_slice:?}");
/// ```
+ #[inline]
fn from(slice: &[T]) -> Box<[T]> {
- let len = slice.len();
- let buf = RawVec::with_capacity(len);
- unsafe {
- ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
- buf.into_box(slice.len()).assume_init()
- }
+ <Self as BoxFromSlice<T>>::from_slice(slice)
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_cow", since = "1.45.0")]
-impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+impl<T: Clone> From<Cow<'_, [T]>> for Box<[T]> {
/// Converts a `Cow<'_, [T]>` into a `Box<[T]>`
///
/// When `cow` is the `Cow::Borrowed` variant, this
@@ -1554,10 +1620,26 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> {
/// println!("{boxed:?}");
/// ```
fn from(array: [T; N]) -> Box<[T]> {
- box array
+ Box::new(array)
}
}
+/// Casts a boxed slice to a boxed array.
+///
+/// # Safety
+///
+/// `boxed_slice.len()` must be exactly `N`.
+unsafe fn boxed_slice_as_array_unchecked<T, A: Allocator, const N: usize>(
+ boxed_slice: Box<[T], A>,
+) -> Box<[T; N], A> {
+ debug_assert_eq!(boxed_slice.len(), N);
+
+ let (ptr, alloc) = Box::into_raw_with_allocator(boxed_slice);
+ // SAFETY: Pointer and allocator came from an existing box,
+ // and our safety condition requires that the length is exactly `N`
+ unsafe { Box::from_raw_in(ptr as *mut [T; N], alloc) }
+}
+
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
type Error = Box<[T]>;
@@ -1573,13 +1655,46 @@ impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
/// `boxed_slice.len()` does not equal `N`.
fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
if boxed_slice.len() == N {
- Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+ Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
} else {
Err(boxed_slice)
}
}
}
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "boxed_array_try_from_vec", since = "1.66.0")]
+impl<T, const N: usize> TryFrom<Vec<T>> for Box<[T; N]> {
+ type Error = Vec<T>;
+
+ /// Attempts to convert a `Vec<T>` into a `Box<[T; N]>`.
+ ///
+ /// Like [`Vec::into_boxed_slice`], this is in-place if `vec.capacity() == N`,
+ /// but will require a reallocation otherwise.
+ ///
+ /// # Errors
+ ///
+ /// Returns the original `Vec<T>` in the `Err` variant if
+ /// `boxed_slice.len()` does not equal `N`.
+ ///
+ /// # Examples
+ ///
+ /// This can be used with [`vec!`] to create an array on the heap:
+ ///
+ /// ```
+ /// let state: Box<[f32; 100]> = vec![1.0; 100].try_into().unwrap();
+ /// assert_eq!(state.len(), 100);
+ /// ```
+ fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> {
+ if vec.len() == N {
+ let boxed_slice = vec.into_boxed_slice();
+ Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
+ } else {
+ Err(vec)
+ }
+ }
+}
+
impl<A: Allocator> Box<dyn Any, A> {
/// Attempt to downcast the box to a concrete type.
///
@@ -1783,8 +1898,7 @@ impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
+impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
type Target = T;
fn deref(&self) -> &T {
@@ -1793,8 +1907,7 @@ impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const DerefMut for Box<T, A> {
+impl<T: ?Sized, A: Allocator> DerefMut for Box<T, A> {
fn deref_mut(&mut self) -> &mut T {
&mut **self
}
@@ -1869,7 +1982,7 @@ impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A
impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
+impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
type Output = <F as FnOnce<Args>>::Output;
extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
@@ -1878,20 +1991,20 @@ impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
+impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
<F as FnMut<Args>>::call_mut(self, args)
}
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
+impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
extern "rust-call" fn call(&self, args: Args) -> Self::Output {
<F as Fn<Args>>::call(self, args)
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -1973,8 +2086,7 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
* could have a method to project a Pin<T> from it.
*/
#[stable(feature = "pin", since = "1.33.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const Unpin for Box<T, A> where A: 'static {}
+impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
@@ -2026,3 +2138,292 @@ impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
(**self).size_hint()
}
}
+
+impl dyn Error {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
+ if self.is::<T>() {
+ unsafe {
+ let raw: *mut dyn Error = Box::into_raw(self);
+ Ok(Box::from_raw(raw as *mut T))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+impl dyn Error + Send {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
+ })
+ }
+}
+
+impl dyn Error + Send + Sync {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send + Sync` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
+ })
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
+ /// Converts a type of [`Error`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error>::from(an_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
+ /// dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// unsafe impl Send for AnError {}
+ ///
+ /// unsafe impl Sync for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<String> for Box<dyn Error + Send + Sync> {
+ /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: String) -> Box<dyn Error + Send + Sync> {
+ struct StringError(String);
+
+ impl Error for StringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ &self.0
+ }
+ }
+
+ impl fmt::Display for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+ }
+
+ // Purposefully skip printing "StringError(..)"
+ impl fmt::Debug for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+ }
+
+ Box::new(StringError(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<String> for Box<dyn Error> {
+ /// Converts a [`String`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(str_err: String) -> Box<dyn Error> {
+ let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
+ let err2: Box<dyn Error> = err1;
+ err2
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<&str> for Box<dyn Error> {
+ /// Converts a [`str`] into a box of dyn [`Error`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: &str) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'a, str>) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[stable(feature = "box_error", since = "1.8.0")]
+impl<T: core::error::Error> core::error::Error for Box<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ core::error::Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn core::error::Error> {
+ core::error::Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ core::error::Error::source(&**self)
+ }
+}
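The `dyn Error` downcast methods and `From` impls added to `boxed.rs` above combine as follows; a small sketch with a hypothetical `MyError` type:

```rust
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct MyError;

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "my error")
    }
}

impl Error for MyError {}

fn main() {
    // `From<E: Error>` erases the concrete type...
    let boxed: Box<dyn Error> = MyError.into();
    // ...and `<dyn Error>::downcast` recovers it, handing the box back
    // unchanged in the `Err` case if the type does not match.
    let concrete: Box<MyError> = boxed.downcast::<MyError>().unwrap();
    println!("{concrete}");
}
```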
diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs
index 1eec265b28f8..2506065d158a 100644
--- a/rust/alloc/collections/mod.rs
+++ b/rust/alloc/collections/mod.rs
@@ -141,7 +141,7 @@ impl Display for TryReserveError {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveErrorKind::AllocError { .. } => {
- " because the memory allocator returned a error"
+ " because the memory allocator returned an error"
}
};
fmt.write_str(reason)
@@ -154,3 +154,6 @@ trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl core::error::Error for TryReserveError {}
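With the `core::error::Error` impl added above, `TryReserveError` can participate in ordinary error plumbing; a sketch using the stable `Vec::try_reserve`:

```rust
use std::error::Error;

fn grow(v: &mut Vec<u8>) -> Result<(), Box<dyn Error>> {
    // `?` works because `TryReserveError` implements `Error`, so it
    // converts into `Box<dyn Error>` via the blanket `From` impl.
    v.try_reserve(1024)?;
    Ok(())
}

fn main() {
    let mut v = Vec::new();
    grow(&mut v).unwrap();
    assert!(v.capacity() >= 1024);
}
```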
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
index 3aebf83c9967..85e91356ecb3 100644
--- a/rust/alloc/lib.rs
+++ b/rust/alloc/lib.rs
@@ -5,7 +5,7 @@
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
-//! This library, like libcore, normally doesn’t need to be used directly
+//! This library, like core, normally doesn’t need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so they’d use this crate instead.
@@ -58,10 +58,6 @@
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
-// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
-// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no affect there.
-#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
@@ -75,107 +71,134 @@
any(not(feature = "miri-test-libstd"), test, doctest),
no_global_oom_handling,
not(no_global_oom_handling),
+ not(no_rc),
+ not(no_sync),
target_has_atomic = "ptr"
))]
#![no_std]
#![needs_allocator]
+// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no affect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
+#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
+#![warn(multiple_supertrait_upcastable)]
//
// Library features:
-#![cfg_attr(not(no_global_oom_handling), feature(alloc_c_string))]
+// tidy-alphabetical-start
+#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(test, feature(is_sorted))]
+#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
+#![feature(array_into_iter_constructors)]
#![feature(array_methods)]
#![feature(array_windows)]
+#![feature(ascii_char)]
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![feature(const_align_of_val)]
#![feature(const_box)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
#![cfg_attr(not(no_borrow), feature(const_cow_is_borrowed))]
-#![feature(const_convert)]
-#![feature(const_size_of_val)]
-#![feature(const_align_of_val)]
-#![feature(const_ptr_read)]
-#![feature(const_maybe_uninit_write)]
+#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_maybe_uninit_write)]
+#![feature(const_maybe_uninit_zeroed)]
+#![feature(const_pin)]
#![feature(const_refs_to_cell)]
-#![feature(core_c_str)]
+#![feature(const_size_of_val)]
+#![feature(const_waker)]
#![feature(core_intrinsics)]
-#![feature(core_ffi_c)]
-#![feature(const_eval_select)]
-#![feature(const_pin)]
-#![feature(cstr_from_bytes_until_nul)]
+#![feature(core_panic)]
#![feature(dispatch_from_dyn)]
+#![feature(error_generic_member_access)]
+#![feature(error_in_core)]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(hasher_prefixfree_extras)]
+#![feature(inline_const)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
+#![feature(iter_next_chunk)]
+#![feature(iter_repeat_n)]
#![feature(layout_for_ptr)]
#![feature(maybe_uninit_slice)]
-#![cfg_attr(test, feature(new_uninit))]
-#![feature(nonnull_slice_from_raw_parts)]
+#![feature(maybe_uninit_uninit_array)]
+#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
+#![feature(pointer_byte_offsets)]
+#![feature(provide_any)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
+#![feature(saturating_int_impl)]
#![feature(set_ptr_value)]
+#![feature(sized_type_properties)]
+#![feature(slice_from_ptr_range)]
#![feature(slice_group_by)]
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
+#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
+#![feature(tuple_trait)]
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
+#![feature(utf8_chunks)]
+// tidy-alphabetical-end
//
// Language features:
+// tidy-alphabetical-start
+#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(test, feature(panic_update_hook))]
+#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
-#![feature(box_syntax)]
+#![feature(c_unwind)]
#![feature(cfg_sanitize)]
-#![feature(const_deref)]
#![feature(const_mut_refs)]
-#![feature(const_ptr_write)]
#![feature(const_precise_live_drops)]
+#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
-#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
-#![feature(let_else)]
#![feature(min_specialization)]
+#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
-#![feature(nll)] // Not necessary, but here to test the `nll` feature.
+#![feature(pointer_is_aligned)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
#![feature(slice_internals)]
#![feature(staged_api)]
-#![cfg_attr(test, feature(test))]
+#![feature(stmt_expr_attributes)]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
-#![feature(c_unwind)]
+#![feature(with_negative_coherence)]
+// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
@@ -192,6 +215,8 @@
extern crate std;
#[cfg(test)]
extern crate test;
+#[cfg(test)]
+mod testing;
// Module with internal macros used by other modules (needs to be included before other modules).
#[cfg(not(no_macros))]
@@ -218,7 +243,7 @@ mod boxed {
#[cfg(not(no_borrow))]
pub mod borrow;
pub mod collections;
-#[cfg(not(no_global_oom_handling))]
+#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))]
pub mod ffi;
#[cfg(not(no_fmt))]
pub mod fmt;
@@ -229,10 +254,9 @@ pub mod slice;
pub mod str;
#[cfg(not(no_string))]
pub mod string;
-#[cfg(not(no_sync))]
-#[cfg(target_has_atomic = "ptr")]
+#[cfg(all(not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod sync;
-#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
+#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod task;
#[cfg(test)]
mod tests;
@@ -243,3 +267,20 @@ pub mod vec;
pub mod __export {
pub use core::format_args;
}
+
+#[cfg(test)]
+#[allow(dead_code)] // Not used in all configurations
+pub(crate) mod test_helpers {
+ /// Copied from `std::test_helpers::test_rng`, since these tests rely on the
+ /// seed not being the same for every RNG invocation too.
+ pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+ use std::hash::{BuildHasher, Hash, Hasher};
+ let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ std::panic::Location::caller().hash(&mut hasher);
+ let hc64 = hasher.finish();
+ let seed_vec =
+ hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
+ let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+ rand::SeedableRng::from_seed(seed)
+ }
+}
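As the updated module docs note, `no_std` crates depend on this library directly rather than through `std`; the usual pattern looks like the following sketch:

```rust
#![no_std]

// `extern crate` is still required to link `alloc` in a no_std crate;
// afterwards its collections are used exactly as through `std`.
extern crate alloc;

use alloc::vec::Vec;

pub fn ones(n: usize) -> Vec<u8> {
    let mut v = Vec::with_capacity(n);
    v.resize(n, 1);
    v
}
```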
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
index eb77db5def55..65d5ce15828e 100644
--- a/rust/alloc/raw_vec.rs
+++ b/rust/alloc/raw_vec.rs
@@ -5,8 +5,7 @@
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
-use core::ops::Drop;
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
use core::slice;
@@ -177,7 +176,7 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if mem::size_of::<T>() == 0 || capacity == 0 {
+ if T::IS_ZST || capacity == 0 {
Self::new_in(alloc)
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
@@ -212,7 +211,7 @@ impl<T, A: Allocator> RawVec<T, A> {
fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if mem::size_of::<T>() == 0 || capacity == 0 {
+ if T::IS_ZST || capacity == 0 {
return Ok(Self::new_in(alloc));
}
@@ -262,7 +261,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
- if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+ if T::IS_ZST { usize::MAX } else { self.cap }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
@@ -271,13 +270,18 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if mem::size_of::<T>() == 0 || self.cap == 0 {
+ if T::IS_ZST || self.cap == 0 {
None
} else {
- // We have an allocated chunk of memory, so we can bypass runtime
- // checks to get our current layout.
+ // We could use Layout::array here which ensures the absence of isize and usize overflows
+ // and could hypothetically handle differences between stride and size, but this memory
+ // has already been allocated so we know it can't overflow and currently rust does not
+ // support such types. So we can do better by skipping some checks and avoid an unwrap.
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
- let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
+ let align = mem::align_of::<T>();
+ let size = mem::size_of::<T>().unchecked_mul(self.cap);
+ let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
}
@@ -419,7 +423,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
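
A quick plain-Rust illustration of the invariant this early return relies on: a vector of a zero-sized type never allocates and always reports `usize::MAX` capacity, so reaching a grow method means it is already overfull:

    fn main() {
        // No allocation ever happens for ZST elements.
        let v: Vec<()> = Vec::new();
        assert_eq!(v.capacity(), usize::MAX);
    }
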
@@ -445,7 +449,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
@@ -460,16 +464,18 @@ impl<T, A: Allocator> RawVec<T, A> {
Ok(())
}
- #[allow(dead_code)]
+ #[cfg(not(no_global_oom_handling))]
fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-
+        // See current_memory() for why this assert is here.
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// overflowed earlier when capacity was larger.
- let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
+ let new_size = mem::size_of::<T>().unchecked_mul(cap);
+ let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
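
The caller-visible contract of this path, sketched with the stable `Vec` API (`shrink_to` and `shrink_to_fit` ultimately go through `RawVec::shrink`): shrinking never grows the buffer and never goes below the length:

    fn main() {
        let mut v = Vec::with_capacity(16);
        v.extend([1, 2, 3]);
        v.shrink_to(8);
        // Capacity stays at least max(len, requested) and never grows.
        assert!(v.capacity() >= 8 && v.capacity() <= 16);
        v.shrink_to_fit();
        assert!(v.capacity() >= v.len());
    }
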
diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs
index e444e97fa145..6ac463bd3edc 100644
--- a/rust/alloc/slice.rs
+++ b/rust/alloc/slice.rs
@@ -1,84 +1,14 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
-//! A dynamically-sized view into a contiguous sequence, `[T]`.
+//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
-//! Slices are a view into a block of memory represented as a pointer and a
-//! length.
+//! Most of the structs in this module are iterator types which can only be created
+//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
-//! ```
-//! // slicing a Vec
-//! let vec = vec![1, 2, 3];
-//! let int_slice = &vec[..];
-//! // coercing an array to a slice
-//! let str_slice: &[&str] = &["one", "two", "three"];
-//! ```
-//!
-//! Slices are either mutable or shared. The shared slice type is `&[T]`,
-//! while the mutable slice type is `&mut [T]`, where `T` represents the element
-//! type. For example, you can mutate the block of memory that a mutable slice
-//! points to:
-//!
-//! ```
-//! let x = &mut [1, 2, 3];
-//! x[1] = 7;
-//! assert_eq!(x, &[1, 7, 3]);
-//! ```
-//!
-//! Here are some of the things this module contains:
-//!
-//! ## Structs
-//!
-//! There are several structs that are useful for slices, such as [`Iter`], which
-//! represents iteration over a slice.
-//!
-//! ## Trait Implementations
-//!
-//! There are several implementations of common traits for slices. Some examples
-//! include:
-//!
-//! * [`Clone`]
-//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
-//! * [`Hash`] - for slices whose element type is [`Hash`].
-//!
-//! ## Iteration
-//!
-//! The slices implement `IntoIterator`. The iterator yields references to the
-//! slice elements.
-//!
-//! ```
-//! let numbers = &[0, 1, 2];
-//! for n in numbers {
-//! println!("{n} is a number!");
-//! }
-//! ```
-//!
-//! The mutable slice yields mutable references to the elements:
-//!
-//! ```
-//! let mut scores = [7, 8, 9];
-//! for score in &mut scores[..] {
-//! *score += 1;
-//! }
-//! ```
-//!
-//! This iterator yields mutable references to the slice's elements, so while
-//! the element type of the slice is `i32`, the element type of the iterator is
-//! `&mut i32`.
-//!
-//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
-//! iterators.
-//! * Further methods that return iterators are [`.split`], [`.splitn`],
-//! [`.chunks`], [`.windows`] and more.
-//!
-//! [`Hash`]: core::hash::Hash
-//! [`.iter`]: slice::iter
-//! [`.iter_mut`]: slice::iter_mut
-//! [`.split`]: slice::split
-//! [`.splitn`]: slice::splitn
-//! [`.chunks`]: slice::chunks
-//! [`.windows`]: slice::windows
+//! A few functions are provided to create a slice from a value reference
+//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
@@ -88,20 +18,23 @@ use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
-use core::mem;
-#[cfg(not(no_global_oom_handling))]
-use core::mem::size_of;
+use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
+#[cfg(not(no_global_oom_handling))]
+use core::slice::sort;
use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
-use crate::alloc::Global;
+use crate::alloc::{self, Global};
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;
+#[cfg(test)]
+mod tests;
+
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
@@ -116,6 +49,8 @@ pub use core::slice::EscapeAscii;
pub use core::slice::SliceIndex;
#[stable(feature = "from_ref", since = "1.28.0")]
pub use core::slice::{from_mut, from_ref};
+#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
+pub use core::slice::{from_mut_ptr_range, from_ptr_range};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::slice::{from_raw_parts, from_raw_parts_mut};
#[stable(feature = "rust1", since = "1.0.0")]
@@ -275,7 +210,7 @@ impl<T> [T] {
where
T: Ord,
{
- merge_sort(self, |a, b| a.lt(b));
+ stable_sort(self, T::lt);
}
/// Sorts the slice with a comparator function.
@@ -331,7 +266,7 @@ impl<T> [T] {
where
F: FnMut(&T, &T) -> Ordering,
{
- merge_sort(self, |a, b| compare(a, b) == Less);
+ stable_sort(self, |a, b| compare(a, b) == Less);
}
/// Sorts the slice with a key extraction function.
@@ -374,7 +309,7 @@ impl<T> [T] {
F: FnMut(&T) -> K,
K: Ord,
{
- merge_sort(self, |a, b| f(a).lt(&f(b)));
+ stable_sort(self, |a, b| f(a).lt(&f(b)));
}
/// Sorts the slice with a key extraction function.
@@ -530,7 +465,7 @@ impl<T> [T] {
hack::into_vec(self)
}
- /// Creates a vector by repeating a slice `n` times.
+ /// Creates a vector by copying a slice `n` times.
///
/// # Panics
///
@@ -725,7 +660,7 @@ impl [u8] {
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predica
-/// --> src/liballoc/slice.rs:608:6
+/// --> library/alloc/src/slice.rs:608:6
/// |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
/// | ^ unconstrained type parameter
@@ -836,34 +771,30 @@ impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
////////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Borrow<[T]> for Vec<T> {
+impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
fn borrow(&self) -> &[T] {
&self[..]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> BorrowMut<[T]> for Vec<T> {
+impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
fn borrow_mut(&mut self) -> &mut [T] {
&mut self[..]
}
}
+// Specializable trait for implementing ToOwned::clone_into. This is
+// public in the crate and has the Allocator parameter so that
+// vec::clone_from can use it too.
#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone> ToOwned for [T] {
- type Owned = Vec<T>;
- #[cfg(not(test))]
- fn to_owned(&self) -> Vec<T> {
- self.to_vec()
- }
-
- #[cfg(test)]
- fn to_owned(&self) -> Vec<T> {
- hack::to_vec(self, Global)
- }
+pub(crate) trait SpecCloneIntoVec<T, A: Allocator> {
+ fn clone_into(&self, target: &mut Vec<T, A>);
+}
- fn clone_into(&self, target: &mut Vec<T>) {
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
+ default fn clone_into(&self, target: &mut Vec<T, A>) {
// drop anything in target that will not be overwritten
target.truncate(self.len());
@@ -877,328 +808,83 @@ impl<T: Clone> ToOwned for [T] {
}
}
-////////////////////////////////////////////////////////////////////////////////
-// Sorting
-////////////////////////////////////////////////////////////////////////////////
-
-/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
-///
-/// This is the integral subroutine of insertion sort.
#[cfg(not(no_global_oom_handling))]
-fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
-where
- F: FnMut(&T, &T) -> bool,
-{
- if v.len() >= 2 && is_less(&v[1], &v[0]) {
- unsafe {
- // There are three ways to implement insertion here:
- //
- // 1. Swap adjacent elements until the first one gets to its final destination.
- // However, this way we copy data around more than is necessary. If elements are big
- // structures (costly to copy), this method will be slow.
- //
- // 2. Iterate until the right place for the first element is found. Then shift the
- // elements succeeding it to make room for it and finally place it into the
- // remaining hole. This is a good method.
- //
- // 3. Copy the first element into a temporary variable. Iterate until the right place
- // for it is found. As we go along, copy every traversed element into the slot
- // preceding it. Finally, copy data from the temporary variable into the remaining
- // hole. This method is very good. Benchmarks demonstrated slightly better
- // performance than with the 2nd method.
- //
- // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
- let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
-
- // Intermediate state of the insertion process is always tracked by `hole`, which
- // serves two purposes:
- // 1. Protects integrity of `v` from panics in `is_less`.
- // 2. Fills the remaining hole in `v` in the end.
- //
- // Panic safety:
- //
- // If `is_less` panics at any point during the process, `hole` will get dropped and
- // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
- // initially held exactly once.
- let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
- ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
-
- for i in 2..v.len() {
- if !is_less(&v[i], &*tmp) {
- break;
- }
- ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
- hole.dest = &mut v[i];
- }
- // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
- }
- }
-
- // When dropped, copies from `src` into `dest`.
- struct InsertionHole<T> {
- src: *const T,
- dest: *mut T,
- }
-
- impl<T> Drop for InsertionHole<T> {
- fn drop(&mut self) {
- unsafe {
- ptr::copy_nonoverlapping(self.src, self.dest, 1);
- }
- }
+impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
+ fn clone_into(&self, target: &mut Vec<T, A>) {
+ target.clear();
+ target.extend_from_slice(self);
}
}
-/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
-/// stores the result into `v[..]`.
-///
-/// # Safety
-///
-/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
-/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
#[cfg(not(no_global_oom_handling))]
-unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
-where
- F: FnMut(&T, &T) -> bool,
-{
- let len = v.len();
- let v = v.as_mut_ptr();
- let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
-
- // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
- // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
- // copying the lesser (or greater) one into `v`.
- //
- // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
- // consumed first, then we must copy whatever is left of the shorter run into the remaining
- // hole in `v`.
- //
- // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
- // 1. Protects integrity of `v` from panics in `is_less`.
- // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
- //
- // Panic safety:
- //
- // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
- // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
- // object it initially held exactly once.
- let mut hole;
-
- if mid <= len - mid {
- // The left run is shorter.
- unsafe {
- ptr::copy_nonoverlapping(v, buf, mid);
- hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
- }
-
- // Initially, these pointers point to the beginnings of their arrays.
- let left = &mut hole.start;
- let mut right = v_mid;
- let out = &mut hole.dest;
-
- while *left < hole.end && right < v_end {
- // Consume the lesser side.
- // If equal, prefer the left run to maintain stability.
- unsafe {
- let to_copy = if is_less(&*right, &**left) {
- get_and_increment(&mut right)
- } else {
- get_and_increment(left)
- };
- ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
- }
- }
- } else {
- // The right run is shorter.
- unsafe {
- ptr::copy_nonoverlapping(v_mid, buf, len - mid);
- hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
- }
-
- // Initially, these pointers point past the ends of their arrays.
- let left = &mut hole.dest;
- let right = &mut hole.end;
- let mut out = v_end;
-
- while v < *left && buf < *right {
- // Consume the greater side.
- // If equal, prefer the right run to maintain stability.
- unsafe {
- let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
- decrement_and_get(left)
- } else {
- decrement_and_get(right)
- };
- ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
- }
- }
- }
- // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
- // it will now be copied into the hole in `v`.
-
- unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
- let old = *ptr;
- *ptr = unsafe { ptr.offset(1) };
- old
- }
-
- unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
- *ptr = unsafe { ptr.offset(-1) };
- *ptr
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> ToOwned for [T] {
+ type Owned = Vec<T>;
+ #[cfg(not(test))]
+ fn to_owned(&self) -> Vec<T> {
+ self.to_vec()
}
- // When dropped, copies the range `start..end` into `dest..`.
- struct MergeHole<T> {
- start: *mut T,
- end: *mut T,
- dest: *mut T,
+ #[cfg(test)]
+ fn to_owned(&self) -> Vec<T> {
+ hack::to_vec(self, Global)
}
- impl<T> Drop for MergeHole<T> {
- fn drop(&mut self) {
- // `T` is not a zero-sized type, and these are pointers into a slice's elements.
- unsafe {
- let len = self.end.sub_ptr(self.start);
- ptr::copy_nonoverlapping(self.start, self.dest, len);
- }
- }
+ fn clone_into(&self, target: &mut Vec<T>) {
+ SpecCloneIntoVec::clone_into(self, target);
}
}
-/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
-/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
-///
-/// The algorithm identifies strictly descending and non-descending subsequences, which are called
-/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
-/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
-/// satisfied:
-///
-/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
-/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
-///
-/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
+////////////////////////////////////////////////////////////////////////////////
+// Sorting
+////////////////////////////////////////////////////////////////////////////////
+
+#[inline]
#[cfg(not(no_global_oom_handling))]
-fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
+fn stable_sort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
- // Slices of up to this length get sorted using insertion sort.
- const MAX_INSERTION: usize = 20;
- // Very short runs are extended using insertion sort to span at least this many elements.
- const MIN_RUN: usize = 10;
-
- // Sorting has no meaningful behavior on zero-sized types.
- if size_of::<T>() == 0 {
+ if T::IS_ZST {
+ // Sorting has no meaningful behavior on zero-sized types. Do nothing.
return;
}
- let len = v.len();
-
- // Short arrays get sorted in-place via insertion sort to avoid allocations.
- if len <= MAX_INSERTION {
- if len >= 2 {
- for i in (0..len - 1).rev() {
- insert_head(&mut v[i..], &mut is_less);
- }
- }
- return;
- }
-
- // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
- // shallow copies of the contents of `v` without risking the dtors running on copies if
- // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
- // which will always have length at most `len / 2`.
- let mut buf = Vec::with_capacity(len / 2);
-
- // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
- // strange decision, but consider the fact that merges more often go in the opposite direction
- // (forwards). According to benchmarks, merging forwards is slightly faster than merging
- // backwards. To conclude, identifying runs by traversing backwards improves performance.
- let mut runs = vec![];
- let mut end = len;
- while end > 0 {
- // Find the next natural run, and reverse it if it's strictly descending.
- let mut start = end - 1;
- if start > 0 {
- start -= 1;
- unsafe {
- if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
- while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
- start -= 1;
- }
- v[start..end].reverse();
- } else {
- while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
- {
- start -= 1;
- }
- }
- }
- }
+ let elem_alloc_fn = |len: usize| -> *mut T {
+        // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
+        // v.len(). The allocation will in general only be used as a 'shadow region' to store
+        // temporary swap elements.
+ unsafe { alloc::alloc(alloc::Layout::array::<T>(len).unwrap_unchecked()) as *mut T }
+ };
- // Insert some more elements into the run if it's too short. Insertion sort is faster than
- // merge sort on short sequences, so this significantly improves performance.
- while start > 0 && end - start < MIN_RUN {
- start -= 1;
- insert_head(&mut v[start..end], &mut is_less);
+ let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
+ // v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same
+ // len.
+ unsafe {
+ alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::<T>(len).unwrap_unchecked());
}
+ };
- // Push this run onto the stack.
- runs.push(Run { start, len: end - start });
- end = start;
-
- // Merge some pairs of adjacent runs to satisfy the invariants.
- while let Some(r) = collapse(&runs) {
- let left = runs[r + 1];
- let right = runs[r];
- unsafe {
- merge(
- &mut v[left.start..right.start + right.len],
- left.len,
- buf.as_mut_ptr(),
- &mut is_less,
- );
- }
- runs[r] = Run { start: left.start, len: left.len + right.len };
- runs.remove(r + 1);
+ let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with an
+ // obscene length or 0.
+ unsafe {
+ alloc::alloc(alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked())
+ as *mut sort::TimSortRun
}
- }
+ };
- // Finally, exactly one run must remain in the stack.
- debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
-
- // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
- // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
- // algorithm should continue building a new run instead, `None` is returned.
- //
- // TimSort is infamous for its buggy implementations, as described here:
- // http://envisage-project.eu/timsort-specification-and-verification/
- //
- // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
- // Enforcing them on just top three is not sufficient to ensure that the invariants will still
- // hold for *all* runs in the stack.
- //
- // This function correctly checks invariants for the top four runs. Additionally, if the top
- // run starts at index 0, it will always demand a merge operation until the stack is fully
- // collapsed, in order to complete the sort.
- #[inline]
- fn collapse(runs: &[Run]) -> Option<usize> {
- let n = runs.len();
- if n >= 2
- && (runs[n - 1].start == 0
- || runs[n - 2].len <= runs[n - 1].len
- || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
- || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
- {
- if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
- } else {
- None
+ let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| {
+        // SAFETY: The caller must ensure that buf_ptr was created by run_alloc_fn with the same
+        // len.
+ unsafe {
+ alloc::dealloc(
+ buf_ptr as *mut u8,
+ alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked(),
+ );
}
- }
+ };
- #[derive(Clone, Copy)]
- struct Run {
- start: usize,
- len: usize,
- }
+ sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn);
}
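
The driver above only changes how scratch memory is provided; the user-facing guarantee is unchanged. A small check of the property `sort_by` must keep, namely stability (equal keys preserve their original relative order):

    fn main() {
        let mut v = [(1, 'b'), (0, 'a'), (1, 'a'), (0, 'b')];
        // Sort by the first field only; ties keep their input order.
        v.sort_by(|x, y| x.0.cmp(&y.0));
        assert_eq!(v, [(0, 'a'), (0, 'b'), (1, 'b'), (1, 'a')]);
    }
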
diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs
index b6a5f98e4fcd..78177a9e2ad0 100644
--- a/rust/alloc/vec/drain.rs
+++ b/rust/alloc/vec/drain.rs
@@ -3,7 +3,7 @@
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
-use core::mem;
+use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self, NonNull};
use core::slice::{self};
@@ -18,7 +18,7 @@ use super::Vec;
///
/// ```
/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::Drain<_> = v.drain(..);
+/// let iter: std::vec::Drain<'_, _> = v.drain(..);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
@@ -67,6 +67,75 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+    /// // If we hadn't called `keep_rest()`,
+    /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
+ // ^-- start \_________/-- unyielded_len \____/-- self.tail_len
+ // ^-- unyielded_ptr ^-- tail
+ //
+        // Normally the `Drop` impl would drop [unyielded] and then move [tail] to `start`.
+ // Here we want to
+ // 1. Move [unyielded] to `start`
+ // 2. Move [tail] to a new start at `start + len(unyielded)`
+ // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
+ // a. In case of ZST, this is the only thing we want to do
+        // 4. Do *not* drop self, as everything is already in a consistent state; there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ let source_vec = this.vec.as_mut();
+
+ let start = source_vec.len();
+ let tail = this.tail_start;
+
+ let unyielded_len = this.iter.len();
+ let unyielded_ptr = this.iter.as_slice().as_ptr();
+
+ // ZSTs have no identity, so we don't need to move them around.
+ if !T::IS_ZST {
+ let start_ptr = source_vec.as_mut_ptr().add(start);
+
+ // memmove back unyielded elements
+ if unyielded_ptr != start_ptr {
+ let src = unyielded_ptr;
+ let dst = start_ptr;
+
+ ptr::copy(src, dst, unyielded_len);
+ }
+
+ // memmove back untouched tail
+ if tail != (start + unyielded_len) {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = start_ptr.add(unyielded_len);
+ ptr::copy(src, dst, this.tail_len);
+ }
+ }
+
+ source_vec.set_len(start + unyielded_len + this.tail_len);
+ }
+ }
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
@@ -128,12 +197,12 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
}
- let iter = mem::replace(&mut self.iter, (&mut []).iter());
+ let iter = mem::take(&mut self.iter);
let drop_len = iter.len();
let mut vec = self.vec;
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
// this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
unsafe {
@@ -154,9 +223,9 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
// as_slice() must only be called when iter.len() is > 0 because
- // vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
- // the iterator's internal pointers. Creating a reference to deallocated memory
- // is invalid even when it is zero-length
+        // it also gets touched by vec::Splice, which may turn it into a dangling pointer;
+        // that would make it and the vec pointer point to different allocations, which would
+        // lead to invalid pointer arithmetic below.
let drop_ptr = iter.as_slice().as_ptr();
unsafe {
diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
index b04fce041622..09efff090e42 100644
--- a/rust/alloc/vec/drain_filter.rs
+++ b/rust/alloc/vec/drain_filter.rs
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::{Allocator, Global};
-use core::ptr::{self};
-use core::slice::{self};
+use core::mem::{ManuallyDrop, SizedTypeProperties};
+use core::ptr;
+use core::slice;
use super::Vec;
@@ -17,7 +18,7 @@ use super::Vec;
/// #![feature(drain_filter)]
///
/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
+/// let iter: std::vec::DrainFilter<'_, _, _> = v.drain_filter(|x| *x % 2 == 0);
/// ```
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
#[derive(Debug)]
@@ -56,6 +57,59 @@ where
pub fn allocator(&self) -> &A {
self.vec.allocator()
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain_filter(|_| true);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+    /// // If we hadn't called `keep_rest()`,
+    /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // _____________________/-- old_len
+ // / \
+ // [kept] [yielded] [tail]
+ // \_______/ ^-- idx
+ // \-- del
+ //
+        // Normally the `Drop` impl would drop [tail] (via `.for_each(drop)`, i.e. still calling `pred`)
+ //
+ // 1. Move [tail] after [kept]
+ // 2. Update length of the original vec to `old_len - del`
+ // a. In case of ZST, this is the only thing we want to do
+        // 3. Do *not* drop self, as everything is already in a consistent state; there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ // ZSTs have no identity, so we don't need to move them around.
+ if !T::IS_ZST && this.idx < this.old_len && this.del > 0 {
+ let ptr = this.vec.as_mut_ptr();
+ let src = ptr.add(this.idx);
+ let dst = src.sub(this.del);
+ let tail_len = this.old_len - this.idx;
+ src.copy_to(dst, tail_len);
+ }
+
+ let new_len = this.old_len - this.del;
+ this.vec.set_len(new_len);
+ }
+ }
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
index f7a50e76691e..aac0ec16aef1 100644
--- a/rust/alloc/vec/into_iter.rs
+++ b/rust/alloc/vec/into_iter.rs
@@ -3,14 +3,17 @@
#[cfg(not(no_global_oom_handling))]
use super::AsVecIntoIter;
use crate::alloc::{Allocator, Global};
+#[cfg(not(no_global_oom_handling))]
+use crate::collections::VecDeque;
use crate::raw_vec::RawVec;
+use core::array;
use core::fmt;
-use core::intrinsics::arith_offset;
use core::iter::{
FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::num::NonZeroUsize;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@@ -40,7 +43,9 @@ pub struct IntoIter<
// to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: *const T,
- pub(super) end: *const T,
+    pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+                              // ptr == end is a quick test for the iterator being empty, one that works
+                              // for both ZSTs and non-ZSTs.
}
#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
@@ -97,13 +102,16 @@ impl<T, A: Allocator> IntoIter<T, A> {
}
/// Drops remaining elements and relinquishes the backing allocation.
+ /// This method guarantees it won't panic before relinquishing
+ /// the backing allocation.
///
/// This is roughly equivalent to the following, but more efficient
///
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
- /// (&mut into_iter).for_each(core::mem::drop);
- /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
+ /// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
+ /// (&mut into_iter).for_each(drop);
+ /// std::mem::forget(into_iter);
/// ```
///
/// This method is used by in-place iteration, refer to the vec::in_place_collect
@@ -120,15 +128,45 @@ impl<T, A: Allocator> IntoIter<T, A> {
self.ptr = self.buf.as_ptr();
self.end = self.buf.as_ptr();
+ // Dropping the remaining elements can panic, so this needs to be
+ // done only after updating the other fields.
unsafe {
ptr::drop_in_place(remaining);
}
}
/// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
- #[allow(dead_code)]
pub(crate) fn forget_remaining_elements(&mut self) {
- self.ptr = self.end;
+        // For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
+ // `ptr` must stay aligned, while `end` may be unaligned.
+ self.end = self.ptr;
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> {
+ // Keep our `Drop` impl from dropping the elements and the allocator
+ let mut this = ManuallyDrop::new(self);
+
+ // SAFETY: This allocation originally came from a `Vec`, so it passes
+ // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
+ // so the `sub_ptr`s below cannot wrap, and will produce a well-formed
+ // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
+ // Taking `alloc` is ok because nothing else is going to look at it,
+ // since our `Drop` impl isn't going to run so there's no more code.
+ unsafe {
+ let buf = this.buf.as_ptr();
+ let initialized = if T::IS_ZST {
+ // All the pointers are the same for ZSTs, so it's fine to
+ // say that they're all at the beginning of the "allocation".
+ 0..this.len()
+ } else {
+ this.ptr.sub_ptr(buf)..this.end.sub_ptr(buf)
+ };
+ let cap = this.cap;
+ let alloc = ManuallyDrop::take(&mut this.alloc);
+ VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc)
+ }
}
}
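
A sketch of the observable effect (assuming, as upstream does, that the `Vec` to `VecDeque` conversion routes through `into_vecdeque`): the conversion reuses the vector's buffer instead of reallocating:

    use std::collections::VecDeque;

    fn main() {
        let v = vec![1, 2, 3];
        let ptr = v.as_ptr();
        let dq = VecDeque::from(v);
        assert_eq!(dq, [1, 2, 3]);
        // Buffer reuse is an implementation detail, not a stable guarantee,
        // but with this specialization the backing pointer is unchanged.
        assert_eq!(dq.as_slices().0.as_ptr(), ptr);
    }
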
@@ -150,19 +188,18 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- if self.ptr as *const _ == self.end {
+ if self.ptr == self.end {
None
- } else if mem::size_of::<T>() == 0 {
- // purposefully don't use 'ptr.offset' because for
- // vectors with 0-size elements this would return the
- // same pointer.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+ } else if T::IS_ZST {
+ // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
+ // reducing the `end`.
+ self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
- self.ptr = unsafe { self.ptr.offset(1) };
+ self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
@@ -170,7 +207,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = if mem::size_of::<T>() == 0 {
+ let exact = if T::IS_ZST {
self.end.addr().wrapping_sub(self.ptr.addr())
} else {
unsafe { self.end.sub_ptr(self.ptr) }
@@ -179,14 +216,12 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
- if mem::size_of::<T>() == 0 {
- // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
- // effectively results in unsigned pointers representing positions 0..usize::MAX,
- // which is valid for ZSTs.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+ if T::IS_ZST {
+ // See `next` for why we sub `end` here.
+ self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
@@ -195,10 +230,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
#[inline]
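
A usage sketch of the new error contract (nightly, under `iter_advance_by`): when the iterator runs out, the `Err` now carries the non-zero number of steps that could not be taken, rather than the number that were:

    #![feature(iter_advance_by)]
    use std::num::NonZeroUsize;

    fn main() {
        let mut it = vec![1, 2, 3].into_iter();
        assert_eq!(it.advance_by(2), Ok(()));
        // Only one element was left, so 4 of the 5 requested steps remain.
        assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(4).unwrap()));
    }
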
@@ -206,6 +238,43 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
self.len()
}
+ #[inline]
+ fn next_chunk<const N: usize>(&mut self) -> Result<[T; N], core::array::IntoIter<T, N>> {
+ let mut raw_ary = MaybeUninit::uninit_array();
+
+ let len = self.len();
+
+ if T::IS_ZST {
+ if len < N {
+ self.forget_remaining_elements();
+                // Safety: ZSTs can be conjured ex nihilo; only the amount has to be correct
+ return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
+ }
+
+ self.end = self.end.wrapping_byte_sub(N);
+ // Safety: ditto
+ return Ok(unsafe { raw_ary.transpose().assume_init() });
+ }
+
+ if len < N {
+ // Safety: `len` indicates that this many elements are available and we just checked that
+ // it fits into the array.
+ unsafe {
+ ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, len);
+ self.forget_remaining_elements();
+ return Err(array::IntoIter::new_unchecked(raw_ary, 0..len));
+ }
+ }
+
+        // Safety: `len` is at least the array size. Copy a fixed amount here to fully initialize
+        // the array.
+ return unsafe {
+ ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N);
+ self.ptr = self.ptr.add(N);
+ Ok(raw_ary.transpose().assume_init())
+ };
+ }
+
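
A usage sketch for the specialized `next_chunk` (nightly, under `iter_next_chunk`): full chunks are copied out as arrays, and a short tail comes back as the `Err` remainder:

    #![feature(iter_next_chunk)]

    fn main() {
        let mut it = vec![1, 2, 3, 4, 5].into_iter();
        assert_eq!(it.next_chunk::<2>().unwrap(), [1, 2]);
        assert_eq!(it.next_chunk::<2>().unwrap(), [3, 4]);
        // Fewer than N elements left: the remainder arrives as an
        // array::IntoIter over the initialized prefix.
        let rest = it.next_chunk::<2>().unwrap_err();
        assert_eq!(rest.as_slice(), &[5]);
    }
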
unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
where
Self: TrustedRandomAccessNoCoerce,
@@ -219,7 +288,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
unsafe {
- if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+ if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
}
}
}
@@ -230,40 +299,35 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
if self.end == self.ptr {
None
- } else if mem::size_of::<T>() == 0 {
+ } else if T::IS_ZST {
// See above for why 'ptr.offset' isn't used
- self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+ self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
- self.end = unsafe { self.end.offset(-1) };
+ self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// SAFETY: same as for advance_by()
- self.end = unsafe {
- arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
- }
+ self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: same as for advance_by()
- self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+ self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
}
@@ -280,6 +344,24 @@ impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+ A: Allocator + Default,
+{
+ /// Creates an empty `vec::IntoIter`.
+ ///
+ /// ```
+ /// # use std::vec;
+ /// let iter: vec::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// assert_eq!(iter.as_slice(), &[]);
+ /// ```
+ fn default() -> Self {
+ super::Vec::new_in(Default::default()).into_iter()
+ }
+}
+
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]
diff --git a/rust/alloc/vec/is_zero.rs b/rust/alloc/vec/is_zero.rs
index 377f3d172777..d928dcf90e80 100644
--- a/rust/alloc/vec/is_zero.rs
+++ b/rust/alloc/vec/is_zero.rs
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+use core::num::{Saturating, Wrapping};
+
use crate::boxed::Box;
#[rustc_specialization_trait]
pub(super) unsafe trait IsZero {
- /// Whether this value's representation is all zeros
+ /// Whether this value's representation is all zeros,
+    /// or can be represented with all zeros.
fn is_zero(&self) -> bool;
}
@@ -19,12 +22,14 @@ macro_rules! impl_is_zero {
};
}
+impl_is_zero!(i8, |x| x == 0); // Needed for the impls on arrays and tuples of i8.
impl_is_zero!(i16, |x| x == 0);
impl_is_zero!(i32, |x| x == 0);
impl_is_zero!(i64, |x| x == 0);
impl_is_zero!(i128, |x| x == 0);
impl_is_zero!(isize, |x| x == 0);
+impl_is_zero!(u8, |x| x == 0); // Needed for the impls on arrays and tuples of u8.
impl_is_zero!(u16, |x| x == 0);
impl_is_zero!(u32, |x| x == 0);
impl_is_zero!(u64, |x| x == 0);
@@ -55,16 +60,42 @@ unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
#[inline]
fn is_zero(&self) -> bool {
// Because this is generated as a runtime check, it's not obvious that
- // it's worth doing if the array is really long. The threshold here
- // is largely arbitrary, but was picked because as of 2022-05-01 LLVM
- // can const-fold the check in `vec![[0; 32]; n]` but not in
- // `vec![[0; 64]; n]`: https://godbolt.org/z/WTzjzfs5b
+ // it's worth doing if the array is really long. The threshold here
+ // is largely arbitrary, but was picked because as of 2022-07-01 LLVM
+        // fails to const-fold the check in `vec![[1; 32]; n]`; see
+        // https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
// Feel free to tweak if you have better evidence.
- N <= 32 && self.iter().all(IsZero::is_zero)
+ N <= 16 && self.iter().all(IsZero::is_zero)
+ }
+}
+
+// This is a recursive macro.
+macro_rules! impl_for_tuples {
+    // Stopper
+    () => {
+        // There is no use in implementing for the empty tuple because it is a ZST.
+ };
+ ($first_arg:ident $(,$rest:ident)*) => {
+ unsafe impl <$first_arg: IsZero, $($rest: IsZero,)*> IsZero for ($first_arg, $($rest,)*){
+ #[inline]
+        fn is_zero(&self) -> bool {
+            // Destructure the tuple into N references.
+            // Rust allows shadowing generic params with local variable names.
+ #[allow(non_snake_case)]
+ let ($first_arg, $($rest,)*) = self;
+
+ $first_arg.is_zero()
+ $( && $rest.is_zero() )*
+ }
+ }
+
+ impl_for_tuples!($($rest),*);
}
}
+impl_for_tuples!(A, B, C, D, E, F, G, H);
+
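
What the tuple impls buy, sketched with plain `std` (the `vec!` repeat-element path can specialize on `IsZero` to request zeroed memory instead of writing each element):

    fn main() {
        // Every field reports is_zero(), so this allocation is eligible for
        // the zeroed (calloc-style) fast path.
        let v = vec![(0u8, 0u32, 0i64); 1024];
        assert!(v.iter().all(|&(a, b, c)| a == 0 && b == 0 && c == 0));
    }
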
// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
// For fat pointers, the bytes that would be the pointer metadata in the `Some`
// variant are padding in the `None` variant, so ignoring them and
@@ -118,3 +149,56 @@ impl_is_zero_option_of_nonzero!(
NonZeroUsize,
NonZeroIsize,
);
+
+macro_rules! impl_is_zero_option_of_num {
+ ($($t:ty,)+) => {$(
+ unsafe impl IsZero for Option<$t> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ const {
+ let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+ assert!(none.is_none());
+ }
+ self.is_none()
+ }
+ }
+ )+};
+}
+
+impl_is_zero_option_of_num!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize,);
+
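
The const block above proves at compile time that the all-zero bit pattern of these `Option` types decodes as `None` under the current compiler. A runtime mirror of the same check, relying only on the layout fact the impl itself already asserts, not on any stable guarantee:

    fn main() {
        // Mirrors the compile-time assertion in impl_is_zero_option_of_num.
        let zeroed: Option<u8> = unsafe { std::mem::MaybeUninit::zeroed().assume_init() };
        assert!(zeroed.is_none());
        // Hence vec![None::<u8>; n] is eligible for the zeroed fast path.
        let v = vec![None::<u8>; 32];
        assert!(v.iter().all(|x| x.is_none()));
    }
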
+unsafe impl<T: IsZero> IsZero for Wrapping<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
+
+unsafe impl<T: IsZero> IsZero for Saturating<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
+
+macro_rules! impl_for_optional_bool {
+ ($($t:ty,)+) => {$(
+ unsafe impl IsZero for $t {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ // SAFETY: This is *not* a stable layout guarantee, but
+ // inside `core` we're allowed to rely on the current rustc
+ // behaviour that options of bools will be one byte with
+ // no padding, so long as they're nested less than 254 deep.
+ let raw: u8 = unsafe { core::mem::transmute(*self) };
+ raw == 0
+ }
+ }
+ )+};
+}
+impl_for_optional_bool! {
+ Option<bool>,
+ Option<Option<bool>>,
+ Option<Option<Option<bool>>>,
+ // Could go further, but not worth the metadata overhead
+}
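
The optional-bool impls lean on the niche layout: `Option<bool>` is one byte, `Some(false)` is the all-zero pattern, and `None` is the (non-zero) niche value, so repeat-initializing with `Some(false)` can use the zeroed fast path (current rustc behaviour, as the comment above stresses):

    fn main() {
        assert_eq!(std::mem::size_of::<Option<bool>>(), 1);
        // Some(false) transmutes to 0u8, so this allocation is eligible for
        // the zeroed fast path.
        let v = vec![Some(false); 64];
        assert!(v.iter().all(|&x| x == Some(false)));
    }
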
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index fe4fff5064bc..05c70de0227e 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -58,15 +58,11 @@
#[cfg(not(no_global_oom_handling))]
use core::cmp;
use core::cmp::Ordering;
-use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
use core::iter;
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice::{self, SliceIndex};
@@ -75,7 +71,7 @@ use crate::alloc::{Allocator, Global};
#[cfg(not(no_borrow))]
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
-use crate::collections::TryReserveError;
+use crate::collections::{TryReserveError, TryReserveErrorKind};
use crate::raw_vec::RawVec;
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
@@ -127,7 +123,7 @@ use self::set_len_on_drop::SetLenOnDrop;
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::InPlaceDrop;
+use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@@ -169,7 +165,7 @@ mod spec_extend;
/// vec[0] = 7;
/// assert_eq!(vec[0], 7);
///
-/// vec.extend([1, 2, 3].iter().copied());
+/// vec.extend([1, 2, 3]);
///
/// for x in &vec {
/// println!("{x}");
@@ -381,8 +377,8 @@ mod spec_extend;
/// Currently, `Vec` does not guarantee the order in which elements are dropped.
/// The order has changed in the past and may change again.
///
-/// [`get`]: ../../std/vec/struct.Vec.html#method.get
-/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`get`]: slice::get
+/// [`get_mut`]: slice::get_mut
/// [`String`]: crate::string::String
/// [`&str`]: type@str
/// [`shrink_to_fit`]: Vec::shrink_to_fit
@@ -428,17 +424,25 @@ impl<T> Vec<T> {
Vec { buf: RawVec::NEW, len: 0 }
}
- /// Constructs a new, empty `Vec<T>` with the specified capacity.
+ /// Constructs a new, empty `Vec<T>` with at least the specified capacity.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Panics
///
@@ -451,19 +455,24 @@ impl<T> Vec<T> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
/// assert_eq!(vec.len(), 11);
/// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<()>::with_capacity(10);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
@@ -473,17 +482,25 @@ impl<T> Vec<T> {
Self::with_capacity_in(capacity, Global)
}
- /// Tries to construct a new, empty `Vec<T>` with the specified capacity.
+ /// Tries to construct a new, empty `Vec<T>` with at least the specified capacity.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Examples
///
@@ -492,14 +509,14 @@ impl<T> Vec<T> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
@@ -508,6 +525,11 @@ impl<T> Vec<T> {
///
/// let mut result = Vec::try_with_capacity(usize::MAX);
/// assert!(result.is_err());
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<()>::try_with_capacity(10).unwrap();
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[inline]
#[stable(feature = "kernel", since = "1.0.0")]
@@ -515,15 +537,15 @@ impl<T> Vec<T> {
Self::try_with_capacity_in(capacity, Global)
}
- /// Creates a `Vec<T>` directly from the raw components of another vector.
+ /// Creates a `Vec<T>` directly from a pointer, a capacity, and a length.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
- /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
- /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `ptr` must have been allocated using the global allocator, such as via
+ /// the [`alloc::alloc`] function.
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -532,6 +554,14 @@ impl<T> Vec<T> {
/// to be the same size as the pointer was allocated with. (Because similar to
/// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is normally **not** safe
@@ -552,6 +582,7 @@ impl<T> Vec<T> {
/// function.
///
/// [`String`]: crate::string::String
+ /// [`alloc::alloc`]: crate::alloc::alloc
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
///
/// # Examples
@@ -574,8 +605,8 @@ impl<T> Vec<T> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -583,6 +614,32 @@ impl<T> Vec<T> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{AllocError, Allocator, Global, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ ///
+ /// let vec = unsafe {
+ /// let mem = match Global.allocate(layout) {
+ /// Ok(mem) => mem.cast::<u32>().as_ptr(),
+ /// Err(AllocError) => return,
+ /// };
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts_in(mem, 1, 16, Global)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
@@ -611,18 +668,26 @@ impl<T, A: Allocator> Vec<T, A> {
Vec { buf: RawVec::new_in(alloc), len: 0 }
}
- /// Constructs a new, empty `Vec<T, A>` with the specified capacity with the provided
- /// allocator.
+ /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
+ /// with the provided allocator.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Panics
///
@@ -639,19 +704,24 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
/// assert_eq!(vec.len(), 11);
/// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<(), System>::with_capacity_in(10, System);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
@@ -660,18 +730,26 @@ impl<T, A: Allocator> Vec<T, A> {
Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
}
- /// Tries to construct a new, empty `Vec<T, A>` with the specified capacity
+ /// Tries to construct a new, empty `Vec<T, A>` with at least the specified capacity
/// with the provided allocator.
///
- /// The vector will be able to hold exactly `capacity` elements without
- /// reallocating. If `capacity` is 0, the vector will not allocate.
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
///
/// It is important to note that although the returned vector has the
- /// *capacity* specified, the vector will have a zero *length*. For an
- /// explanation of the difference between length and capacity, see
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
/// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
///
/// # Examples
///
@@ -684,14 +762,14 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
@@ -700,6 +778,11 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// let mut result = Vec::try_with_capacity_in(usize::MAX, System);
/// assert!(result.is_err());
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<(), System>::try_with_capacity_in(10, System).unwrap();
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
#[inline]
#[stable(feature = "kernel", since = "1.0.0")]
@@ -707,21 +790,31 @@ impl<T, A: Allocator> Vec<T, A> {
Ok(Vec { buf: RawVec::try_with_capacity_in(capacity, alloc)?, len: 0 })
}
- /// Creates a `Vec<T, A>` directly from the raw components of another vector.
+ /// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length,
+ /// and an allocator.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
- /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
- /// (at least, it's highly likely to be incorrect if it wasn't).
- /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// * `ptr` must be [*currently allocated*] via the given allocator `alloc`.
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
/// allocated and deallocated with the same layout.)
+ /// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
- /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to [*fit*] the layout size that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T, A>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is **not** safe
@@ -739,6 +832,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// [`String`]: crate::string::String
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ /// [*currently allocated*]: crate::alloc::Allocator#currently-allocated-memory
+ /// [*fit*]: crate::alloc::Allocator#memory-fitting
///
/// # Examples
///
@@ -768,8 +863,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -777,6 +872,29 @@ impl<T, A: Allocator> Vec<T, A> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ /// let vec = unsafe {
+ /// let mem = alloc(layout).cast::<u32>();
+ /// if mem.is_null() {
+ /// return;
+ /// }
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts(mem, 1, 16)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
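
A stable-Rust sketch of the round trip these invariants describe, using the global-allocator `from_raw_parts` variant (the `_in` flavor additionally needs the unstable `allocator_api`); every requirement in the list above is met because the parts come from a live `Vec`:

```rust
use std::mem;

fn main() {
    let mut v = vec![1u32, 2, 3];
    let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());
    // Give up ownership without freeing the buffer.
    mem::forget(v);

    // SAFETY: `ptr`, `len` and `cap` were just taken from a live `Vec`,
    // so allocation provenance, alignment, size and length all match.
    let rebuilt = unsafe { Vec::from_raw_parts(ptr, len, cap) };
    assert_eq!(rebuilt, [1, 2, 3]);
}
```
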
@@ -869,14 +987,15 @@ impl<T, A: Allocator> Vec<T, A> {
(ptr, len, capacity, alloc)
}
- /// Returns the number of elements the vector can hold without
+ /// Returns the total number of elements the vector can hold without
/// reallocating.
///
/// # Examples
///
/// ```
- /// let vec: Vec<i32> = Vec::with_capacity(10);
- /// assert_eq!(vec.capacity(), 10);
+ /// let mut vec: Vec<i32> = Vec::with_capacity(10);
+ /// vec.push(42);
+ /// assert!(vec.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -885,10 +1004,10 @@ impl<T, A: Allocator> Vec<T, A> {
}
/// Reserves capacity for at least `additional` more elements to be inserted
- /// in the given `Vec<T>`. The collection may reserve more space to avoid
- /// frequent reallocations. After calling `reserve`, capacity will be
- /// greater than or equal to `self.len() + additional`. Does nothing if
- /// capacity is already sufficient.
+ /// in the given `Vec<T>`. The collection may reserve more space to
+ /// speculatively avoid frequent reallocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
///
/// # Panics
///
@@ -907,10 +1026,12 @@ impl<T, A: Allocator> Vec<T, A> {
self.buf.reserve(self.len, additional);
}
- /// Reserves the minimum capacity for exactly `additional` more elements to
- /// be inserted in the given `Vec<T>`. After calling `reserve_exact`,
- /// capacity will be greater than or equal to `self.len() + additional`.
- /// Does nothing if the capacity is already sufficient.
+ /// Reserves the minimum capacity for at least `additional` more elements to
+ /// be inserted in the given `Vec<T>`. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
///
/// Note that the allocator may give the collection more space than it
/// requests. Therefore, capacity can not be relied upon to be precisely
@@ -936,10 +1057,11 @@ impl<T, A: Allocator> Vec<T, A> {
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
- /// in the given `Vec<T>`. The collection may reserve more space to avoid
+ /// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
- /// greater than or equal to `self.len() + additional`. Does nothing if
- /// capacity is already sufficient.
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -971,10 +1093,11 @@ impl<T, A: Allocator> Vec<T, A> {
self.buf.try_reserve(self.len, additional)
}
- /// Tries to reserve the minimum capacity for exactly `additional`
- /// elements to be inserted in the given `Vec<T>`. After calling
- /// `try_reserve_exact`, capacity will be greater than or equal to
- /// `self.len() + additional` if it returns `Ok(())`.
+ /// Tries to reserve the minimum capacity for at least `additional`
+ /// elements to be inserted in the given `Vec<T>`. Unlike [`try_reserve`],
+ /// this will not deliberately over-allocate to speculatively avoid frequent
+ /// allocations. After calling `try_reserve_exact`, capacity will be greater
+ /// than or equal to `self.len() + additional` if it returns `Ok(())`.
/// Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it
@@ -1023,7 +1146,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// vec.shrink_to_fit();
/// assert!(vec.capacity() >= 3);
/// ```
@@ -1050,7 +1173,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// vec.shrink_to(4);
/// assert!(vec.capacity() >= 4);
/// vec.shrink_to(0);
@@ -1066,7 +1189,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// Converts the vector into [`Box<[T]>`][owned slice].
///
- /// Note that this will drop any excess capacity.
+ /// If the vector has excess capacity, its items will be moved into a
+ /// newly-allocated buffer with exactly the right capacity.
///
/// [owned slice]: Box
///
@@ -1084,7 +1208,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
///
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// let slice = vec.into_boxed_slice();
/// assert_eq!(slice.into_vec().capacity(), 3);
/// ```
@@ -1199,7 +1323,8 @@ impl<T, A: Allocator> Vec<T, A> {
self
}
- /// Returns a raw pointer to the vector's buffer.
+ /// Returns a raw pointer to the vector's buffer, or a dangling raw pointer
+ /// valid for zero-sized reads if the vector didn't allocate.
///
/// The caller must ensure that the vector outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
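
The new wording matters for empty vectors: the pointer is dangling but well-formed. A quick stable-Rust check:

```rust
fn main() {
    let empty: Vec<u64> = Vec::new();
    // No allocation has happened, yet the pointer is non-null and
    // aligned: a dangling pointer valid only for zero-sized reads.
    assert!(!empty.as_ptr().is_null());
    assert_eq!(empty.as_ptr().align_offset(std::mem::align_of::<u64>()), 0);
}
```
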
@@ -1229,14 +1354,11 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
// `deref`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
- /// Returns an unsafe mutable pointer to the vector's buffer.
+ /// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
+ /// raw pointer valid for zero-sized reads if the vector didn't allocate.
///
/// The caller must ensure that the vector outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
@@ -1265,11 +1387,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
// `deref_mut`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns a reference to the underlying allocator.
@@ -1440,9 +1558,6 @@ impl<T, A: Allocator> Vec<T, A> {
}
let len = self.len();
- if index > len {
- assert_failed(index, len);
- }
// space for the new element
if len == self.buf.capacity() {
@@ -1454,9 +1569,15 @@ impl<T, A: Allocator> Vec<T, A> {
// The spot to put the new value
{
let p = self.as_mut_ptr().add(index);
- // Shift everything over to make space. (Duplicating the
- // `index`th element into two consecutive places.)
- ptr::copy(p, p.offset(1), len - index);
+ if index < len {
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.add(1), len - index);
+ } else if index == len {
+ // No elements need shifting.
+ } else {
+ assert_failed(index, len);
+ }
// Write it in, overwriting the first copy of the `index`th
// element.
ptr::write(p, element);
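
The rewritten branch folds the bounds check into the shift: `index < len` shifts the tail, `index == len` appends with no copy, and anything larger reaches `assert_failed`. Observable behavior, sketched on stable Rust:

```rust
fn main() {
    let mut v = vec![1, 2, 4];
    v.insert(2, 3); // index < len: shifts `4` one slot to the right
    assert_eq!(v, [1, 2, 3, 4]);

    v.insert(v.len(), 5); // index == len: nothing to shift, acts like push
    assert_eq!(v, [1, 2, 3, 4, 5]);

    // v.insert(7, 6); // index > len: would panic via `assert_failed`
}
```
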
@@ -1513,7 +1634,7 @@ impl<T, A: Allocator> Vec<T, A> {
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
- ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ ptr::copy(ptr.add(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@@ -1562,11 +1683,11 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// ```
/// let mut vec = vec![1, 2, 3, 4];
- /// vec.retain_mut(|x| if *x > 3 {
- /// false
- /// } else {
+ /// vec.retain_mut(|x| if *x <= 3 {
/// *x += 1;
/// true
+ /// } else {
+ /// false
/// });
/// assert_eq!(vec, [2, 3, 4]);
/// ```
@@ -1854,6 +1975,51 @@ impl<T, A: Allocator> Vec<T, A> {
Ok(())
}
+ /// Appends an element if there is sufficient spare capacity; otherwise, an error is returned
+ /// with the element.
+ ///
+ /// Unlike [`push`], this method will not reallocate when there's insufficient capacity.
+ /// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity.
+ ///
+ /// [`push`]: Vec::push
+ /// [`reserve`]: Vec::reserve
+ /// [`try_reserve`]: Vec::try_reserve
+ ///
+ /// # Examples
+ ///
+ /// A manual, panic-free alternative to [`FromIterator`]:
+ ///
+ /// ```
+ /// #![feature(vec_push_within_capacity)]
+ ///
+ /// use std::collections::TryReserveError;
+ /// fn from_iter_fallible<T>(iter: impl Iterator<Item=T>) -> Result<Vec<T>, TryReserveError> {
+ /// let mut vec = Vec::new();
+ /// for value in iter {
+ /// if let Err(value) = vec.push_within_capacity(value) {
+ /// vec.try_reserve(1)?;
+ /// // This cannot fail: the previous line either returned or added at least 1 free slot.
+ /// let _ = vec.push_within_capacity(value);
+ /// }
+ /// }
+ /// Ok(vec)
+ /// }
+ /// assert_eq!(from_iter_fallible(0..100), Ok(Vec::from_iter(0..100)));
+ /// ```
+ #[inline]
+ #[unstable(feature = "vec_push_within_capacity", issue = "100486")]
+ pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> {
+ if self.len == self.buf.capacity() {
+ return Err(value);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ Ok(())
+ }
+
/// Removes the last element from a vector and returns it, or [`None`] if it
/// is empty.
///
@@ -1886,7 +2052,7 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// # Panics
///
- /// Panics if the number of elements in the vector overflows a `usize`.
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Examples
///
@@ -1980,9 +2146,7 @@ impl<T, A: Allocator> Vec<T, A> {
unsafe {
// set self.vec length's to start, to be safe in case Drain is leaked
self.set_len(start);
- // Use the borrow in the IterMut to indicate borrowing behavior of the
- // whole Drain iterator (like &mut T).
- let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
+ let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
Drain {
tail_start: end,
tail_len: len - end,
@@ -2145,7 +2309,7 @@ impl<T, A: Allocator> Vec<T, A> {
{
let len = self.len();
if new_len > len {
- self.extend_with(new_len - len, ExtendFunc(f));
+ self.extend_trusted(iter::repeat_with(f).take(new_len - len));
} else {
self.truncate(new_len);
}
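
`resize_with` now routes growth through `extend_trusted` over `iter::repeat_with(f).take(n)` instead of the removed `ExtendFunc`; the visible semantics are unchanged, as this stable sketch shows:

```rust
fn main() {
    let mut v = vec![1, 2, 3];
    // Growing calls the closure once per new slot, in order.
    let mut next = 3;
    v.resize_with(5, || { next += 1; next });
    assert_eq!(v, [1, 2, 3, 4, 5]);

    // Shrinking only truncates; the closure is never called.
    v.resize_with(2, || unreachable!());
    assert_eq!(v, [1, 2]);
}
```
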
@@ -2174,7 +2338,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// static_ref[0] += 1;
/// assert_eq!(static_ref, &[2, 2, 3]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_leak", since = "1.47.0")]
#[inline]
pub fn leak<'a>(self) -> &'a mut [T]
@@ -2469,7 +2632,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
self.reserve(range.len());
// SAFETY:
- // - `slice::range` guarantees that the given range is valid for indexing self
+ // - `slice::range` guarantees that the given range is valid for indexing self
unsafe {
self.spec_extend_from_within(range);
}
@@ -2501,7 +2664,7 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
#[unstable(feature = "slice_flatten", issue = "95629")]
pub fn into_flattened(self) -> Vec<T, A> {
let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
- let (new_len, new_cap) = if mem::size_of::<T>() == 0 {
+ let (new_len, new_cap) = if T::IS_ZST {
(len.checked_mul(N).expect("vec len overflow"), usize::MAX)
} else {
// SAFETY:
@@ -2537,16 +2700,6 @@ impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
}
}
-struct ExtendFunc<F>(F);
-impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
- fn next(&mut self) -> T {
- (self.0)()
- }
- fn last(mut self) -> T {
- (self.0)()
- }
-}
-
impl<T, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
/// Extend the vector by `n` values, using the given generator.
@@ -2563,7 +2716,7 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Increment the length in every step in case next() panics
local_len.increment_len(1);
}
@@ -2592,7 +2745,7 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Increment the length in every step in case next() panics
local_len.increment_len(1);
}
@@ -2664,7 +2817,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
// SAFETY:
- // - caller guaratees that src is a valid index
+ // - caller guarantees that src is a valid index
let to_clone = unsafe { this.get_unchecked(src) };
iter::zip(to_clone, spare)
@@ -2683,13 +2836,13 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (init, spare) = self.split_at_spare_mut();
// SAFETY:
- // - caller guaratees that `src` is a valid index
+ // - caller guarantees that `src` is a valid index
let source = unsafe { init.get_unchecked(src) };
// SAFETY:
// - Both pointers are created from unique slice references (`&mut [_]`)
// so they are valid and do not overlap.
- // - Elements are :Copy so it's OK to to copy them, without doing
+ // - Elements are :Copy so it's OK to copy them, without doing
// anything with the original values
// - `count` is equal to the len of `source`, so source is valid for
// `count` reads
@@ -2712,6 +2865,7 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
impl<T, A: Allocator> ops::Deref for Vec<T, A> {
type Target = [T];
+ #[inline]
fn deref(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
}
@@ -2719,41 +2873,13 @@ impl<T, A: Allocator> ops::Deref for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
+ #[inline]
fn deref_mut(&mut self) -> &mut [T] {
unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
}
}
#[cfg(not(no_global_oom_handling))]
-trait SpecCloneFrom {
- fn clone_from(this: &mut Self, other: &Self);
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> {
- default fn clone_from(this: &mut Self, other: &Self) {
- // drop anything that will not be overwritten
- this.truncate(other.len());
-
- // self.len <= other.len due to the truncate above, so the
- // slices here are always in-bounds.
- let (init, tail) = other.split_at(this.len());
-
- // reuse the contained values' allocations/resources.
- this.clone_from_slice(init);
- this.extend_from_slice(tail);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> {
- fn clone_from(this: &mut Self, other: &Self) {
- this.clear();
- this.extend_from_slice(other);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
#[cfg(not(test))]
@@ -2764,7 +2890,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
// HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
// required for this method definition, is not available. Instead use the
- // `slice::to_vec` function which is only available with cfg(test)
+ // `slice::to_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
fn clone(&self) -> Self {
@@ -2773,7 +2899,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
}
fn clone_from(&mut self, other: &Self) {
- SpecCloneFrom::clone_from(self, other)
+ crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self);
}
}
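
`clone_from` now delegates to the slice-side `SpecCloneIntoVec` instead of the local `SpecCloneFrom`, keeping the same allocation-reuse behavior. A sketch of that behavior (the pointer comparison reflects current practice rather than a documented guarantee):

```rust
fn main() {
    let mut dst = Vec::with_capacity(16);
    dst.extend([0u8; 4]);
    let src = vec![1u8, 2, 3];

    let ptr_before = dst.as_ptr();
    dst.clone_from(&src);

    // The destination's (large enough) buffer is reused, not reallocated.
    assert_eq!(dst, [1, 2, 3]);
    assert_eq!(dst.as_ptr(), ptr_before);
}
```
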
@@ -2781,7 +2907,6 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// as required by the `core::borrow::Borrow` implementation.
///
/// ```
-/// #![feature(build_hasher_simple_hash_one)]
/// use std::hash::BuildHasher;
///
/// let b = std::collections::hash_map::RandomState::new();
@@ -2845,19 +2970,22 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
///
/// ```
/// let v = vec!["a".to_string(), "b".to_string()];
- /// for s in v.into_iter() {
- /// // s has type String, not &String
- /// println!("{s}");
- /// }
+ /// let mut v_iter = v.into_iter();
+ ///
+ /// let first_element: Option<String> = v_iter.next();
+ ///
+ /// assert_eq!(first_element, Some("a".to_string()));
+ /// assert_eq!(v_iter.next(), Some("b".to_string()));
+ /// assert_eq!(v_iter.next(), None);
/// ```
#[inline]
- fn into_iter(self) -> IntoIter<T, A> {
+ fn into_iter(self) -> Self::IntoIter {
unsafe {
let mut me = ManuallyDrop::new(self);
let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
let begin = me.as_mut_ptr();
- let end = if mem::size_of::<T>() == 0 {
- arith_offset(begin as *const i8, me.len() as isize) as *const T
+ let end = if T::IS_ZST {
+ begin.wrapping_byte_add(me.len())
} else {
begin.add(me.len()) as *const T
};
@@ -2879,7 +3007,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
type Item = &'a T;
type IntoIter = slice::Iter<'a, T>;
- fn into_iter(self) -> slice::Iter<'a, T> {
+ fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
@@ -2889,7 +3017,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
type Item = &'a mut T;
type IntoIter = slice::IterMut<'a, T>;
- fn into_iter(self) -> slice::IterMut<'a, T> {
+ fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
@@ -2969,6 +3097,69 @@ impl<T, A: Allocator> Vec<T, A> {
Ok(())
}
+ // specific extend for `TrustedLen` iterators, called both by the specializations
+ // and internal places where resolving specialization makes compilation slower
+ #[cfg(not(no_global_oom_handling))]
+ fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let ptr = self.as_mut_ptr();
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr.add(local_len.current_len()), element);
+ // Since the loop executes user code which can panic we have to update
+ // the length every step to correctly drop what we've written.
+ // NB: can't overflow, since we would have had to allocate the address space first.
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+
+ // specific extend for `TrustedLen` iterators, called both by the specializations
+ // and internal places where resolving specialization makes compilation slower
+ fn try_extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) -> Result<(), TryReserveError> {
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.try_reserve(additional)?;
+ unsafe {
+ let ptr = self.as_mut_ptr();
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr.add(local_len.current_len()), element);
+ // Since the loop executes user code which can panic we have to update
+ // the length every step to correctly drop what we've written.
+ // NB: can't overflow, since we would have had to allocate the address space first.
+ local_len.increment_len(1);
+ });
+ }
+ Ok(())
+ } else {
+ Err(TryReserveErrorKind::CapacityOverflow.into())
+ }
+ }
+
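
The per-step length update is load-bearing: if the iterator panics midway, the `SetLenOnDrop` destructor still writes back the count of elements already initialized, so they are neither leaked nor treated as uninitialized. A userspace sketch of the observable effect (std's `Vec::extend` takes the equivalent `TrustedLen` path for a mapped range):

```rust
use std::panic::{self, AssertUnwindSafe};

fn main() {
    let mut v = vec![1, 2, 3];
    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        // A TrustedLen iterator that panics after yielding two elements.
        v.extend((4..7).map(|i| if i < 6 { i } else { panic!("boom") }));
    }));
    assert!(result.is_err());

    // The length was bumped once per successfully written element, so the
    // vector is still fully initialized and consistent after the unwind.
    assert_eq!(v, [1, 2, 3, 4, 5]);
}
```
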
/// Creates a splicing iterator that replaces the specified range in the vector
/// with the given `replace_with` iterator and yields the removed items.
/// `replace_with` does not need to be the same length as `range`.
@@ -3097,7 +3288,7 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
}
}
-/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[inline]
@@ -3109,7 +3300,7 @@ impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
-/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
#[inline]
@@ -3132,9 +3323,10 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl<T> const Default for Vec<T> {
+impl<T> Default for Vec<T> {
/// Creates an empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
fn default() -> Vec<T> {
Vec::new()
}
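
Dropping the `const` qualifier (the `const_default_impls` feature went away upstream) does not change what `Default` produces; a quick check:

```rust
fn main() {
    let v: Vec<i32> = Vec::default();
    assert_eq!(v.len(), 0);
    // `Vec::new()` is allocation-free; memory is first requested on push.
    assert_eq!(v.capacity(), 0);
}
```
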
@@ -3227,12 +3419,12 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> {
/// ```
#[cfg(not(test))]
fn from(s: [T; N]) -> Vec<T> {
- <[T]>::into_vec(box s)
+ <[T]>::into_vec(Box::new(s))
}
#[cfg(test)]
fn from(s: [T; N]) -> Vec<T> {
- crate::slice::into_vec(box s)
+ crate::slice::into_vec(Box::new(s))
}
}
@@ -3252,8 +3444,8 @@ where
///
/// ```
/// # use std::borrow::Cow;
- /// let o: Cow<[i32]> = Cow::Owned(vec![1, 2, 3]);
- /// let b: Cow<[i32]> = Cow::Borrowed(&[1, 2, 3]);
+ /// let o: Cow<'_, [i32]> = Cow::Owned(vec![1, 2, 3]);
+ /// let b: Cow<'_, [i32]> = Cow::Borrowed(&[1, 2, 3]);
/// assert_eq!(Vec::from(o), Vec::from(b));
/// ```
fn from(s: Cow<'a, [T]>) -> Vec<T> {
@@ -3261,7 +3453,7 @@ where
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(test))]
#[stable(feature = "vec_from_box", since = "1.18.0")]
impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
@@ -3279,7 +3471,7 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(no_global_oom_handling))]
#[cfg(not(test))]
#[stable(feature = "box_from_vec", since = "1.20.0")]
@@ -3294,6 +3486,14 @@ impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
/// ```
/// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
/// ```
+ ///
+ /// Any excess capacity is removed:
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
+ /// ```
fn from(v: Vec<T, A>) -> Self {
v.into_boxed_slice()
}
diff --git a/rust/alloc/vec/set_len_on_drop.rs b/rust/alloc/vec/set_len_on_drop.rs
index 448bf5076a0b..d3c7297b80ec 100644
--- a/rust/alloc/vec/set_len_on_drop.rs
+++ b/rust/alloc/vec/set_len_on_drop.rs
@@ -20,6 +20,11 @@ impl<'a> SetLenOnDrop<'a> {
pub(super) fn increment_len(&mut self, increment: usize) {
self.local_len += increment;
}
+
+ #[inline]
+ pub(super) fn current_len(&self) -> usize {
+ self.local_len
+ }
}
impl Drop for SetLenOnDrop<'_> {
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
index 5ce2d00991bc..a6a735201e59 100644
--- a/rust/alloc/vec/spec_extend.rs
+++ b/rust/alloc/vec/spec_extend.rs
@@ -1,12 +1,11 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::Allocator;
-use crate::collections::{TryReserveError, TryReserveErrorKind};
+use crate::collections::TryReserveError;
use core::iter::TrustedLen;
-use core::ptr::{self};
use core::slice::{self};
-use super::{IntoIter, SetLenOnDrop, Vec};
+use super::{IntoIter, Vec};
// Specialization trait used for Vec::extend
#[cfg(not(no_global_oom_handling))]
@@ -44,36 +43,7 @@ where
I: TrustedLen<Item = T>,
{
default fn spec_extend(&mut self, iterator: I) {
- // This is the case for a TrustedLen iterator.
- let (low, high) = iterator.size_hint();
- if let Some(additional) = high {
- debug_assert_eq!(
- low,
- additional,
- "TrustedLen iterator's size hint is not exact: {:?}",
- (low, high)
- );
- self.reserve(additional);
- unsafe {
- let mut ptr = self.as_mut_ptr().add(self.len());
- let mut local_len = SetLenOnDrop::new(&mut self.len);
- iterator.for_each(move |element| {
- ptr::write(ptr, element);
- ptr = ptr.offset(1);
- // Since the loop executes user code which can panic we have to bump the pointer
- // after each step.
- // NB can't overflow since we would have had to alloc the address space
- local_len.increment_len(1);
- });
- }
- } else {
- // Per TrustedLen contract a `None` upper bound means that the iterator length
- // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
- // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
- // This avoids additional codegen for a fallback code path which would eventually
- // panic anyway.
- panic!("capacity overflow");
- }
+ self.extend_trusted(iterator)
}
}
@@ -82,32 +52,7 @@ where
I: TrustedLen<Item = T>,
{
default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
- // This is the case for a TrustedLen iterator.
- let (low, high) = iterator.size_hint();
- if let Some(additional) = high {
- debug_assert_eq!(
- low,
- additional,
- "TrustedLen iterator's size hint is not exact: {:?}",
- (low, high)
- );
- self.try_reserve(additional)?;
- unsafe {
- let mut ptr = self.as_mut_ptr().add(self.len());
- let mut local_len = SetLenOnDrop::new(&mut self.len);
- iterator.for_each(move |element| {
- ptr::write(ptr, element);
- ptr = ptr.offset(1);
- // Since the loop executes user code which can panic we have to bump the pointer
- // after each step.
- // NB can't overflow since we would have had to alloc the address space
- local_len.increment_len(1);
- });
- }
- Ok(())
- } else {
- Err(TryReserveErrorKind::CapacityOverflow.into())
- }
+ self.try_extend_trusted(iterator)
}
}
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 50e7a76d5455..c91a3c24f607 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -6,11 +6,14 @@
* Sorted alphabetically.
*/
+#include <kunit/test.h>
+#include <linux/errname.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/wait.h>
#include <linux/sched.h>
/* `bindgen` gets confused at certain things. */
+const size_t BINDINGS_ARCH_SLAB_MINALIGN = ARCH_SLAB_MINALIGN;
const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
index 7b246454e009..9bcbea04dac3 100644
--- a/rust/bindings/lib.rs
+++ b/rust/bindings/lib.rs
@@ -9,7 +9,6 @@
//! using this crate.
#![no_std]
-#![feature(core_ffi_c)]
// See <https://github.com/rust-lang/rust-bindgen/issues/1651>.
#![cfg_attr(test, allow(deref_nullptr))]
#![cfg_attr(test, allow(unaligned_references))]
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index 43378357ece9..fb8ac3f211de 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -37,14 +37,21 @@ macro_rules! define_panicking_intrinsics(
);
define_panicking_intrinsics!("`f32` should not be used", {
+ __addsf3,
__eqsf2,
__gesf2,
__lesf2,
+ __ltsf2,
+ __mulsf3,
__nesf2,
__unordsf2,
});
define_panicking_intrinsics!("`f64` should not be used", {
+ __adddf3,
+ __ledf2,
+ __ltdf2,
+ __muldf3,
__unorddf2,
});
diff --git a/rust/helpers.c b/rust/helpers.c
index 81e80261d597..4c86fe4a7e05 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -16,15 +16,19 @@
*
* All symbols are exported as GPL-only to guarantee no GPL-only feature is
* accidentally exposed.
+ *
+ * Sorted alphabetically.
*/
+#include <kunit/test-bug.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
#include <linux/err.h>
-#include <linux/refcount.h>
+#include <linux/errname.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/refcount.h>
#include <linux/sched/signal.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
__noreturn void rust_helper_BUG(void)
@@ -110,6 +114,12 @@ long rust_helper_PTR_ERR(__force const void *ptr)
}
EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
+EXPORT_SYMBOL_GPL(rust_helper_errname);
+
struct task_struct *rust_helper_get_current(void)
{
return current;
@@ -128,20 +138,25 @@ void rust_helper_put_task_struct(struct task_struct *t)
}
EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
+struct kunit *rust_helper_kunit_get_current_test(void)
+{
+ return kunit_get_current_test();
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
+
/*
- * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
- * as the Rust `usize` type, so we can use it in contexts where Rust
- * expects a `usize` like slice (array) indices. `usize` is defined to be
- * the same as C's `uintptr_t` type (can hold any pointer) but not
- * necessarily the same as `size_t` (can hold the size of any single
- * object). Most modern platforms use the same concrete integer type for
+ * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
+ * use it in contexts where Rust expects a `usize` like slice (array) indices.
+ * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
+ * pointer) but not necessarily the same as `size_t` (can hold the size of any
+ * single object). Most modern platforms use the same concrete integer type for
* both of them, but in case we find ourselves on a platform where
* that's not true, fail early instead of risking ABI or
* integer-overflow issues.
*
* If your platform fails this assertion, it means that you are in
- * danger of integer-overflow bugs (even if you attempt to remove
- * `--size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * danger of integer-overflow bugs (even if you attempt to add
+ * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
* your platform such that `size_t` matches `uintptr_t` (i.e., to increase
* `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
*/
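
The C side enforces this with a build-time assertion (not shown in this hunk). The same property can be cross-checked from Rust at compile time; a hedged sketch, not kernel code:

```rust
// `usize` must be able to hold any pointer (and, per the comment above,
// the kernel additionally requires that `size_t` matches `uintptr_t`).
const _: () = assert!(
    core::mem::size_of::<usize>() == core::mem::size_of::<*mut ()>()
);
```
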
diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
index 397a3dd57a9b..a8f3d5be1af1 100644
--- a/rust/kernel/allocator.rs
+++ b/rust/kernel/allocator.rs
@@ -9,11 +9,41 @@ use crate::bindings;
struct KernelAllocator;
+/// Calls `krealloc` with a proper size to allocate a new object aligned to `new_layout`'s alignment.
+///
+/// # Safety
+///
+/// - `ptr` can be either null or a pointer which has been allocated by this allocator.
+/// - `new_layout` must have a non-zero size.
+unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gfp_t) -> *mut u8 {
+ // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
+ let layout = new_layout.pad_to_align();
+
+ let mut size = layout.size();
+
+ if layout.align() > bindings::BINDINGS_ARCH_SLAB_MINALIGN {
+ // The alignment requirement exceeds the slab guarantee, thus try to enlarge the size
+ // to use the "power-of-two" size/alignment guarantee (see comments in `kmalloc()` for
+ // more information).
+ //
+ // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
+ // `layout.align()`, so `next_power_of_two` gives enough alignment guarantee.
+ size = size.next_power_of_two();
+ }
+
+ // SAFETY:
+ // - `ptr` is either null or a pointer returned from a previous `k{re}alloc()` by the
+ // function safety requirement.
+ // - `size` is greater than 0 since it's either a `layout.size()` (which cannot be zero
+ // according to the function safety requirement) or a result from `next_power_of_two()`.
+ unsafe { bindings::krealloc(ptr as *const core::ffi::c_void, size, flags) as *mut u8 }
+}
+
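
The sizing logic can be checked in isolation: padding makes the size a multiple of the alignment, and rounding a multiple of a power-of-two alignment up to a power of two keeps it aligned. A userspace sketch (the `BINDINGS_ARCH_SLAB_MINALIGN` comparison itself needs the kernel bindings):

```rust
use core::alloc::Layout;

fn main() {
    // size < align: padding first makes the size a multiple of the alignment.
    let layout = Layout::from_size_align(40, 64).unwrap().pad_to_align();
    assert_eq!(layout.size(), 64);

    // A padded size of 96 with align 32 rounds up to 128, which is a power
    // of two and therefore still 32-byte aligned.
    assert_eq!(96usize.next_power_of_two(), 128);
    assert_eq!(128 % 32, 0);
}
```
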
unsafe impl GlobalAlloc for KernelAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- // `krealloc()` is used instead of `kmalloc()` because the latter is
- // an inline function and cannot be bound to as a result.
- unsafe { bindings::krealloc(ptr::null(), layout.size(), bindings::GFP_KERNEL) as *mut u8 }
+ // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+ // requirement.
+ unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) }
}
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
@@ -21,44 +51,38 @@ unsafe impl GlobalAlloc for KernelAllocator {
bindings::kfree(ptr as *const core::ffi::c_void);
}
}
-}
-
-#[global_allocator]
-static ALLOCATOR: KernelAllocator = KernelAllocator;
-// `rustc` only generates these for some crate types. Even then, we would need
-// to extract the object file that has them from the archive. For the moment,
-// let's generate them ourselves instead.
-//
-// Note that `#[no_mangle]` implies exported too, nowadays.
-#[no_mangle]
-fn __rust_alloc(size: usize, _align: usize) -> *mut u8 {
- unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 }
-}
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY:
+ // - `new_size`, when rounded up to the nearest multiple of `layout.align()`, will not
+ // overflow `isize` by the function safety requirement.
+ // - `layout.align()` is a proper alignment (i.e. not zero and must be a power of two).
+ let layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
-#[no_mangle]
-fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) {
- unsafe { bindings::kfree(ptr as *const core::ffi::c_void) };
-}
+ // SAFETY:
+ // - `ptr` is either null or a pointer allocated by this allocator by the function safety
+ // requirement.
+ // - the size of `layout` is not zero because `new_size` is not zero by the function safety
+ // requirement.
+ unsafe { krealloc_aligned(ptr, layout, bindings::GFP_KERNEL) }
+ }
-#[no_mangle]
-fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 {
- unsafe {
- bindings::krealloc(
- ptr as *const core::ffi::c_void,
- new_size,
- bindings::GFP_KERNEL,
- ) as *mut u8
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+ // requirement.
+ unsafe {
+ krealloc_aligned(
+ ptr::null_mut(),
+ layout,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ )
+ }
}
}
+#[global_allocator]
+static ALLOCATOR: KernelAllocator = KernelAllocator;
+
+// See <https://github.com/rust-lang/rust/pull/86844>.
#[no_mangle]
-fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 {
- unsafe {
- bindings::krealloc(
- core::ptr::null(),
- size,
- bindings::GFP_KERNEL | bindings::__GFP_ZERO,
- ) as *mut u8
- }
-}
+static __rust_no_alloc_shim_is_unstable: u8 = 0;
diff --git a/rust/kernel/build_assert.rs b/rust/kernel/build_assert.rs
index 659542393c09..9e37120bc69c 100644
--- a/rust/kernel/build_assert.rs
+++ b/rust/kernel/build_assert.rs
@@ -67,6 +67,8 @@ macro_rules! build_error {
/// assert!(n > 1); // Run-time check
/// }
/// ```
+///
+/// [`static_assert!`]: crate::static_assert!
#[macro_export]
macro_rules! build_assert {
($cond:expr $(,)?) => {{
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 5f4114b30b94..05fcab6abfe6 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -4,16 +4,20 @@
//!
//! C header: [`include/uapi/asm-generic/errno-base.h`](../../../include/uapi/asm-generic/errno-base.h)
+use crate::str::CStr;
+
use alloc::{
alloc::{AllocError, LayoutError},
collections::TryReserveError,
};
use core::convert::From;
+use core::fmt;
use core::num::TryFromIntError;
use core::str::Utf8Error;
/// Contains the C-compatible error codes.
+#[rustfmt::skip]
pub mod code {
macro_rules! declare_err {
($err:tt $(,)? $($doc:expr),+) => {
@@ -58,6 +62,25 @@ pub mod code {
declare_err!(EPIPE, "Broken pipe.");
declare_err!(EDOM, "Math argument out of domain of func.");
declare_err!(ERANGE, "Math result not representable.");
+ declare_err!(ERESTARTSYS, "Restart the system call.");
+ declare_err!(ERESTARTNOINTR, "System call was interrupted by a signal and will be restarted.");
+ declare_err!(ERESTARTNOHAND, "Restart if no handler.");
+ declare_err!(ENOIOCTLCMD, "No ioctl command.");
+ declare_err!(ERESTART_RESTARTBLOCK, "Restart by calling sys_restart_syscall.");
+ declare_err!(EPROBE_DEFER, "Driver requests probe retry.");
+ declare_err!(EOPENSTALE, "Open found a stale dentry.");
+ declare_err!(ENOPARAM, "Parameter not supported.");
+ declare_err!(EBADHANDLE, "Illegal NFS file handle.");
+ declare_err!(ENOTSYNC, "Update synchronization mismatch.");
+ declare_err!(EBADCOOKIE, "Cookie is stale.");
+ declare_err!(ENOTSUPP, "Operation is not supported.");
+ declare_err!(ETOOSMALL, "Buffer or request is too small.");
+ declare_err!(ESERVERFAULT, "An untranslatable error occurred.");
+ declare_err!(EBADTYPE, "Type not supported by server.");
+ declare_err!(EJUKEBOX, "Request initiated, but will not complete before timeout.");
+ declare_err!(EIOCBQUEUED, "iocb queued, will get completion event.");
+ declare_err!(ERECALLCONFLICT, "Conflict with recalled state.");
+ declare_err!(ENOGRACE, "NFS file lock reclaim refused.");
}
/// Generic integer kernel error.
@@ -113,6 +136,42 @@ impl Error {
// SAFETY: self.0 is a valid error due to its invariant.
unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
}
+
+ /// Returns a string representing the error, if one exists.
+ #[cfg(not(testlib))]
+ pub fn name(&self) -> Option<&'static CStr> {
+ // SAFETY: Just an FFI call; there are no extra safety requirements.
+ let ptr = unsafe { bindings::errname(-self.0) };
+ if ptr.is_null() {
+ None
+ } else {
+ // SAFETY: The string returned by `errname` is static and `NUL`-terminated.
+ Some(unsafe { CStr::from_char_ptr(ptr) })
+ }
+ }
+
+ /// Returns a string representing the error, if one exists.
+ ///
+ /// When `testlib` is configured, this always returns `None` to avoid the dependency on a
+ /// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still
+ /// run in userspace.
+ #[cfg(testlib)]
+ pub fn name(&self) -> Option<&'static CStr> {
+ None
+ }
+}
+
+impl fmt::Debug for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.name() {
+ // Print out number if no name can be found.
+ None => f.debug_tuple("Error").field(&-self.0).finish(),
+ // SAFETY: These strings are ASCII-only.
+ Some(name) => f
+ .debug_tuple(unsafe { core::str::from_utf8_unchecked(name) })
+ .finish(),
+ }
+ }
}
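
A userspace miniature of the `Debug` logic above; `errname_lookup` and the sample values are illustrative stand-ins for the `errname()` FFI call, not kernel API:

```rust
use core::fmt;

struct Error(i32); // holds a negative errno, like the kernel type

// Stand-in for `bindings::errname()`.
fn errname_lookup(err: i32) -> Option<&'static str> {
    match err {
        22 => Some("EINVAL"),
        _ => None,
    }
}

impl fmt::Debug for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match errname_lookup(-self.0) {
            // Fall back to the number if no name can be found.
            None => f.debug_tuple("Error").field(&-self.0).finish(),
            Some(name) => f.debug_tuple(name).finish(),
        }
    }
}

fn main() {
    assert_eq!(format!("{:?}", Error(-22)), "EINVAL");
    assert_eq!(format!("{:?}", Error(-123456)), "Error(123456)");
}
```
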
impl From<AllocError> for Error {
@@ -177,7 +236,7 @@ impl From<core::convert::Infallible> for Error {
/// Note that even if a function does not return anything when it succeeds,
/// it should still be modeled as returning a `Result` rather than
/// just an [`Error`].
-pub type Result<T = ()> = core::result::Result<T, Error>;
+pub type Result<T = (), E = Error> = core::result::Result<T, E>;
/// Converts an integer as returned by a C kernel function to an error if it's negative, and
/// `Ok(())` otherwise.
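
The widened alias keeps `Result` and `Result<T>` meaning what they did while letting callers substitute a foreign error type. A stand-alone sketch with a stand-in `Error` type:

```rust
#[derive(Debug, PartialEq)]
struct Error; // stand-in for `kernel::error::Error`

// Both parameters now default, mirroring the new alias.
type Result<T = (), E = Error> = core::result::Result<T, E>;

fn ok_unit() -> Result {
    Ok(())
}

fn parse(s: &str) -> Result<i32, core::num::ParseIntError> {
    // The `E` parameter admits non-kernel error types.
    s.parse::<i32>()
}

fn main() {
    assert_eq!(ok_unit(), Ok(()));
    assert!(parse("42").is_ok());
    assert!(parse("nope").is_err());
}
```
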
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 4ebfb08dab11..4ebb6f23fc2e 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -120,14 +120,24 @@
//! `slot` gets called.
//!
//! ```rust
-//! use kernel::{prelude::*, init};
+//! # #![allow(unreachable_pub, clippy::disallowed_names)]
+//! use kernel::{prelude::*, init, types::Opaque};
//! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};
//! # mod bindings {
+//! # #![allow(non_camel_case_types)]
//! # pub struct foo;
//! # pub unsafe fn init_foo(_ptr: *mut foo) {}
//! # pub unsafe fn destroy_foo(_ptr: *mut foo) {}
//! # pub unsafe fn enable_foo(_ptr: *mut foo, _flags: u32) -> i32 { 0 }
//! # }
+//! # // `Error::from_errno` is `pub(crate)` in the `kernel` crate, so provide a workaround.
+//! # trait FromErrno {
+//! # fn from_errno(errno: core::ffi::c_int) -> Error {
+//! # // Dummy error that can be constructed outside the `kernel` crate.
+//! # Error::from(core::fmt::Error)
+//! # }
+//! # }
+//! # impl FromErrno for Error {}
//! /// # Invariants
//! ///
//! /// `foo` is always initialized
@@ -158,7 +168,7 @@
//! if err != 0 {
//! // Enabling has failed, first clean up the foo and then return the error.
//! bindings::destroy_foo(Opaque::raw_get(foo));
-//! return Err(Error::from_kernel_errno(err));
+//! return Err(Error::from_errno(err));
//! }
//!
//! // All fields of `RawFoo` have been initialized, since `_p` is a ZST.
@@ -197,15 +207,17 @@
//! [`Opaque`]: kernel::types::Opaque
//! [`Opaque::ffi_init`]: kernel::types::Opaque::ffi_init
//! [`pin_data`]: ::macros::pin_data
+//! [`pin_init!`]: crate::pin_init!
use crate::{
error::{self, Error},
sync::UniqueArc,
+ types::{Opaque, ScopeGuard},
};
use alloc::boxed::Box;
use core::{
alloc::AllocError,
- cell::Cell,
+ cell::UnsafeCell,
convert::Infallible,
marker::PhantomData,
mem::MaybeUninit,
@@ -225,8 +237,7 @@ pub mod macros;
///
/// ```rust
/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
-/// # use kernel::{init, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
-/// # use macros::pin_data;
+/// # use kernel::{init, macros::pin_data, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
/// # use core::pin::Pin;
/// #[pin_data]
/// struct Foo {
@@ -255,6 +266,8 @@ pub mod macros;
/// A normal `let` binding with optional type annotation. The expression is expected to implement
/// [`PinInit`]/[`Init`] with the error type [`Infallible`]. If you want to use a different error
/// type, then use [`stack_try_pin_init!`].
+///
+/// [`stack_try_pin_init!`]: crate::stack_try_pin_init!
#[macro_export]
macro_rules! stack_pin_init {
(let $var:ident $(: $t:ty)? = $val:expr) => {
@@ -274,7 +287,7 @@ macro_rules! stack_pin_init {
///
/// # Examples
///
-/// ```rust
+/// ```rust,ignore
/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
/// # use macros::pin_data;
@@ -300,7 +313,7 @@ macro_rules! stack_pin_init {
/// pr_info!("a: {}", &*foo.a.lock());
/// ```
///
-/// ```rust
+/// ```rust,ignore
/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
/// # use macros::pin_data;
@@ -506,14 +519,17 @@ macro_rules! stack_try_pin_init {
/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
/// pointer named `this` inside of the initializer.
+/// - Using struct update syntax one can place `..Zeroable::zeroed()` at the very end of the
+/// struct; this initializes every field with 0 and then runs all initializers specified in the
+/// body. This can only be done if [`Zeroable`] is implemented for the struct.
///
/// For instance:
///
/// ```rust
-/// # use kernel::pin_init;
-/// # use macros::pin_data;
+/// # use kernel::{macros::{Zeroable, pin_data}, pin_init};
/// # use core::{ptr::addr_of_mut, marker::PhantomPinned};
/// #[pin_data]
+/// #[derive(Zeroable)]
/// struct Buf {
/// // `ptr` points into `buf`.
/// ptr: *mut u8,
@@ -526,6 +542,10 @@ macro_rules! stack_try_pin_init {
/// ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },
/// pin: PhantomPinned,
/// });
+/// pin_init!(Buf {
+/// buf: [1; 64],
+/// ..Zeroable::zeroed()
+/// });
/// ```
///
/// [`try_pin_init!`]: kernel::try_pin_init
@@ -537,11 +557,15 @@ macro_rules! pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error(::core::convert::Infallible),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
}
@@ -590,205 +614,31 @@ macro_rules! try_pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)? ),
@fields($($fields)*),
@error($crate::error::Error),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)? ),
@fields($($fields)*),
@error($err),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
- (
- @this($($this:ident)?),
- @typ($t:ident $(::<$($generics:ty),*>)?),
- @fields($($fields:tt)*),
- @error($err:ty),
- ) => {{
- // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
- // type and shadow it later when we insert the arbitrary user code. That way there will be
- // no possibility of returning without `unsafe`.
- struct __InitOk;
- // Get the pin data from the supplied type.
- let data = unsafe {
- use $crate::init::__internal::HasPinData;
- $t$(::<$($generics),*>)?::__pin_data()
- };
- // Ensure that `data` really is of type `PinData` and help with type inference:
- let init = $crate::init::__internal::PinData::make_closure::<_, __InitOk, $err>(
- data,
- move |slot| {
- {
- // Shadow the structure so it cannot be used to return early.
- struct __InitOk;
- // Create the `this` so it can be referenced by the user inside of the
- // expressions creating the individual fields.
- $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
- // Initialize every field.
- $crate::try_pin_init!(init_slot:
- @data(data),
- @slot(slot),
- @munch_fields($($fields)*,),
- );
- // We use unreachable code to ensure that all fields have been mentioned exactly
- // once, this struct initializer will still be type-checked and complain with a
- // very natural error message if a field is forgotten/mentioned more than once.
- #[allow(unreachable_code, clippy::diverging_sub_expression)]
- if false {
- $crate::try_pin_init!(make_initializer:
- @slot(slot),
- @type_name($t),
- @munch_fields($($fields)*,),
- @acc(),
- );
- }
- // Forget all guards, since initialization was a success.
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($fields)*,),
- );
- }
- Ok(__InitOk)
- }
- );
- let init = move |slot| -> ::core::result::Result<(), $err> {
- init(slot).map(|__InitOk| ())
- };
- let init = unsafe { $crate::init::pin_init_from_closure::<_, $err>(init) };
- init
- }};
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- @munch_fields($(,)?),
- ) => {
- // Endpoint of munching, no fields are left.
- };
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- // In-place initialization syntax.
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- let $field = $val;
- // Call the initializer.
- //
- // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
- // return when an error/panic occurs.
- // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
- unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), $field)? };
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be forgotten via safe code.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_pin_init!(init_slot:
- @data($data),
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- // Direct value init, this is safe for every field.
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- $(let $field = $val;)?
- // Initialize the field.
- //
- // SAFETY: The memory at `slot` is uninitialized.
- unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
- // Create the drop guard:
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_pin_init!(init_slot:
- @data($data),
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($(,)?),
- @acc($($acc:tt)*),
- ) => {
- // Endpoint, nothing more to munch, create the initializer.
- // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
- // get the correct type inference here:
- unsafe {
- ::core::ptr::write($slot, $t {
- $($acc)*
- });
- }
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_pin_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)* $field: ::core::panic!(),),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_pin_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)* $field: ::core::panic!(),),
- );
- };
- (forget_guards:
- @munch_fields($(,)?),
- ) => {
- // Munching finished.
- };
- (forget_guards:
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
- (forget_guards:
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
}
/// Construct an in-place initializer for `struct`s.
@@ -804,6 +654,8 @@ macro_rules! try_pin_init {
///
/// This initializer is for initializing data in-place that might later be moved. If you want to
/// pin-initialize, use [`pin_init!`].
+///
+/// [`try_init!`]: crate::try_init!
// For a detailed example of how this macro works, see the module documentation of the hidden
// module `__internal` inside of `init/__internal.rs`.
#[macro_export]
@@ -811,11 +663,15 @@ macro_rules! init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error(::core::convert::Infallible),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
}
}
@@ -836,7 +692,7 @@ macro_rules! init {
/// # Examples
///
/// ```rust
-/// use kernel::{init::PinInit, error::Error, InPlaceInit};
+/// use kernel::{init::{PinInit, zeroed}, error::Error};
/// struct BigBuf {
/// big: Box<[u8; 1024 * 1024 * 1024]>,
/// small: [u8; 1024 * 1024],
@@ -858,199 +714,31 @@ macro_rules! try_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error($crate::error::Error),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error($err),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
};
- (
- @this($($this:ident)?),
- @typ($t:ident $(::<$($generics:ty),*>)?),
- @fields($($fields:tt)*),
- @error($err:ty),
- ) => {{
- // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
- // type and shadow it later when we insert the arbitrary user code. That way there will be
- // no possibility of returning without `unsafe`.
- struct __InitOk;
- // Get the init data from the supplied type.
- let data = unsafe {
- use $crate::init::__internal::HasInitData;
- $t$(::<$($generics),*>)?::__init_data()
- };
- // Ensure that `data` really is of type `InitData` and help with type inference:
- let init = $crate::init::__internal::InitData::make_closure::<_, __InitOk, $err>(
- data,
- move |slot| {
- {
- // Shadow the structure so it cannot be used to return early.
- struct __InitOk;
- // Create the `this` so it can be referenced by the user inside of the
- // expressions creating the individual fields.
- $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
- // Initialize every field.
- $crate::try_init!(init_slot:
- @slot(slot),
- @munch_fields($($fields)*,),
- );
- // We use unreachable code to ensure that all fields have been mentioned exactly
- // once, this struct initializer will still be type-checked and complain with a
- // very natural error message if a field is forgotten/mentioned more than once.
- #[allow(unreachable_code, clippy::diverging_sub_expression)]
- if false {
- $crate::try_init!(make_initializer:
- @slot(slot),
- @type_name($t),
- @munch_fields($($fields)*,),
- @acc(),
- );
- }
- // Forget all guards, since initialization was a success.
- $crate::try_init!(forget_guards:
- @munch_fields($($fields)*,),
- );
- }
- Ok(__InitOk)
- }
- );
- let init = move |slot| -> ::core::result::Result<(), $err> {
- init(slot).map(|__InitOk| ())
- };
- let init = unsafe { $crate::init::init_from_closure::<_, $err>(init) };
- init
- }};
- (init_slot:
- @slot($slot:ident),
- @munch_fields( $(,)?),
- ) => {
- // Endpoint of munching, no fields are left.
- };
- (init_slot:
- @slot($slot:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- let $field = $val;
- // Call the initializer.
- //
- // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
- // return when an error/panic occurs.
- unsafe {
- $crate::init::Init::__init($field, ::core::ptr::addr_of_mut!((*$slot).$field))?;
- }
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_init!(init_slot:
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (init_slot:
- @slot($slot:ident),
- // Direct value init.
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- $(let $field = $val;)?
- // Call the initializer.
- //
- // SAFETY: The memory at `slot` is uninitialized.
- unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_init!(init_slot:
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields( $(,)?),
- @acc($($acc:tt)*),
- ) => {
- // Endpoint, nothing more to munch, create the initializer.
- // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
- // get the correct type inference here:
- unsafe {
- ::core::ptr::write($slot, $t {
- $($acc)*
- });
- }
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)*$field: ::core::panic!(),),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)*$field: ::core::panic!(),),
- );
- };
- (forget_guards:
- @munch_fields($(,)?),
- ) => {
- // Munching finished.
- };
- (forget_guards:
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
- (forget_guards:
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
}
/// A pin-initializer for the type `T`.
@@ -1087,6 +775,79 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
/// deallocate.
/// - `slot` will not move until it is dropped, i.e. it will be pinned.
unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E>;
+
+    /// First initializes the value using `self`, then calls the function `f` with the
+    /// initialized value.
+    ///
+    /// If `f` returns an error, the value is dropped and the initializer forwards the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![allow(clippy::disallowed_names)]
+ /// use kernel::{types::Opaque, init::pin_init_from_closure};
+ /// #[repr(C)]
+ /// struct RawFoo([u8; 16]);
+    /// extern "C" {
+ /// fn init_foo(_: *mut RawFoo);
+ /// }
+ ///
+ /// #[pin_data]
+ /// struct Foo {
+ /// #[pin]
+ /// raw: Opaque<RawFoo>,
+ /// }
+ ///
+ /// impl Foo {
+ /// fn setup(self: Pin<&mut Self>) {
+ /// pr_info!("Setting up foo");
+ /// }
+ /// }
+ ///
+ /// let foo = pin_init!(Foo {
+ /// raw <- unsafe {
+ /// Opaque::ffi_init(|s| {
+ /// init_foo(s);
+ /// })
+ /// },
+ /// }).pin_chain(|foo| {
+ /// foo.setup();
+ /// Ok(())
+ /// });
+ /// ```
+ fn pin_chain<F>(self, f: F) -> ChainPinInit<Self, F, T, E>
+ where
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+ {
+ ChainPinInit(self, f, PhantomData)
+ }
+}
+
+/// An initializer returned by [`PinInit::pin_chain`].
+pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
+
+// SAFETY: The `__pinned_init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error and in this case `slot` will be dropped.
+// - considers `slot` pinned.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainPinInit<I, F, T, E>
+where
+ I: PinInit<T, E>,
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+{
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__pinned_init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ let val = unsafe { &mut *slot };
+ // SAFETY: `slot` is considered pinned.
+ let val = unsafe { Pin::new_unchecked(val) };
+ (self.1)(val).map_err(|e| {
+ // SAFETY: `slot` was initialized above.
+ unsafe { core::ptr::drop_in_place(slot) };
+ e
+ })
+ }
}
/// An initializer for `T`.
@@ -1119,7 +880,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
///
/// [`Arc<T>`]: crate::sync::Arc
#[must_use = "An initializer must be used in order to create its value."]
-pub unsafe trait Init<T: ?Sized, E = Infallible>: Sized {
+pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
/// Initializes `slot`.
///
/// # Safety
@@ -1128,16 +889,73 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: Sized {
/// - the caller does not touch `slot` when `Err` is returned, they are only permitted to
/// deallocate.
unsafe fn __init(self, slot: *mut T) -> Result<(), E>;
+
+    /// First initializes the value using `self`, then calls the function `f` with the
+    /// initialized value.
+    ///
+    /// If `f` returns an error, the value is dropped and the initializer forwards the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![allow(clippy::disallowed_names)]
+ /// use kernel::{types::Opaque, init::{self, init_from_closure}};
+ /// struct Foo {
+ /// buf: [u8; 1_000_000],
+ /// }
+ ///
+ /// impl Foo {
+ /// fn setup(&mut self) {
+ /// pr_info!("Setting up foo");
+ /// }
+ /// }
+ ///
+ /// let foo = init!(Foo {
+ /// buf <- init::zeroed()
+ /// }).chain(|foo| {
+ /// foo.setup();
+ /// Ok(())
+ /// });
+ /// ```
+ fn chain<F>(self, f: F) -> ChainInit<Self, F, T, E>
+ where
+ F: FnOnce(&mut T) -> Result<(), E>,
+ {
+ ChainInit(self, f, PhantomData)
+ }
+}
+
+/// An initializer returned by [`Init::chain`].
+pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
+
+// SAFETY: The `__init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error and in this case `slot` will be dropped.
+unsafe impl<T: ?Sized, E, I, F> Init<T, E> for ChainInit<I, F, T, E>
+where
+ I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
+{
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ (self.1)(unsafe { &mut *slot }).map_err(|e| {
+ // SAFETY: `slot` was initialized above.
+ unsafe { core::ptr::drop_in_place(slot) };
+ e
+ })
+ }
}
-// SAFETY: Every in-place initializer can also be used as a pin-initializer.
-unsafe impl<T: ?Sized, E, I> PinInit<T, E> for I
+// SAFETY: `__pinned_init` behaves exactly the same as `__init`.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainInit<I, F, T, E>
where
I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
{
unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
- // SAFETY: `__init` meets the same requirements as `__pinned_init`, except that it does not
- // require `slot` to not move after init.
+ // SAFETY: `__init` has less strict requirements compared to `__pinned_init`.
unsafe { self.__init(slot) }
}
}
@@ -1189,6 +1007,93 @@ pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
unsafe { init_from_closure(|_| Ok(())) }
}
+/// Initializes an array by initializing each element via the provided initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// use kernel::{error::Error, init::init_array_from_fn};
+/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i)).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl Init<[T; N], E>
+where
+ I: Init<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+    // Counts the number of initialized elements and, when dropped, drops that many elements
+    // from `slot`.
+ let mut init_count = ScopeGuard::new_with_data(0, |i| {
+ // We now free every element that has been initialized before:
+ // SAFETY: The loop initialized exactly the values from 0..i and since we
+ // return `Err` below, the caller will consider the memory at `slot` as
+ // uninitialized.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ });
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
+ // requirements.
+ unsafe { init.__init(ptr) }?;
+ *init_count += 1;
+ }
+ init_count.dismiss();
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { init_from_closure(init) }
+}
+
+/// Initializes an array by initializing each element via the provided pin-initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// use kernel::{sync::{Arc, Mutex}, init::pin_init_array_from_fn, new_mutex};
+/// let array: Arc<[Mutex<usize>; 1_000]> =
+///     Arc::pin_init(pin_init_array_from_fn(|i| new_mutex!(i))).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn pin_init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl PinInit<[T; N], E>
+where
+ I: PinInit<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+    // Counts the number of initialized elements and, when dropped, drops that many elements
+    // from `slot`.
+ let mut init_count = ScopeGuard::new_with_data(0, |i| {
+ // We now free every element that has been initialized before:
+ // SAFETY: The loop initialized exactly the values from 0..i and since we
+ // return `Err` below, the caller will consider the memory at `slot` as
+ // uninitialized.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ });
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
+ // requirements.
+ unsafe { init.__pinned_init(ptr) }?;
+ *init_count += 1;
+ }
+ init_count.dismiss();
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { pin_init_from_closure(init) }
+}
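
Both array initializers above share the same recovery pattern: a counter held in a `ScopeGuard` records how many elements are live, so a failure mid-array drops exactly the already-initialized prefix before the error propagates. A runnable plain-Rust sketch of that pattern (no kernel types; `init_array` is a hypothetical name, and handling `Err` inline instead of in a guard means this sketch, unlike the `ScopeGuard` version, is not panic-safe):

```rust
use core::ptr;

unsafe fn init_array<T, E, const N: usize>(
    slot: *mut [T; N],
    mut make: impl FnMut(usize) -> Result<T, E>,
) -> Result<(), E> {
    let base = slot.cast::<T>();
    let mut initialized = 0;
    for i in 0..N {
        match make(i) {
            Ok(val) => {
                // SAFETY: `i < N`, so the write is in bounds of the array.
                unsafe { base.add(i).write(val) };
                initialized += 1;
            }
            Err(e) => {
                // Drop exactly the already-initialized prefix; the caller
                // then treats the whole slot as uninitialized again.
                unsafe {
                    ptr::drop_in_place(ptr::slice_from_raw_parts_mut(base, initialized))
                };
                return Err(e);
            }
        }
    }
    Ok(())
}
```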
+
// SAFETY: Every type can be initialized by-value.
unsafe impl<T, E> Init<T, E> for T {
unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
@@ -1197,6 +1102,13 @@ unsafe impl<T, E> Init<T, E> for T {
}
}
+// SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`.
+unsafe impl<T, E> PinInit<T, E> for T {
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ unsafe { self.__init(slot) }
+ }
+}
+
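A consequence of the two by-value blanket impls is that any plain value is already an initializer, so values can be passed wherever an `impl Init<T, E>` or `impl PinInit<T, E>` is expected. A hedged sketch in the style of the doctests above:

```rust
use kernel::error::Error;

// `42` is an initializer via `unsafe impl<T, E> Init<T, E> for T`.
let boxed = Box::init::<Error>(42)?;
assert_eq!(*boxed, 42);
```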
/// Smart pointer that can initialize memory in-place.
pub trait InPlaceInit<T>: Sized {
/// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
@@ -1385,6 +1297,11 @@ impl_zeroable! {
// SAFETY: Type is allowed to take any value, including all zeros.
{<T>} MaybeUninit<T>,
+ // SAFETY: Type is allowed to take any value, including all zeros.
+ {<T>} Opaque<T>,
+
+ // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`.
+ {<T: ?Sized + Zeroable>} UnsafeCell<T>,
// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
index 44751fb62b51..db3372619ecd 100644
--- a/rust/kernel/init/__internal.rs
+++ b/rust/kernel/init/__internal.rs
@@ -13,7 +13,7 @@ use super::*;
///
/// [nomicon]: https://doc.rust-lang.org/nomicon/subtyping.html
/// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns
-type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
+pub(super) type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this
/// type, since the closure needs to fulfill the same safety requirement as the
@@ -32,6 +32,18 @@ where
}
}
+// SAFETY: While constructing the `InitClosure`, the user promised that it upholds the
+// `__pinned_init` invariants.
+unsafe impl<T: ?Sized, F, E> PinInit<T, E> for InitClosure<F, T, E>
+where
+ F: FnOnce(*mut T) -> Result<(), E>,
+{
+ #[inline]
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ (self.0)(slot)
+ }
+}
+
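With `PinInit` now a supertrait of `Init`, `InitClosure` has to implement both; the `__pinned_init` body is the same closure call as `__init`. In practice this means a closure-built initializer can be used in pinned and unpinned positions alike. A hedged doctest-style sketch:

```rust
use kernel::{error::Error, init};

// SAFETY: The closure fully initializes `slot` and returns `Ok(())`.
let init = unsafe {
    init::init_from_closure::<_, Error>(|slot: *mut u64| {
        // SAFETY: `slot` is valid for writes per the `__init` contract.
        unsafe { slot.write(0xdead_beef) };
        Ok(())
    })
};
let v = Box::init(init)?;
assert_eq!(*v, 0xdead_beef);
```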
/// This trait is only implemented via the `#[pin_data]` proc-macro. It is used to facilitate
/// the pin projections within the initializers.
///
@@ -174,7 +186,6 @@ impl<T> StackInit<T> {
/// Can be forgotten to prevent the drop.
pub struct DropGuard<T: ?Sized> {
ptr: *mut T,
- do_drop: Cell<bool>,
}
impl<T: ?Sized> DropGuard<T> {
@@ -190,32 +201,16 @@ impl<T: ?Sized> DropGuard<T> {
/// - will not be dropped by any other means.
#[inline]
pub unsafe fn new(ptr: *mut T) -> Self {
- Self {
- ptr,
- do_drop: Cell::new(true),
- }
- }
-
- /// Prevents this guard from dropping the supplied pointer.
- ///
- /// # Safety
- ///
- /// This function is unsafe in order to prevent safe code from forgetting this guard. It should
- /// only be called by the macros in this module.
- #[inline]
- pub unsafe fn forget(&self) {
- self.do_drop.set(false);
+ Self { ptr }
}
}
impl<T: ?Sized> Drop for DropGuard<T> {
#[inline]
fn drop(&mut self) {
- if self.do_drop.get() {
- // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
- // ensuring that this operation is safe.
- unsafe { ptr::drop_in_place(self.ptr) }
- }
+ // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
+ // ensuring that this operation is safe.
+ unsafe { ptr::drop_in_place(self.ptr) }
}
}
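
The guard can lose its `do_drop` flag because the rewritten macros (see `macros.rs` below) keep each guard in a hygienic local and `::core::mem::forget(...)` all of them once every field is initialized, so a `DropGuard` that actually runs its destructor now always means failure. A runnable plain-Rust sketch of that protocol:

```rust
use core::{mem, ptr};

/// Dropping this guard always drops the pointee; success paths defuse it
/// with `mem::forget` instead of flipping a flag.
struct Guard<T>(*mut T);

impl<T> Drop for Guard<T> {
    fn drop(&mut self) {
        // SAFETY (sketch): `self.0` points at an initialized value that
        // nothing else will drop on this path.
        unsafe { ptr::drop_in_place(self.0) };
    }
}

fn main() {
    let mut field = String::from("already initialized field");
    let guard = Guard(&mut field as *mut String);
    // ... initialize the remaining fields; an early return or panic here
    // would drop `guard` and with it the partially built value ...
    // Everything succeeded: defuse the guard. `field` is then dropped once,
    // normally, at the end of `main`.
    mem::forget(guard);
}
```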
diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
index 541cfad1d8be..cb6e61b6c50b 100644
--- a/rust/kernel/init/macros.rs
+++ b/rust/kernel/init/macros.rs
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! This module provides the macros that actually implement the proc-macros `pin_data` and
-//! `pinned_drop`.
+//! `pinned_drop`. It also contains `__init_internal`, the implementation of the
+//! `{try_}{pin_}init!` macros.
//!
//! These macros should never be called directly, since they expect their input to be
-//! in a certain format which is internal. Use the proc-macros instead.
+//! in a certain format which is internal. If used incorrectly, these macros can lead to UB even in
+//! safe code! Use the public facing macros instead.
//!
//! This architecture has been chosen because the kernel does not yet have access to `syn` which
//! would make matters a lot easier for implementing these as proc-macros.
@@ -16,8 +18,9 @@
//!
//! We will look at the following example:
//!
-//! ```rust
+//! ```rust,ignore
//! # use kernel::init::*;
+//! # use core::pin::Pin;
//! #[pin_data]
//! #[repr(C)]
//! struct Bar<T> {
@@ -42,7 +45,7 @@
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//!
@@ -71,11 +74,12 @@
//!
//! Here is the definition of `Bar` from our example:
//!
-//! ```rust
+//! ```rust,ignore
//! # use kernel::init::*;
//! #[pin_data]
//! #[repr(C)]
//! struct Bar<T> {
+//! #[pin]
//! t: T,
//! pub x: usize,
//! }
@@ -83,7 +87,7 @@
//!
//! This expands to the following code:
//!
-//! ```rust
+//! ```rust,ignore
//! // Firstly the normal definition of the struct, attributes are preserved:
//! #[repr(C)]
//! struct Bar<T> {
@@ -116,20 +120,22 @@
//! unsafe fn t<E>(
//! self,
//! slot: *mut T,
-//! init: impl ::kernel::init::Init<T, E>,
+//! // Since `t` is `#[pin]`, this is `PinInit`.
+//! init: impl ::kernel::init::PinInit<T, E>,
//! ) -> ::core::result::Result<(), E> {
-//! unsafe { ::kernel::init::Init::__init(init, slot) }
+//! unsafe { ::kernel::init::PinInit::__pinned_init(init, slot) }
//! }
//! pub unsafe fn x<E>(
//! self,
//! slot: *mut usize,
+//! // Since `x` is not `#[pin]`, this is `Init`.
//! init: impl ::kernel::init::Init<usize, E>,
//! ) -> ::core::result::Result<(), E> {
//! unsafe { ::kernel::init::Init::__init(init, slot) }
//! }
//! }
//! // Implement the internal `HasPinData` trait that associates `Bar` with the pin-data struct
-//! // that we constructed beforehand.
+//! // that we constructed above.
//! unsafe impl<T> ::kernel::init::__internal::HasPinData for Bar<T> {
//! type PinData = __ThePinData<T>;
//! unsafe fn __pin_data() -> Self::PinData {
@@ -160,10 +166,14 @@
//! struct __Unpin<'__pin, T> {
//! __phantom_pin: ::core::marker::PhantomData<fn(&'__pin ()) -> &'__pin ()>,
//! __phantom: ::core::marker::PhantomData<fn(Bar<T>) -> Bar<T>>,
+//! // Our only `#[pin]` field is `t`.
+//! t: T,
//! }
//! #[doc(hidden)]
-//! impl<'__pin, T>
-//! ::core::marker::Unpin for Bar<T> where __Unpin<'__pin, T>: ::core::marker::Unpin {}
+//! impl<'__pin, T> ::core::marker::Unpin for Bar<T>
+//! where
+//! __Unpin<'__pin, T>: ::core::marker::Unpin,
+//! {}
//! // Now we need to ensure that `Bar` does not implement `Drop`, since that would give users
//! // access to `&mut self` inside of `drop` even if the struct was pinned. This could lead to
//! // UB with only safe code, so we disallow this by giving a trait implementation error using
@@ -180,8 +190,9 @@
//! // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`.
//! #[allow(non_camel_case_types)]
//! trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
-//! impl<T: ::kernel::init::PinnedDrop>
-//! UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
+//! impl<
+//! T: ::kernel::init::PinnedDrop,
+//! > UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
//! impl<T> UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for Bar<T> {}
//! };
//! ```
@@ -193,7 +204,7 @@
//!
//! Here is the impl on `Bar` defining the new function:
//!
-//! ```rust
+//! ```rust,ignore
//! impl<T> Bar<T> {
//! fn new(t: T) -> impl PinInit<Self> {
//! pin_init!(Self { t, x: 0 })
@@ -203,7 +214,7 @@
//!
//! This expands to the following code:
//!
-//! ```rust
+//! ```rust,ignore
//! impl<T> Bar<T> {
//! fn new(t: T) -> impl PinInit<Self> {
//! {
@@ -211,7 +222,7 @@
//! // return type and shadow it later when we insert the arbitrary user code. That way
//! // there will be no possibility of returning without `unsafe`.
//! struct __InitOk;
-//! // Get the pin-data type from the initialized type.
+//! // Get the data about fields from the supplied type.
//! // - the function is unsafe, hence the unsafe block
//! // - we `use` the `HasPinData` trait in the block, it is only available in that
//! // scope.
@@ -219,8 +230,7 @@
//! use ::kernel::init::__internal::HasPinData;
//! Self::__pin_data()
//! };
-//! // Use `data` to help with type inference, the closure supplied will have the type
-//! // `FnOnce(*mut Self) -> Result<__InitOk, Infallible>`.
+//! // Ensure that `data` really is of type `PinData` and help with type inference:
//! let init = ::kernel::init::__internal::PinData::make_closure::<
//! _,
//! __InitOk,
@@ -228,65 +238,75 @@
//! >(data, move |slot| {
//! {
//! // Shadow the structure so it cannot be used to return early. If a user
-//! // tries to write `return Ok(__InitOk)`, then they get a type error, since
-//! // that will refer to this struct instead of the one defined above.
+//! // tries to write `return Ok(__InitOk)`, then they get a type error,
+//! // since that will refer to this struct instead of the one defined
+//! // above.
//! struct __InitOk;
//! // This is the expansion of `t,`, which is syntactic sugar for `t: t,`.
-//! unsafe { ::core::ptr::write(&raw mut (*slot).t, t) };
-//! // Since initialization could fail later (not in this case, since the error
-//! // type is `Infallible`) we will need to drop this field if it fails. This
-//! // `DropGuard` will drop the field when it gets dropped and has not yet
-//! // been forgotten. We make a reference to it, so users cannot `mem::forget`
-//! // it from the initializer, since the name is the same as the field.
-//! let t = &unsafe {
-//! ::kernel::init::__internal::DropGuard::new(&raw mut (*slot).t)
+//! {
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).t), t) };
+//! }
+//! // Since initialization could fail later (not in this case, since the
+//! // error type is `Infallible`) we will need to drop this field if there
+//! // is an error later. This `DropGuard` will drop the field when it gets
+//! // dropped and has not yet been forgotten.
+//! let t = unsafe {
+//!                     ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).t))
//! };
//! // Expansion of `x: 0,`:
-//! // Since this can be an arbitrary expression we cannot place it inside of
-//! // the `unsafe` block, so we bind it here.
-//! let x = 0;
-//! unsafe { ::core::ptr::write(&raw mut (*slot).x, x) };
-//! let x = &unsafe {
-//! ::kernel::init::__internal::DropGuard::new(&raw mut (*slot).x)
+//! // Since this can be an arbitrary expression we cannot place it inside
+//! // of the `unsafe` block, so we bind it here.
+//! {
+//! let x = 0;
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).x), x) };
+//! }
+//! // We again create a `DropGuard`.
+//! let x = unsafe {
+//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).x))
//! };
-//!
-//! // Here we use the type checker to ensuer that every field has been
+//! // Since initialization has successfully completed, we can now forget
+//! // the guards. This is not `mem::forget`, since we only have
+//! // `&DropGuard`.
+//! ::core::mem::forget(x);
+//! ::core::mem::forget(t);
+//! // Here we use the type checker to ensure that every field has been
//! // initialized exactly once, since this is `if false` it will never get
//! // executed, but still type-checked.
-//! // Additionally we abuse `slot` to automatically infer the correct type for
-//! // the struct. This is also another check that every field is accessible
-//! // from this scope.
+//! // Additionally we abuse `slot` to automatically infer the correct type
+//! // for the struct. This is also another check that every field is
+//! // accessible from this scope.
//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
-//! if false {
+//! let _ = || {
//! unsafe {
//! ::core::ptr::write(
//! slot,
//! Self {
-//! // We only care about typecheck finding every field here,
-//! // the expression does not matter, just conjure one using
-//! // `panic!()`:
+//! // We only care about typecheck finding every field
+//! // here, the expression does not matter, just conjure
+//! // one using `panic!()`:
//! t: ::core::panic!(),
//! x: ::core::panic!(),
//! },
//! );
//! };
-//! }
-//! // Since initialization has successfully completed, we can now forget the
-//! // guards.
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(t) };
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(x) };
+//! };
//! }
//! // We leave the scope above and gain access to the previously shadowed
//! // `__InitOk` that we need to return.
//! Ok(__InitOk)
//! });
-//! // Change the return type of the closure.
-//! let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//! // Change the return type from `__InitOk` to `()`.
+//! let init = move |
+//! slot,
+//! | -> ::core::result::Result<(), ::core::convert::Infallible> {
//! init(slot).map(|__InitOk| ())
//! };
//! // Construct the initializer.
//! let init = unsafe {
-//! ::kernel::init::pin_init_from_closure::<_, ::core::convert::Infallible>(init)
+//! ::kernel::init::pin_init_from_closure::<
+//! _,
+//! ::core::convert::Infallible,
+//! >(init)
//! };
//! init
//! }
@@ -299,7 +319,7 @@
//! Since we already took a look at `#[pin_data]` on `Bar`, this section will only explain the
//! differences/new things in the expansion of the `Foo` definition:
//!
-//! ```rust
+//! ```rust,ignore
//! #[pin_data(PinnedDrop)]
//! struct Foo {
//! a: usize,
@@ -310,7 +330,7 @@
//!
//! This expands to the following code:
//!
-//! ```rust
+//! ```rust,ignore
//! struct Foo {
//! a: usize,
//! b: Bar<u32>,
@@ -330,8 +350,6 @@
//! unsafe fn b<E>(
//! self,
//! slot: *mut Bar<u32>,
-//! // Note that this is `PinInit` instead of `Init`, this is because `b` is
-//! // structurally pinned, as marked by the `#[pin]` attribute.
//! init: impl ::kernel::init::PinInit<Bar<u32>, E>,
//! ) -> ::core::result::Result<(), E> {
//! unsafe { ::kernel::init::PinInit::__pinned_init(init, slot) }
@@ -359,14 +377,16 @@
//! struct __Unpin<'__pin> {
//! __phantom_pin: ::core::marker::PhantomData<fn(&'__pin ()) -> &'__pin ()>,
//! __phantom: ::core::marker::PhantomData<fn(Foo) -> Foo>,
-//! // Since this field is `#[pin]`, it is listed here.
//! b: Bar<u32>,
//! }
//! #[doc(hidden)]
-//! impl<'__pin> ::core::marker::Unpin for Foo where __Unpin<'__pin>: ::core::marker::Unpin {}
+//! impl<'__pin> ::core::marker::Unpin for Foo
+//! where
+//! __Unpin<'__pin>: ::core::marker::Unpin,
+//! {}
//! // Since we specified `PinnedDrop` as the argument to `#[pin_data]`, we expect `Foo` to
//! // implement `PinnedDrop`. Thus we do not need to prevent `Drop` implementations like
-//! // before, instead we implement it here and delegate to `PinnedDrop`.
+//! // before, instead we implement `Drop` here and delegate to `PinnedDrop`.
//! impl ::core::ops::Drop for Foo {
//! fn drop(&mut self) {
//! // Since we are getting dropped, no one else has a reference to `self` and thus we
@@ -388,32 +408,32 @@
//!
//! Here is the `PinnedDrop` impl for `Foo`:
//!
-//! ```rust
+//! ```rust,ignore
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//! ```
//!
//! This expands to the following code:
//!
-//! ```rust
+//! ```rust,ignore
//! // `unsafe`, full path and the token parameter are added, everything else stays the same.
//! unsafe impl ::kernel::init::PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//! ```
//!
//! ## `pin_init!` on `Foo`
//!
-//! Since we already took a look at `pin_init!` on `Bar`, this section will only explain the
-//! differences/new things in the expansion of `pin_init!` on `Foo`:
+//! Since we already took a look at `pin_init!` on `Bar`, this section will only show the expansion
+//! of `pin_init!` on `Foo`:
//!
-//! ```rust
+//! ```rust,ignore
//! let a = 42;
//! let initializer = pin_init!(Foo {
//! a,
@@ -423,7 +443,7 @@
//!
//! This expands to the following code:
//!
-//! ```rust
+//! ```rust,ignore
//! let a = 42;
//! let initializer = {
//! struct __InitOk;
@@ -438,16 +458,21 @@
//! >(data, move |slot| {
//! {
//! struct __InitOk;
-//! unsafe { ::core::ptr::write(&raw mut (*slot).a, a) };
-//! let a = &unsafe { ::kernel::init::__internal::DropGuard::new(&raw mut (*slot).a) };
-//! let b = Bar::new(36);
-//! // Here we use `data` to access the correct field and require that `b` is of type
-//! // `PinInit<Bar<u32>, Infallible>`.
-//! unsafe { data.b(&raw mut (*slot).b, b)? };
-//! let b = &unsafe { ::kernel::init::__internal::DropGuard::new(&raw mut (*slot).b) };
-//!
+//! {
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).a), a) };
+//! }
+//! let a = unsafe {
+//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).a))
+//! };
+//! let init = Bar::new(36);
+//!             unsafe { data.b(::core::addr_of_mut!((*slot).b), init)? };
+//! let b = unsafe {
+//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).b))
+//! };
+//! ::core::mem::forget(b);
+//! ::core::mem::forget(a);
//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
-//! if false {
+//! let _ = || {
//! unsafe {
//! ::core::ptr::write(
//! slot,
@@ -457,13 +482,13 @@
//! },
//! );
//! };
-//! }
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(a) };
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(b) };
+//! };
//! }
//! Ok(__InitOk)
//! });
-//! let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//! let init = move |
+//! slot,
+//! | -> ::core::result::Result<(), ::core::convert::Infallible> {
//! init(slot).map(|__InitOk| ())
//! };
//! let init = unsafe {
@@ -949,6 +974,7 @@ macro_rules! __pin_data {
where $($whr)*
{
$(
+ $(#[$($p_attr)*])*
$pvis unsafe fn $p_field<E>(
self,
slot: *mut $p_type,
@@ -958,6 +984,7 @@ macro_rules! __pin_data {
}
)*
$(
+ $(#[$($attr)*])*
$fvis unsafe fn $field<E>(
self,
slot: *mut $type,
@@ -969,3 +996,388 @@ macro_rules! __pin_data {
}
};
}
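
The two `$(#[$($p_attr)*])*`/`$(#[$($attr)*])*` additions in `__pin_data!` above forward each field's attributes onto its generated accessor. A hedged illustration of why that matters (the `CONFIG_DEBUG_FS` gate and the `Stats` type are hypothetical):

```rust
#[pin_data]
struct Driver {
    #[cfg(CONFIG_DEBUG_FS)]
    #[pin]
    stats: Stats,
    count: u32,
}
// Without the forwarding, `__ThePinData` would unconditionally emit a
// `stats` accessor, which fails to compile whenever `CONFIG_DEBUG_FS` is
// disabled because the field it writes to does not exist.
```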
+
+/// The internal init macro. Do not call manually!
+///
+/// This is called by the `{try_}{pin_}init!` macros with various inputs.
+///
+/// This macro has multiple internal call configurations, these are always the very first ident:
+/// - nothing: this is the base case and called by the `{try_}{pin_}init!` macros.
+/// - `with_update_parsed`: when the `..Zeroable::zeroed()` syntax has been handled.
+/// - `init_slot`: recursively creates the code that initializes all fields in `slot`.
+/// - `make_initializer`: recursively create the struct initializer that guarantees that every
+/// field has been initialized exactly once.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __init_internal {
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields(),
+ ) => {
+ $crate::__init_internal!(with_update_parsed:
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @zeroed(), // Nothing means default behavior.
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields(..Zeroable::zeroed()),
+ ) => {
+ $crate::__init_internal!(with_update_parsed:
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @zeroed(()), // `()` means zero all fields not mentioned.
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields($ignore:tt $($rest:tt)*),
+ ) => {
+ $crate::__init_internal!(
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @munch_fields($($rest)*),
+ )
+ };
+ (with_update_parsed:
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @zeroed($($init_zeroed:expr)?),
+ ) => {{
+ // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
+ // type and shadow it later when we insert the arbitrary user code. That way there will be
+ // no possibility of returning without `unsafe`.
+ struct __InitOk;
+ // Get the data about fields from the supplied type.
+ let data = unsafe {
+ use $crate::init::__internal::$has_data;
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated to already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid rust
+ // code.
+ ::kernel::macros::paste!($t::$get_data())
+ };
+ // Ensure that `data` really is of type `$data` and help with type inference:
+ let init = $crate::init::__internal::$data::make_closure::<_, __InitOk, $err>(
+ data,
+ move |slot| {
+ {
+ // Shadow the structure so it cannot be used to return early.
+ struct __InitOk;
+ // If `$init_zeroed` is present we should zero the slot now and not emit an
+ // error when fields are missing (since they will be zeroed). We also have to
+ // check that the type actually implements `Zeroable`.
+ $({
+ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
+ // Ensure that the struct is indeed `Zeroable`.
+ assert_zeroable(slot);
+ // SAFETY: The type implements `Zeroable` by the check above.
+ unsafe { ::core::ptr::write_bytes(slot, 0, 1) };
+ $init_zeroed // This will be `()` if set.
+ })?
+ // Create the `this` so it can be referenced by the user inside of the
+ // expressions creating the individual fields.
+ $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
+ // Initialize every field.
+ $crate::__init_internal!(init_slot($($use_data)?):
+ @data(data),
+ @slot(slot),
+ @guards(),
+ @munch_fields($($fields)*,),
+ );
+ // We use unreachable code to ensure that all fields have been mentioned exactly
+                    // once; this struct initializer will still be type-checked and complain with a
+ // very natural error message if a field is forgotten/mentioned more than once.
+ #[allow(unreachable_code, clippy::diverging_sub_expression)]
+ let _ = || {
+ $crate::__init_internal!(make_initializer:
+ @slot(slot),
+ @type_name($t),
+ @munch_fields($($fields)*,),
+ @acc(),
+ );
+ };
+ }
+ Ok(__InitOk)
+ }
+ );
+ let init = move |slot| -> ::core::result::Result<(), $err> {
+ init(slot).map(|__InitOk| ())
+ };
+ let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) };
+ init
+ }};
+ (init_slot($($use_data:ident)?):
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ @munch_fields($(..Zeroable::zeroed())? $(,)?),
+ ) => {
+ // Endpoint of munching, no fields are left. If execution reaches this point, all fields
+ // have been initialized. Therefore we can now dismiss the guards by forgetting them.
+ $(::core::mem::forget($guards);)*
+ };
+ (init_slot($use_data:ident): // `use_data` is present, so we use the `data` to init fields.
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // In-place initialization syntax.
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let init = $val;
+ // Call the initializer.
+ //
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot($use_data):
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot(): // No `use_data`, so we use `Init::__init` directly.
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // In-place initialization syntax.
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let init = $val;
+ // Call the initializer.
+ //
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($($use_data:ident)?):
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot($($use_data)?):
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields(..Zeroable::zeroed() $(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+        // Endpoint, nothing more to munch, create the initializer. Since the user specified
+        // `..Zeroable::zeroed()`, the slot will already have been zeroed and all fields that have
+ // not been overwritten are thus zero and initialized. We still check that all fields are
+ // actually accessible by using the struct update syntax ourselves.
+ // We are inside of a closure that is never executed and thus we can abuse `slot` to
+ // get the correct type inference here:
+ #[allow(unused_assignments)]
+ unsafe {
+ let mut zeroed = ::core::mem::zeroed();
+ // We have to use type inference here to make zeroed have the correct type. This does
+ // not get executed, so it has no effect.
+ ::core::ptr::write($slot, zeroed);
+ zeroed = ::core::mem::zeroed();
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated to already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid rust
+ // code.
+ ::kernel::macros::paste!(
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ ..zeroed
+ });
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+ // Endpoint, nothing more to munch, create the initializer.
+ // Since we are in the closure that is never called, this will never get executed.
+ // We abuse `slot` to get the correct type inference here:
+ unsafe {
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated to already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid rust
+ // code.
+ ::kernel::macros::paste!(
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ });
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::__init_internal!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::__init_internal!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+}
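The `with_update_parsed:`/`@zeroed` plumbing above is what implements the new struct-update syntax: a trailing `..Zeroable::zeroed()` zero-fills the slot up front (after checking that the type is `Zeroable`), so fields left unmentioned are valid zeroes. A hedged usage sketch (`Config` and its fields are illustrative):

```rust
use kernel::init::Zeroable;
use kernel::prelude::*; // brings in the `Zeroable` derive

#[derive(Zeroable)]
struct Config {
    enabled: bool,
    retries: u32,
    budget: u64,
}

// Only `retries` is spelled out; `enabled` and `budget` stay zero because
// `..Zeroable::zeroed()` zero-fills the whole slot before any field runs.
let cfg = kernel::init!(Config {
    retries: 3,
    ..Zeroable::zeroed()
});
```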
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __derive_zeroable {
+ (parse_input:
+ @sig(
+ $(#[$($struct_attr:tt)*])*
+ $vis:vis struct $name:ident
+ $(where $($whr:tt)*)?
+ ),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @body({
+ $(
+ $(#[$($field_attr:tt)*])*
+ $field:ident : $field_ty:ty
+ ),* $(,)?
+ }),
+ ) => {
+ // SAFETY: Every field type implements `Zeroable` and padding bytes may be zero.
+ #[automatically_derived]
+ unsafe impl<$($impl_generics)*> $crate::init::Zeroable for $name<$($ty_generics)*>
+ where
+ $($($whr)*)?
+ {}
+ const _: () = {
+ fn assert_zeroable<T: ?::core::marker::Sized + $crate::init::Zeroable>() {}
+ fn ensure_zeroable<$($impl_generics)*>()
+ where $($($whr)*)?
+ {
+ $(assert_zeroable::<$field_ty>();)*
+ }
+ };
+ };
+}
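`__derive_zeroable!` emits the `unsafe impl ... Zeroable` together with a compile-time probe: `ensure_zeroable` instantiates `assert_zeroable::<FieldTy>()` for every field, so a struct with a non-`Zeroable` field fails to build instead of gaining an unsound impl. Hand-expanded (not literal macro output), the check for a two-field struct looks roughly like this:

```rust
// For: #[derive(Zeroable)] struct Pair { a: u32, b: Option<core::num::NonZeroU64> }
const _: () = {
    fn assert_zeroable<T: ?::core::marker::Sized + kernel::init::Zeroable>() {}
    fn ensure_zeroable() {
        assert_zeroable::<u32>();
        assert_zeroable::<Option<core::num::NonZeroU64>>();
    }
};
```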
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
new file mode 100644
index 000000000000..722655b2d62d
--- /dev/null
+++ b/rust/kernel/kunit.rs
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! KUnit-based macros for Rust unit tests.
+//!
+//! C header: [`include/kunit/test.h`](../../../../../include/kunit/test.h)
+//!
+//! Reference: <https://docs.kernel.org/dev-tools/kunit/index.html>
+
+use core::{ffi::c_void, fmt};
+
+/// Prints a KUnit error-level message.
+///
+/// Public but hidden since it should only be used from KUnit generated code.
+#[doc(hidden)]
+pub fn err(args: fmt::Arguments<'_>) {
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_printk(
+ b"\x013%pA\0".as_ptr() as _,
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Prints a KUnit info-level message.
+///
+/// Public but hidden since it should only be used from KUnit generated code.
+#[doc(hidden)]
+pub fn info(args: fmt::Arguments<'_>) {
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_printk(
+ b"\x016%pA\0".as_ptr() as _,
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Asserts that a boolean expression is `true` at runtime.
+///
+/// Public but hidden since it should only be used from generated tests.
+///
+/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
+/// facilities. See [`assert!`] for more details.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! kunit_assert {
+ ($name:literal, $file:literal, $diff:expr, $condition:expr $(,)?) => {
+ 'out: {
+ // Do nothing if the condition is `true`.
+ if $condition {
+ break 'out;
+ }
+
+ static FILE: &'static $crate::str::CStr = $crate::c_str!($file);
+ static LINE: i32 = core::line!() as i32 - $diff;
+ static CONDITION: &'static $crate::str::CStr = $crate::c_str!(stringify!($condition));
+
+ // SAFETY: FFI call without safety requirements.
+ let kunit_test = unsafe { $crate::bindings::kunit_get_current_test() };
+ if kunit_test.is_null() {
+ // The assertion failed but this task is not running a KUnit test, so we cannot call
+ // KUnit, but at least print an error to the kernel log. This may happen if this
+            // macro is called from a spawned thread in a test (see
+ // `scripts/rustdoc_test_gen.rs`) or if some non-test code calls this macro by
+ // mistake (it is hidden to prevent that).
+ //
+ // This mimics KUnit's failed assertion format.
+ $crate::kunit::err(format_args!(
+ " # {}: ASSERTION FAILED at {FILE}:{LINE}\n",
+ $name
+ ));
+ $crate::kunit::err(format_args!(
+ " Expected {CONDITION} to be true, but is false\n"
+ ));
+ $crate::kunit::err(format_args!(
+ " Failure not reported to KUnit since this is a non-KUnit task\n"
+ ));
+ break 'out;
+ }
+
+ #[repr(transparent)]
+ struct Location($crate::bindings::kunit_loc);
+
+ #[repr(transparent)]
+ struct UnaryAssert($crate::bindings::kunit_unary_assert);
+
+ // SAFETY: There is only a static instance and in that one the pointer field points to
+ // an immutable C string.
+ unsafe impl Sync for Location {}
+
+ // SAFETY: There is only a static instance and in that one the pointer field points to
+ // an immutable C string.
+ unsafe impl Sync for UnaryAssert {}
+
+ static LOCATION: Location = Location($crate::bindings::kunit_loc {
+ file: FILE.as_char_ptr(),
+ line: LINE,
+ });
+ static ASSERTION: UnaryAssert = UnaryAssert($crate::bindings::kunit_unary_assert {
+ assert: $crate::bindings::kunit_assert {},
+ condition: CONDITION.as_char_ptr(),
+ expected_true: true,
+ });
+
+ // SAFETY:
+ // - FFI call.
+ // - The `kunit_test` pointer is valid because we got it from
+ // `kunit_get_current_test()` and it was not null. This means we are in a KUnit
+ // test, and that the pointer can be passed to KUnit functions and assertions.
+ // - The string pointers (`file` and `condition` above) point to null-terminated
+ // strings since they are `CStr`s.
+ // - The function pointer (`format`) points to the proper function.
+ // - The pointers passed will remain valid since they point to `static`s.
+ // - The format string is allowed to be null.
+ // - There are, however, problems with this: first of all, this will end up stopping
+ // the thread, without running destructors. While that is problematic in itself,
+ // it is considered UB to have what is effectively a forced foreign unwind
+ // with `extern "C"` ABI. One could observe the stack that is now gone from
+ // another thread. We should avoid pinning stack variables to prevent library UB,
+ // too. For the moment, given that test failures are reported immediately before the
+ // next test runs, that test failures should be fixed and that KUnit is explicitly
+ // documented as not suitable for production environments, we feel it is reasonable.
+ unsafe {
+ $crate::bindings::__kunit_do_failed_assertion(
+ kunit_test,
+ core::ptr::addr_of!(LOCATION.0),
+ $crate::bindings::kunit_assert_type_KUNIT_ASSERTION,
+ core::ptr::addr_of!(ASSERTION.0.assert),
+ Some($crate::bindings::kunit_unary_assert_format),
+ core::ptr::null(),
+ );
+ }
+
+ // SAFETY: FFI call; the `test` pointer is valid because this hidden macro should only
+ // be called by the generated documentation tests which forward the test pointer given
+ // by KUnit.
+ unsafe {
+ $crate::bindings::__kunit_abort(kunit_test);
+ }
+ }
+ };
+}
+
+/// Asserts that two expressions are equal to each other (using [`PartialEq`]).
+///
+/// Public but hidden since it should only be used from generated tests.
+///
+/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
+/// facilities. See [`assert!`] for more details.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! kunit_assert_eq {
+ ($name:literal, $file:literal, $diff:expr, $left:expr, $right:expr $(,)?) => {{
+ // For the moment, we just forward to the expression assert because, for binary asserts,
+ // KUnit supports only a few types (e.g. integers).
+ $crate::kunit_assert!($name, $file, $diff, $left == $right);
+ }};
+}
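Both macros take a `$diff` because the generated doctest file does not share line numbers with the original source; the generator passes the offset so the reported `LINE` lands back in the user's code. A hedged sketch of an invocation as the generated code might emit it (the test name and offset are illustrative, not literal generator output):

```rust
kernel::kunit_assert_eq!(
    "rust_doctest_kernel_str_0", // doctest name reported on failure
    "rust/kernel/str.rs",        // file of the original doctest
    3,                           // line offset: generated file vs. original
    1 + 1,
    2,
);
```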
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 676995d4e460..e8811700239a 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -14,12 +14,8 @@
#![no_std]
#![feature(allocator_api)]
#![feature(coerce_unsized)]
-#![feature(core_ffi_c)]
#![feature(dispatch_from_dyn)]
-#![feature(explicit_generic_args_with_impl_trait)]
-#![feature(generic_associated_types)]
#![feature(new_uninit)]
-#![feature(pin_macro)]
#![feature(receiver_trait)]
#![feature(unsize)]
@@ -38,6 +34,8 @@ mod build_assert;
pub mod error;
pub mod init;
pub mod ioctl;
+#[cfg(CONFIG_KUNIT)]
+pub mod kunit;
pub mod prelude;
pub mod print;
mod static_assert;
@@ -97,7 +95,4 @@ fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
pr_emerg!("{}\n", info);
// SAFETY: FFI call.
unsafe { bindings::BUG() };
- // Bindgen currently does not recognize `__noreturn` so `BUG` returns `()`
- // instead of `!`. See <https://github.com/rust-lang/rust-bindgen/issues/2094>.
- loop {}
}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index c28587d68ebc..ae21600970b3 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -18,7 +18,7 @@ pub use core::pin::Pin;
pub use alloc::{boxed::Box, vec::Vec};
#[doc(no_inline)]
-pub use macros::{module, pin_data, pinned_drop, vtable};
+pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable};
pub use super::build_assert;
diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
index b3e68b24a8c6..388d6a5147a2 100644
--- a/rust/kernel/std_vendor.rs
+++ b/rust/kernel/std_vendor.rs
@@ -137,6 +137,8 @@
/// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html
/// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html
/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
+/// [`pr_info`]: crate::pr_info!
+/// [`pr_debug`]: crate::pr_debug!
#[macro_export]
macro_rules! dbg {
// NOTE: We cannot use `concat!` to make a static string as a format argument
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index cd3d2a6cf1fc..c41607b2e4fe 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -2,6 +2,7 @@
//! String representations.
+use alloc::alloc::AllocError;
use alloc::vec::Vec;
use core::fmt::{self, Write};
use core::ops::{self, Deref, Index};
@@ -199,6 +200,12 @@ impl CStr {
pub unsafe fn as_str_unchecked(&self) -> &str {
unsafe { core::str::from_utf8_unchecked(self.as_bytes()) }
}
+
+ /// Convert this [`CStr`] into a [`CString`] by allocating memory and
+ /// copying over the string data.
+ pub fn to_cstring(&self) -> Result<CString, AllocError> {
+ CString::try_from(self)
+ }
}
impl fmt::Display for CStr {
@@ -206,6 +213,7 @@ impl fmt::Display for CStr {
///
/// ```
/// # use kernel::c_str;
+ /// # use kernel::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
@@ -234,6 +242,7 @@ impl fmt::Debug for CStr {
///
/// ```
/// # use kernel::c_str;
+ /// # use kernel::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
@@ -522,7 +531,7 @@ impl fmt::Write for Formatter {
/// # Examples
///
/// ```
-/// use kernel::str::CString;
+/// use kernel::{str::CString, fmt};
///
/// let s = CString::try_from_fmt(fmt!("{}{}{}", "abc", 10, 20)).unwrap();
/// assert_eq!(s.as_bytes_with_nul(), "abc1020\0".as_bytes());
@@ -584,6 +593,21 @@ impl Deref for CString {
}
}
+impl<'a> TryFrom<&'a CStr> for CString {
+ type Error = AllocError;
+
+ fn try_from(cstr: &'a CStr) -> Result<CString, AllocError> {
+ let mut buf = Vec::new();
+
+ buf.try_extend_from_slice(cstr.as_bytes_with_nul())
+ .map_err(|_| AllocError)?;
+
+ // INVARIANT: The `CStr` and `CString` types have the same invariants for
+ // the string data, and we copied it over without changes.
+ Ok(CString { buf })
+ }
+}
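With the `TryFrom` impl in place, `to_cstring` above is a thin convenience wrapper: both spellings allocate, copy the bytes including the `NUL` terminator, and report allocation failure as `AllocError`. A doctest-style sketch:

```rust
use kernel::c_str;
use kernel::str::{CStr, CString};

let name: &CStr = c_str!("rust_minimal");
let owned = name.to_cstring()?;
assert_eq!(owned.as_bytes_with_nul(), name.as_bytes_with_nul());
// Equivalent explicit form:
let owned = CString::try_from(name)?;
assert_eq!(owned.as_bytes_with_nul(), name.as_bytes_with_nul());
```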
+
/// A convenience alias for [`core::format_args`].
#[macro_export]
macro_rules! fmt {
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index e6d206242465..3d496391a9bd 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -73,6 +73,7 @@ mod std_vendor;
/// assert_eq!(cloned.b, 20);
///
/// // The refcount drops to zero when `cloned` goes out of scope, and the memory is freed.
+/// # Ok::<(), Error>(())
/// ```
///
/// Using `Arc<T>` as the type of `self`:
@@ -98,6 +99,7 @@ mod std_vendor;
/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
/// obj.use_reference();
/// obj.take_over();
+/// # Ok::<(), Error>(())
/// ```
///
/// Coercion from `Arc<Example>` to `Arc<dyn MyTrait>`:
@@ -121,6 +123,7 @@ mod std_vendor;
///
/// // `coerced` has type `Arc<dyn MyTrait>`.
/// let coerced: Arc<dyn MyTrait> = obj;
+/// # Ok::<(), Error>(())
/// ```
pub struct Arc<T: ?Sized> {
ptr: NonNull<ArcInner<T>>,
@@ -146,13 +149,15 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Ar
// SAFETY: It is safe to send `Arc<T>` to another thread when the underlying `T` is `Sync` because
// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
-// `T` to be `Send` because any thread that has an `Arc<T>` may ultimately access `T` directly, for
-// example, when the reference count reaches zero and `T` is dropped.
+// `T` to be `Send` because any thread that has an `Arc<T>` may ultimately access `T` using a
+// mutable reference when the reference count reaches zero and `T` is dropped.
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
-// SAFETY: It is safe to send `&Arc<T>` to another thread when the underlying `T` is `Sync` for the
-// same reason as above. `T` needs to be `Send` as well because a thread can clone an `&Arc<T>`
-// into an `Arc<T>`, which may lead to `T` being accessed by the same reasoning as above.
+// SAFETY: It is safe to send `&Arc<T>` to another thread when the underlying `T` is `Sync`
+// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
+// it needs `T` to be `Send` because any thread that has a `&Arc<T>` may clone it and get an
+// `Arc<T>` on that thread, so the thread may ultimately access `T` using a mutable reference when
+// the reference count reaches zero and `T` is dropped.
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
impl<T> Arc<T> {
@@ -185,7 +190,7 @@ impl<T> Arc<T> {
/// Use the given initializer to in-place initialize a `T`.
///
- /// This is equivalent to [`pin_init`], since an [`Arc`] is always pinned.
+ /// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned.
#[inline]
pub fn init<E>(init: impl Init<T, E>) -> error::Result<Self>
where
@@ -221,6 +226,11 @@ impl<T: ?Sized> Arc<T> {
// reference can be created.
unsafe { ArcBorrow::new(self.ptr) }
}
+
+	/// Compares whether two [`Arc`] pointers reference the same underlying object.
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ core::ptr::eq(this.ptr.as_ptr(), other.ptr.as_ptr())
+ }
}
impl<T: 'static> ForeignOwnable for Arc<T> {
@@ -236,8 +246,7 @@ impl<T: 'static> ForeignOwnable for Arc<T> {
let inner = NonNull::new(ptr as *mut ArcInner<T>).unwrap();
// SAFETY: The safety requirements of `from_foreign` ensure that the object remains alive
- // for the lifetime of the returned value. Additionally, the safety requirements of
- // `ForeignOwnable::borrow_mut` ensure that no new mutable references are created.
+ // for the lifetime of the returned value.
unsafe { ArcBorrow::new(inner) }
}
@@ -259,6 +268,12 @@ impl<T: ?Sized> Deref for Arc<T> {
}
}
+impl<T: ?Sized> AsRef<T> for Arc<T> {
+ fn as_ref(&self) -> &T {
+ self.deref()
+ }
+}
+
impl<T: ?Sized> Clone for Arc<T> {
fn clone(&self) -> Self {
// INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero.
@@ -324,7 +339,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// # Example
///
/// ```
-/// use crate::sync::{Arc, ArcBorrow};
+/// use kernel::sync::{Arc, ArcBorrow};
///
/// struct Example;
///
@@ -337,12 +352,13 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
///
/// // Assert that both `obj` and `cloned` point to the same underlying object.
/// assert!(core::ptr::eq(&*obj, &*cloned));
+/// # Ok::<(), Error>(())
/// ```
///
/// Using `ArcBorrow<T>` as the type of `self`:
///
/// ```
-/// use crate::sync::{Arc, ArcBorrow};
+/// use kernel::sync::{Arc, ArcBorrow};
///
/// struct Example {
/// a: u32,
@@ -357,6 +373,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
///
/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
/// obj.as_arc_borrow().use_reference();
+/// # Ok::<(), Error>(())
/// ```
pub struct ArcBorrow<'a, T: ?Sized + 'a> {
inner: NonNull<ArcInner<T>>,
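
[Editorial note] The new `Arc::ptr_eq` is an associated function (taking `this: &Self`) rather than a method, mirroring `alloc::sync::Arc`, so it cannot be shadowed by a `ptr_eq` method on `T`. A usage sketch, assuming `kernel::sync::Arc` as above:

```rust
use kernel::prelude::*;
use kernel::sync::Arc;

fn example() -> Result<()> {
    let a = Arc::try_new(42u32)?;
    let b = a.clone();
    let c = Arc::try_new(42u32)?;

    // Pointer identity, not value equality: `a` and `b` share one
    // allocation, while `c` is distinct even though the values match.
    assert!(Arc::ptr_eq(&a, &b));
    assert!(!Arc::ptr_eq(&a, &c));
    Ok(())
}
```
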
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index a2216325632d..70a785f04754 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -72,8 +72,8 @@ pub unsafe trait Backend {
/// A mutual exclusion primitive.
///
-/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
-/// specified as the generic parameter `B`.
+/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
+/// [`Backend`] specified as the generic parameter `B`.
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
/// The kernel lock object.
@@ -126,7 +126,7 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// A lock guard.
///
-/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock
+/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 923472f04af4..09276fedc091 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -63,6 +63,7 @@ macro_rules! new_mutex {
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
+/// # Ok::<(), Error>(())
/// ```
///
/// The following example shows how to use interior mutability to modify the contents of a struct
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 979b56464a4e..91eb2c9e9123 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -61,6 +61,7 @@ macro_rules! new_spinlock {
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
+/// # Ok::<(), Error>(())
/// ```
///
/// The following example shows how to use interior mutability to modify the contents of a struct
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
index 526d29a0ae27..7eda15e5f1b3 100644
--- a/rust/kernel/task.rs
+++ b/rust/kernel/task.rs
@@ -64,8 +64,14 @@ macro_rules! current {
#[repr(transparent)]
pub struct Task(pub(crate) Opaque<bindings::task_struct>);
-// SAFETY: It's OK to access `Task` through references from other threads because we're either
-// accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly
+// SAFETY: By design, the only way to access a `Task` is via the `current` function or via an
+// `ARef<Task>` obtained through the `AlwaysRefCounted` impl. This means that the only situation in
+// which a `Task` can be accessed mutably is when the refcount drops to zero and the destructor
+// runs. It is safe for that to happen on any thread, so it is OK for this type to be `Send`.
+unsafe impl Send for Task {}
+
+// SAFETY: It's OK to access `Task` through shared references from other threads because we're
+// either accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly
// synchronised by C code (e.g., `signal_pending`).
unsafe impl Sync for Task {}
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index 29db59d6119a..fdb778e65d79 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -6,7 +6,7 @@ use crate::init::{self, PinInit};
use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
- marker::PhantomData,
+ marker::{PhantomData, PhantomPinned},
mem::MaybeUninit,
ops::{Deref, DerefMut},
ptr::NonNull,
@@ -35,34 +35,16 @@ pub trait ForeignOwnable: Sized {
///
/// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
/// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
- /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow_mut`]
- /// for this object must have been dropped.
unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>;
- /// Mutably borrows a foreign-owned object.
- ///
- /// # Safety
- ///
- /// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
- /// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
- /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and
- /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped.
- unsafe fn borrow_mut(ptr: *const core::ffi::c_void) -> ScopeGuard<Self, fn(Self)> {
- // SAFETY: The safety requirements ensure that `ptr` came from a previous call to
- // `into_foreign`.
- ScopeGuard::new_with_data(unsafe { Self::from_foreign(ptr) }, |d| {
- d.into_foreign();
- })
- }
-
/// Converts a foreign-owned object back to a Rust-owned one.
///
/// # Safety
///
/// `ptr` must have been returned by a previous call to [`ForeignOwnable::into_foreign`] for
/// which a previous matching [`ForeignOwnable::from_foreign`] hasn't been called yet.
- /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] and
- /// [`ForeignOwnable::borrow_mut`] for this object must have been dropped.
+ /// Additionally, all instances (if any) of values returned by [`ForeignOwnable::borrow`] for
+ /// this object must have been dropped.
unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self;
}
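
[Editorial note] With `borrow_mut` gone, the `ForeignOwnable` contract reduces to `into_foreign`/`borrow`/`from_foreign`. A round-trip sketch, assuming the trait's `Box<T>` implementation in `types.rs`:

```rust
use alloc::boxed::Box;
use kernel::types::ForeignOwnable;

fn round_trip() -> Result<(), alloc::alloc::AllocError> {
    let data = Box::try_new(10u32)?;
    // Ownership crosses the FFI boundary as an opaque pointer...
    let raw: *const core::ffi::c_void = data.into_foreign();
    // ...and is reclaimed exactly once on the way back.
    // SAFETY: `raw` came from `into_foreign` above, `from_foreign` is
    // called only once for it, and no `borrow` results are still live.
    let data = unsafe { Box::<u32>::from_foreign(raw) };
    assert_eq!(*data, 10);
    Ok(())
}
```
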
@@ -109,7 +91,7 @@ impl ForeignOwnable for () {
/// In the example below, we have multiple exit paths and we want to log regardless of which one is
/// taken:
/// ```
-/// # use kernel::ScopeGuard;
+/// # use kernel::types::ScopeGuard;
/// fn example1(arg: bool) {
/// let _log = ScopeGuard::new(|| pr_info!("example1 completed\n"));
///
@@ -127,7 +109,7 @@ impl ForeignOwnable for () {
/// In the example below, we want to log the same message on all early exits but a different one on
/// the main exit path:
/// ```
-/// # use kernel::ScopeGuard;
+/// # use kernel::types::ScopeGuard;
/// fn example2(arg: bool) {
/// let log = ScopeGuard::new(|| pr_info!("example2 returned early\n"));
///
@@ -148,7 +130,7 @@ impl ForeignOwnable for () {
/// In the example below, we need a mutable object (the vector) to be accessible within the log
/// function, so we wrap it in the [`ScopeGuard`]:
/// ```
-/// # use kernel::ScopeGuard;
+/// # use kernel::types::ScopeGuard;
/// fn example3(arg: bool) -> Result {
/// let mut vec =
/// ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
@@ -224,17 +206,26 @@ impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
///
/// This is meant to be used with FFI objects that are never interpreted by Rust code.
#[repr(transparent)]
-pub struct Opaque<T>(MaybeUninit<UnsafeCell<T>>);
+pub struct Opaque<T> {
+ value: UnsafeCell<MaybeUninit<T>>,
+ _pin: PhantomPinned,
+}
impl<T> Opaque<T> {
/// Creates a new opaque value.
pub const fn new(value: T) -> Self {
- Self(MaybeUninit::new(UnsafeCell::new(value)))
+ Self {
+ value: UnsafeCell::new(MaybeUninit::new(value)),
+ _pin: PhantomPinned,
+ }
}
/// Creates an uninitialised value.
pub const fn uninit() -> Self {
- Self(MaybeUninit::uninit())
+ Self {
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ _pin: PhantomPinned,
+ }
}
/// Creates a pin-initializer from the given initializer closure.
@@ -258,7 +249,7 @@ impl<T> Opaque<T> {
/// Returns a raw pointer to the opaque data.
pub fn get(&self) -> *mut T {
- UnsafeCell::raw_get(self.0.as_ptr())
+ UnsafeCell::get(&self.value).cast::<T>()
}
/// Gets the value behind `this`.
@@ -266,7 +257,7 @@ impl<T> Opaque<T> {
/// This function is useful to get access to the value without creating intermediate
/// references.
pub const fn raw_get(this: *const Self) -> *mut T {
- UnsafeCell::raw_get(this.cast::<UnsafeCell<T>>())
+ UnsafeCell::raw_get(this.cast::<UnsafeCell<MaybeUninit<T>>>()).cast::<T>()
}
}
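
[Editorial note] The reworked layout (`UnsafeCell<MaybeUninit<T>>` plus `PhantomPinned`) keeps `Opaque` interior-mutable while also making it `!Unpin`, so address-sensitive C structs cannot be moved once pinned. A sketch of the pointer-based access pattern it supports, under those assumptions:

```rust
use kernel::types::Opaque;

fn poke() {
    let o = Opaque::new(0u32);
    // `get` returns a `*mut u32` from a shared reference; no `&mut`
    // is ever created.
    let p: *mut u32 = o.get();
    // SAFETY: `p` points at live, initialised memory owned by `o`.
    unsafe { p.write(42) };
}
```
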
@@ -321,6 +312,19 @@ pub struct ARef<T: AlwaysRefCounted> {
_p: PhantomData<T>,
}
+// SAFETY: It is safe to send `ARef<T>` to another thread when the underlying `T` is `Sync` because
+// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
+// `T` to be `Send` because any thread that has an `ARef<T>` may ultimately access `T` using a
+// mutable reference, for example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
+
+// SAFETY: It is safe to send `&ARef<T>` to another thread when the underlying `T` is `Sync`
+// because it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally,
+// it needs `T` to be `Send` because any thread that has a `&ARef<T>` may clone it and get an
+// `ARef<T>` on that thread, so the thread may ultimately access `T` using a mutable reference, for
+// example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
+
impl<T: AlwaysRefCounted> ARef<T> {
/// Creates a new instance of [`ARef`].
///
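
[Editorial note] The new `Send`/`Sync` impls for `ARef<T>`, together with `Send` for `Task` above, can be exercised with the usual compile-time probe; a sketch assuming `kernel::task::Task` and `kernel::types::ARef`:

```rust
use kernel::task::Task;
use kernel::types::ARef;

// Compile-time probe: this only builds because `Task: Send + Sync`,
// and therefore `ARef<Task>: Send + Sync` per the impls above.
fn assert_send_sync<T: Send + Sync>() {}

fn check() {
    assert_send_sync::<ARef<Task>>();
}
```
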
diff --git a/rust/macros/helpers.rs b/rust/macros/helpers.rs
index b2bdd4d8c958..afb0f2e3a36a 100644
--- a/rust/macros/helpers.rs
+++ b/rust/macros/helpers.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use proc_macro::{token_stream, Group, TokenTree};
+use proc_macro::{token_stream, Group, Punct, Spacing, TokenStream, TokenTree};
pub(crate) fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
if let Some(TokenTree::Ident(ident)) = it.next() {
@@ -69,3 +69,87 @@ pub(crate) fn expect_end(it: &mut token_stream::IntoIter) {
panic!("Expected end");
}
}
+
+pub(crate) struct Generics {
+ pub(crate) impl_generics: Vec<TokenTree>,
+ pub(crate) ty_generics: Vec<TokenTree>,
+}
+
+/// Parses the given `TokenStream` into `Generics` and the rest.
+///
+/// The generics are not present in the rest, but a `where` clause might remain.
+pub(crate) fn parse_generics(input: TokenStream) -> (Generics, Vec<TokenTree>) {
+ // `impl_generics`, the declared generics with their bounds.
+ let mut impl_generics = vec![];
+ // Only the names of the generics, without any bounds.
+ let mut ty_generics = vec![];
+    // Tokens not related to the generics, e.g. the `where` clause and the item definition.
+ let mut rest = vec![];
+ // The current level of `<`.
+ let mut nesting = 0;
+ let mut toks = input.into_iter();
+ // If we are at the beginning of a generic parameter.
+ let mut at_start = true;
+ for tt in &mut toks {
+ match tt.clone() {
+ TokenTree::Punct(p) if p.as_char() == '<' => {
+ if nesting >= 1 {
+ // This is inside of the generics and part of some bound.
+ impl_generics.push(tt);
+ }
+ nesting += 1;
+ }
+ TokenTree::Punct(p) if p.as_char() == '>' => {
+ // This is a parsing error, so we just end it here.
+ if nesting == 0 {
+ break;
+ } else {
+ nesting -= 1;
+ if nesting >= 1 {
+ // We are still inside of the generics and part of some bound.
+ impl_generics.push(tt);
+ }
+ if nesting == 0 {
+ break;
+ }
+ }
+ }
+ tt => {
+ if nesting == 1 {
+ // Here depending on the token, it might be a generic variable name.
+ match &tt {
+ // Ignore const.
+ TokenTree::Ident(i) if i.to_string() == "const" => {}
+ TokenTree::Ident(_) if at_start => {
+ ty_generics.push(tt.clone());
+                            // We also push the `,` token right away; this makes it easier
+                            // to append further generics.
+ // generics.
+ ty_generics.push(TokenTree::Punct(Punct::new(',', Spacing::Alone)));
+ at_start = false;
+ }
+ TokenTree::Punct(p) if p.as_char() == ',' => at_start = true,
+ // Lifetimes begin with `'`.
+ TokenTree::Punct(p) if p.as_char() == '\'' && at_start => {
+ ty_generics.push(tt.clone());
+ }
+ _ => {}
+ }
+ }
+ if nesting >= 1 {
+ impl_generics.push(tt);
+ } else if nesting == 0 {
+ // If we haven't entered the generics yet, we still want to keep these tokens.
+ rest.push(tt);
+ }
+ }
+ }
+ }
+ rest.extend(toks);
+ (
+ Generics {
+ impl_generics,
+ ty_generics,
+ },
+ rest,
+ )
+}
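
[Editorial note] To make the split concrete, here is what the three outputs of `parse_generics` look like for a hypothetical input, written as it would appear inside a proc macro with `input: TokenStream` (illustration, not a doctest):

```rust
// Input token stream:
//     struct Foo<'a, T: Bar<u32>> where T: Baz { /* fields */ }
//
// parse_generics(input) yields, roughly:
//     impl_generics: `'a, T: Bar<u32>`   // declared generics, bounds kept
//     ty_generics:   `'a, T,`            // names only, trailing `,` added
//     rest:          `struct Foo where T: Baz { /* fields */ }`
let (Generics { impl_generics, ty_generics }, rest) = parse_generics(input);
```
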
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 3fc74cb4ea19..c42105c2ff96 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -7,9 +7,11 @@ mod quote;
mod concat_idents;
mod helpers;
mod module;
+mod paste;
mod pin_data;
mod pinned_drop;
mod vtable;
+mod zeroable;
use proc_macro::TokenStream;
@@ -246,3 +248,118 @@ pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream {
pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
pinned_drop::pinned_drop(args, input)
}
+
+/// Paste identifiers together.
+///
+/// Within the `paste!` macro, identifiers inside `[<` and `>]` are concatenated together to form a
+/// single identifier.
+///
+/// This is similar to the [`paste`] crate, but with the pasting feature limited to identifiers
+/// (literals, lifetimes and documentation strings are not supported). The set of supported
+/// modifiers also differs.
+///
+/// # Example
+///
+/// ```ignore
+/// use kernel::macros::paste;
+///
+/// macro_rules! pub_no_prefix {
+/// ($prefix:ident, $($newname:ident),+) => {
+/// paste! {
+/// $(pub(crate) const $newname: u32 = [<$prefix $newname>];)+
+/// }
+/// };
+/// }
+///
+/// pub_no_prefix!(
+/// binder_driver_return_protocol_,
+/// BR_OK,
+/// BR_ERROR,
+/// BR_TRANSACTION,
+/// BR_REPLY,
+/// BR_DEAD_REPLY,
+/// BR_TRANSACTION_COMPLETE,
+/// BR_INCREFS,
+/// BR_ACQUIRE,
+/// BR_RELEASE,
+/// BR_DECREFS,
+/// BR_NOOP,
+/// BR_SPAWN_LOOPER,
+/// BR_DEAD_BINDER,
+/// BR_CLEAR_DEATH_NOTIFICATION_DONE,
+/// BR_FAILED_REPLY
+/// );
+///
+/// assert_eq!(BR_OK, binder_driver_return_protocol_BR_OK);
+/// ```
+///
+/// # Modifiers
+///
+/// One or more modifiers can be attached to each identifier.
+///
+/// Currently supported modifiers are:
+/// * `span`: change the span of the concatenated identifier to the span of the specified token.
+///   By default, the span of the `[< >]` group is used.
+/// * `lower`: change the identifier to lower case.
+/// * `upper`: change the identifier to upper case.
+///
+/// ```ignore
+/// use kernel::macros::paste;
+///
+/// macro_rules! pub_no_prefix {
+/// ($prefix:ident, $($newname:ident),+) => {
+/// kernel::macros::paste! {
+/// $(pub(crate) const fn [<$newname:lower:span>]() -> u32 { [<$prefix $newname:span>] })+
+/// }
+/// };
+/// }
+///
+/// pub_no_prefix!(
+/// binder_driver_return_protocol_,
+/// BR_OK,
+/// BR_ERROR,
+/// BR_TRANSACTION,
+/// BR_REPLY,
+/// BR_DEAD_REPLY,
+/// BR_TRANSACTION_COMPLETE,
+/// BR_INCREFS,
+/// BR_ACQUIRE,
+/// BR_RELEASE,
+/// BR_DECREFS,
+/// BR_NOOP,
+/// BR_SPAWN_LOOPER,
+/// BR_DEAD_BINDER,
+/// BR_CLEAR_DEATH_NOTIFICATION_DONE,
+/// BR_FAILED_REPLY
+/// );
+///
+/// assert_eq!(br_ok(), binder_driver_return_protocol_BR_OK);
+/// ```
+///
+/// [`paste`]: https://docs.rs/paste/
+#[proc_macro]
+pub fn paste(input: TokenStream) -> TokenStream {
+ let mut tokens = input.into_iter().collect();
+ paste::expand(&mut tokens);
+ tokens.into_iter().collect()
+}
+
+/// Derives the [`Zeroable`] trait for the given struct.
+///
+/// This can only be used for structs where every field implements the [`Zeroable`] trait.
+///
+/// # Examples
+///
+/// ```rust,ignore
+/// #[derive(Zeroable)]
+/// pub struct DriverData {
+/// id: i64,
+/// buf_ptr: *mut u8,
+/// len: usize,
+/// }
+/// ```
+#[proc_macro_derive(Zeroable)]
+pub fn derive_zeroable(input: TokenStream) -> TokenStream {
+ zeroable::derive(input)
+}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index fb1244f8c2e6..d62d8710d77a 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -199,7 +199,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
/// Used by the printing macros, e.g. [`info!`].
const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
- /// The \"Rust loadable module\" mark, for `scripts/is_rust_module.sh`.
+ /// The \"Rust loadable module\" mark.
//
// This may be best done another way later on, e.g. as a new modinfo
// key or a new section. For the moment, keep it simple.
diff --git a/rust/macros/paste.rs b/rust/macros/paste.rs
new file mode 100644
index 000000000000..385a78434224
--- /dev/null
+++ b/rust/macros/paste.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{Delimiter, Group, Ident, Spacing, Span, TokenTree};
+
+fn concat(tokens: &[TokenTree], group_span: Span) -> TokenTree {
+ let mut tokens = tokens.iter();
+ let mut segments = Vec::new();
+ let mut span = None;
+ loop {
+ match tokens.next() {
+ None => break,
+ Some(TokenTree::Literal(lit)) => segments.push((lit.to_string(), lit.span())),
+ Some(TokenTree::Ident(ident)) => {
+ let mut value = ident.to_string();
+ if value.starts_with("r#") {
+ value.replace_range(0..2, "");
+ }
+ segments.push((value, ident.span()));
+ }
+ Some(TokenTree::Punct(p)) if p.as_char() == ':' => {
+ let Some(TokenTree::Ident(ident)) = tokens.next() else {
+ panic!("expected identifier as modifier");
+ };
+
+ let (mut value, sp) = segments.pop().expect("expected identifier before modifier");
+ match ident.to_string().as_str() {
+                    // Use the span of this segment as the span of the concatenated token.
+ "span" => {
+ assert!(
+ span.is_none(),
+ "span modifier should only appear at most once"
+ );
+ span = Some(sp);
+ }
+ "lower" => value = value.to_lowercase(),
+ "upper" => value = value.to_uppercase(),
+ v => panic!("unknown modifier `{v}`"),
+ };
+ segments.push((value, sp));
+ }
+ _ => panic!("unexpected token in paste segments"),
+ };
+ }
+
+ let pasted: String = segments.into_iter().map(|x| x.0).collect();
+ TokenTree::Ident(Ident::new(&pasted, span.unwrap_or(group_span)))
+}
+
+pub(crate) fn expand(tokens: &mut Vec<TokenTree>) {
+ for token in tokens.iter_mut() {
+ if let TokenTree::Group(group) = token {
+ let delimiter = group.delimiter();
+ let span = group.span();
+ let mut stream: Vec<_> = group.stream().into_iter().collect();
+            // Find groups that look like `[< A B C D >]`
+ if delimiter == Delimiter::Bracket
+ && stream.len() >= 3
+ && matches!(&stream[0], TokenTree::Punct(p) if p.as_char() == '<')
+ && matches!(&stream[stream.len() - 1], TokenTree::Punct(p) if p.as_char() == '>')
+ {
+ // Replace the group with concatenated token
+ *token = concat(&stream[1..stream.len() - 1], span);
+ } else {
+ // Recursively expand tokens inside the group
+ expand(&mut stream);
+ let mut group = Group::new(delimiter, stream.into_iter().collect());
+ group.set_span(span);
+ *token = TokenTree::Group(group);
+ }
+ }
+ }
+
+    // Path segments cannot contain invisible delimiter groups, so remove any that appear.
+ for i in (0..tokens.len().saturating_sub(3)).rev() {
+ // Looking for a double colon
+ if matches!(
+ (&tokens[i + 1], &tokens[i + 2]),
+ (TokenTree::Punct(a), TokenTree::Punct(b))
+ if a.as_char() == ':' && a.spacing() == Spacing::Joint && b.as_char() == ':'
+ ) {
+ match &tokens[i + 3] {
+ TokenTree::Group(group) if group.delimiter() == Delimiter::None => {
+ tokens.splice(i + 3..i + 4, group.stream());
+ }
+ _ => (),
+ }
+
+ match &tokens[i] {
+ TokenTree::Group(group) if group.delimiter() == Delimiter::None => {
+ tokens.splice(i..i + 1, group.stream());
+ }
+ _ => (),
+ }
+ }
+ }
+}
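
[Editorial note] Concretely, `expand` rewrites every bracketed `[< ... >]` group into a single identifier; an expansion sketch of the assumed behaviour per the code above:

```rust
// Inside `paste! { ... }`, with `$name` bound to the identifier `BAR`:
//
//     [< Foo $name:lower >]   // segments: `Foo`, then `BAR` with `lower`
//
// concatenates to the single identifier:
//
//     Foobar
//
// and `[<$name:lower:span>]` would additionally take its span from `$name`.
```
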
diff --git a/rust/macros/pin_data.rs b/rust/macros/pin_data.rs
index 954149d77181..6d58cfda9872 100644
--- a/rust/macros/pin_data.rs
+++ b/rust/macros/pin_data.rs
@@ -1,79 +1,127 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
-use proc_macro::{Punct, Spacing, TokenStream, TokenTree};
+use crate::helpers::{parse_generics, Generics};
+use proc_macro::{Group, Punct, Spacing, TokenStream, TokenTree};
pub(crate) fn pin_data(args: TokenStream, input: TokenStream) -> TokenStream {
// This proc-macro only does some pre-parsing and then delegates the actual parsing to
// `kernel::__pin_data!`.
- //
- // In here we only collect the generics, since parsing them in declarative macros is very
- // elaborate. We also do not need to analyse their structure, we only need to collect them.
- // `impl_generics`, the declared generics with their bounds.
- let mut impl_generics = vec![];
- // Only the names of the generics, without any bounds.
- let mut ty_generics = vec![];
- // Tokens not related to the generics e.g. the `impl` token.
- let mut rest = vec![];
- // The current level of `<`.
- let mut nesting = 0;
- let mut toks = input.into_iter();
- // If we are at the beginning of a generic parameter.
- let mut at_start = true;
- for tt in &mut toks {
- match tt.clone() {
- TokenTree::Punct(p) if p.as_char() == '<' => {
- if nesting >= 1 {
- impl_generics.push(tt);
- }
- nesting += 1;
- }
- TokenTree::Punct(p) if p.as_char() == '>' => {
- if nesting == 0 {
- break;
- } else {
- nesting -= 1;
- if nesting >= 1 {
- impl_generics.push(tt);
- }
- if nesting == 0 {
- break;
- }
+ let (
+ Generics {
+ impl_generics,
+ ty_generics,
+ },
+ rest,
+ ) = parse_generics(input);
+    // The struct definition might contain the `Self` type. Since `__pin_data!` will define a new
+    // type with the same generics and bounds, this poses a problem: `Self` would then refer to
+    // the new type rather than to this struct definition. Therefore we have to replace `Self`
+    // with the concrete name.
+
+ // Errors that occur when replacing `Self` with `struct_name`.
+ let mut errs = TokenStream::new();
+ // The name of the struct with ty_generics.
+ let struct_name = rest
+ .iter()
+ .skip_while(|tt| !matches!(tt, TokenTree::Ident(i) if i.to_string() == "struct"))
+ .nth(1)
+ .and_then(|tt| match tt {
+ TokenTree::Ident(_) => {
+ let tt = tt.clone();
+ let mut res = vec![tt];
+ if !ty_generics.is_empty() {
+                    // We add this so it is maximally compatible with e.g. `Self::CONST`, which
+                    // will be replaced by `StructName::<$generics>::CONST`.
+ res.push(TokenTree::Punct(Punct::new(':', Spacing::Joint)));
+ res.push(TokenTree::Punct(Punct::new(':', Spacing::Alone)));
+ res.push(TokenTree::Punct(Punct::new('<', Spacing::Alone)));
+ res.extend(ty_generics.iter().cloned());
+ res.push(TokenTree::Punct(Punct::new('>', Spacing::Alone)));
}
+ Some(res)
}
- tt => {
- if nesting == 1 {
- match &tt {
- TokenTree::Ident(i) if i.to_string() == "const" => {}
- TokenTree::Ident(_) if at_start => {
- ty_generics.push(tt.clone());
- ty_generics.push(TokenTree::Punct(Punct::new(',', Spacing::Alone)));
- at_start = false;
- }
- TokenTree::Punct(p) if p.as_char() == ',' => at_start = true,
- TokenTree::Punct(p) if p.as_char() == '\'' && at_start => {
- ty_generics.push(tt.clone());
- }
- _ => {}
- }
- }
- if nesting >= 1 {
- impl_generics.push(tt);
- } else if nesting == 0 {
- rest.push(tt);
- }
+ _ => None,
+ })
+ .unwrap_or_else(|| {
+ // If we did not find the name of the struct then we will use `Self` as the replacement
+ // and add a compile error to ensure it does not compile.
+ errs.extend(
+ "::core::compile_error!(\"Could not locate type name.\");"
+ .parse::<TokenStream>()
+ .unwrap(),
+ );
+ "Self".parse::<TokenStream>().unwrap().into_iter().collect()
+ });
+ let impl_generics = impl_generics
+ .into_iter()
+ .flat_map(|tt| replace_self_and_deny_type_defs(&struct_name, tt, &mut errs))
+ .collect::<Vec<_>>();
+ let mut rest = rest
+ .into_iter()
+ .flat_map(|tt| {
+            // Pass the top-level `struct` token through unchanged, since
+            // `replace_self_and_deny_type_defs` would reject it with a compile error.
+ if matches!(&tt, TokenTree::Ident(i) if i.to_string() == "struct") {
+ vec![tt]
+ } else {
+ replace_self_and_deny_type_defs(&struct_name, tt, &mut errs)
}
- }
- }
- rest.extend(toks);
+ })
+ .collect::<Vec<_>>();
// This should be the body of the struct `{...}`.
let last = rest.pop();
- quote!(::kernel::__pin_data! {
+ let mut quoted = quote!(::kernel::__pin_data! {
parse_input:
@args(#args),
@sig(#(#rest)*),
@impl_generics(#(#impl_generics)*),
@ty_generics(#(#ty_generics)*),
@body(#last),
- })
+ });
+ quoted.extend(errs);
+ quoted
+}
+
+/// Replaces `Self` with `struct_name` and errors on the `enum`, `trait`, `struct`, `union` and
+/// `impl`
+/// keywords.
+///
+/// The error is appended to `errs` to allow normal parsing to continue.
+fn replace_self_and_deny_type_defs(
+ struct_name: &Vec<TokenTree>,
+ tt: TokenTree,
+ errs: &mut TokenStream,
+) -> Vec<TokenTree> {
+ match tt {
+ TokenTree::Ident(ref i)
+ if i.to_string() == "enum"
+ || i.to_string() == "trait"
+ || i.to_string() == "struct"
+ || i.to_string() == "union"
+ || i.to_string() == "impl" =>
+ {
+ errs.extend(
+ format!(
+ "::core::compile_error!(\"Cannot use `{i}` inside of struct definition with \
+ `#[pin_data]`.\");"
+ )
+ .parse::<TokenStream>()
+ .unwrap()
+ .into_iter()
+ .map(|mut tok| {
+ tok.set_span(tt.span());
+ tok
+ }),
+ );
+ vec![tt]
+ }
+ TokenTree::Ident(i) if i.to_string() == "Self" => struct_name.clone(),
+ TokenTree::Literal(_) | TokenTree::Punct(_) | TokenTree::Ident(_) => vec![tt],
+ TokenTree::Group(g) => vec![TokenTree::Group(Group::new(
+ g.delimiter(),
+ g.stream()
+ .into_iter()
+ .flat_map(|tt| replace_self_and_deny_type_defs(struct_name, tt, errs))
+ .collect(),
+ ))],
+ }
}
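
[Editorial note] A sketch of the `Self` rewrite this enables; the struct and field names are hypothetical, and `#[pin_data]` is assumed to be in scope via the kernel prelude:

```rust
use kernel::prelude::*; // assumed to re-export `#[pin_data]`

#[pin_data]
struct Buffers<T> {
    // Written as `*const Self`, rewritten by the macro to
    // `*const Buffers::<T,>` before delegating to `kernel::__pin_data!`,
    // so `Self` cannot accidentally name the macro's helper type.
    this: *const Buffers<T>,
}
```
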
diff --git a/rust/macros/quote.rs b/rust/macros/quote.rs
index c8e08b3c1e4c..33a199e4f176 100644
--- a/rust/macros/quote.rs
+++ b/rust/macros/quote.rs
@@ -39,12 +39,14 @@ impl ToTokens for TokenStream {
/// [`quote_spanned!`](https://docs.rs/quote/latest/quote/macro.quote_spanned.html) macro from the
/// `quote` crate but provides only just enough functionality needed by the current `macros` crate.
macro_rules! quote_spanned {
- ($span:expr => $($tt:tt)*) => {
- #[allow(clippy::vec_init_then_push)]
- {
- let mut tokens = ::std::vec::Vec::new();
- let span = $span;
- quote_spanned!(@proc tokens span $($tt)*);
+ ($span:expr => $($tt:tt)*) => {{
+ let mut tokens;
+ #[allow(clippy::vec_init_then_push)]
+ {
+ tokens = ::std::vec::Vec::new();
+ let span = $span;
+ quote_spanned!(@proc tokens span $($tt)*);
+ }
::proc_macro::TokenStream::from_iter(tokens)
}};
(@proc $v:ident $span:ident) => {};
@@ -122,6 +124,18 @@ macro_rules! quote_spanned {
));
quote_spanned!(@proc $v $span $($tt)*);
};
+ (@proc $v:ident $span:ident ; $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new(';', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident + $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new('+', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
(@proc $v:ident $span:ident $id:ident $($tt:tt)*) => {
$v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new(stringify!($id), $span)));
quote_spanned!(@proc $v $span $($tt)*);
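
[Editorial note] The new `;` and `+` rules let in-tree macros quote those punctuation tokens directly; this is exactly what `zeroable.rs` below relies on. A sketch, as it would appear inside the `macros` crate:

```rust
// Now quotable with the in-tree `quote!`/`quote_spanned!` (sketch):
let bound = quote! { ::kernel::init::Zeroable + }; // trailing `+` in a bound list
let stmts = quote! { f(); g(); };                  // `;` between statements
```
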
diff --git a/rust/macros/vtable.rs b/rust/macros/vtable.rs
index 34d5e7fb5768..ee06044fcd4f 100644
--- a/rust/macros/vtable.rs
+++ b/rust/macros/vtable.rs
@@ -74,6 +74,7 @@ pub(crate) fn vtable(_attr: TokenStream, ts: TokenStream) -> TokenStream {
const {gen_const_name}: bool = false;",
)
.unwrap();
+ consts.insert(gen_const_name);
}
} else {
const_items = "const USE_VTABLE_ATTR: () = ();".to_owned();
diff --git a/rust/macros/zeroable.rs b/rust/macros/zeroable.rs
new file mode 100644
index 000000000000..0d605c46ab3b
--- /dev/null
+++ b/rust/macros/zeroable.rs
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::helpers::{parse_generics, Generics};
+use proc_macro::{TokenStream, TokenTree};
+
+pub(crate) fn derive(input: TokenStream) -> TokenStream {
+ let (
+ Generics {
+ impl_generics,
+ ty_generics,
+ },
+ mut rest,
+ ) = parse_generics(input);
+ // This should be the body of the struct `{...}`.
+ let last = rest.pop();
+ // Now we insert `Zeroable` as a bound for every generic parameter in `impl_generics`.
+ let mut new_impl_generics = Vec::with_capacity(impl_generics.len());
+ // Are we inside of a generic where we want to add `Zeroable`?
+ let mut in_generic = !impl_generics.is_empty();
+ // Have we already inserted `Zeroable`?
+ let mut inserted = false;
+ // Level of `<>` nestings.
+ let mut nested = 0;
+ for tt in impl_generics {
+ match &tt {
+ // If we find a `,`, then we have finished a generic/constant/lifetime parameter.
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == ',' => {
+ if in_generic && !inserted {
+ new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ }
+ in_generic = true;
+ inserted = false;
+ new_impl_generics.push(tt);
+ }
+ // If we find `'`, then we are entering a lifetime.
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == '\'' => {
+ in_generic = false;
+ new_impl_generics.push(tt);
+ }
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == ':' => {
+ new_impl_generics.push(tt);
+ if in_generic {
+ new_impl_generics.extend(quote! { ::kernel::init::Zeroable + });
+ inserted = true;
+ }
+ }
+ TokenTree::Punct(p) if p.as_char() == '<' => {
+ nested += 1;
+ new_impl_generics.push(tt);
+ }
+ TokenTree::Punct(p) if p.as_char() == '>' => {
+ assert!(nested > 0);
+ nested -= 1;
+ new_impl_generics.push(tt);
+ }
+ _ => new_impl_generics.push(tt),
+ }
+ }
+ assert_eq!(nested, 0);
+ if in_generic && !inserted {
+ new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ }
+ quote! {
+ ::kernel::__derive_zeroable!(
+ parse_input:
+ @sig(#(#rest)*),
+ @impl_generics(#(#new_impl_generics)*),
+ @ty_generics(#(#ty_generics)*),
+ @body(#last),
+ );
+ }
+}
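
[Editorial note] Roughly, the bound insertion above means the derive expands as follows; this is a sketch only, since the exact expansion goes through `kernel::__derive_zeroable!`:

```rust
// #[derive(Zeroable)]
// struct Wrapper<T> {
//     value: T,
// }
//
// expands (roughly) to:
//
// unsafe impl<T: ::kernel::init::Zeroable> ::kernel::init::Zeroable
//     for Wrapper<T> {}
```
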
diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs
index 29f69f3a52de..0caad902ba40 100644
--- a/rust/uapi/lib.rs
+++ b/rust/uapi/lib.rs
@@ -8,7 +8,6 @@
//! userspace APIs.
#![no_std]
-#![feature(core_ffi_c)]
// See <https://github.com/rust-lang/rust-bindgen/issues/1651>.
#![cfg_attr(test, allow(deref_nullptr))]
#![cfg_attr(test, allow(unaligned_references))]