author     Linus Torvalds <torvalds@linux-foundation.org>  2024-09-25 20:25:40 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-09-25 20:25:40 +0300
commit     570172569238c66a482ec3eb5d766cc9cf255f69 (patch)
tree       615e935d737b3f8ec9d3a49cb1895c9cac81d679 /rust
parent     684a64bf32b6e488004e0ad7f0d7e922798f65b6 (diff)
parent     a2f11547052001bd448ccec81dd1e68409078fbb (diff)
download   linux-570172569238c66a482ec3eb5d766cc9cf255f69.tar.xz
Merge tag 'rust-6.12' of https://github.com/Rust-for-Linux/linux
Pull Rust updates from Miguel Ojeda:
 "Toolchain and infrastructure:

   - Support 'MITIGATION_{RETHUNK,RETPOLINE,SLS}' (which cleans up
     objtool warnings), teach objtool about 'noreturn' Rust symbols and
     mimic '___ADDRESSABLE()' for 'module_{init,exit}'. With that, we
     should be objtool-warning-free, so enable it to run for all Rust
     object files.

   - KASAN (no 'SW_TAGS'), KCFI and shadow call stack sanitizer support.

   - Support 'RUSTC_VERSION', including re-config and re-build on change.

   - Split helpers file into several files in a folder, to avoid
     conflicts in it. Eventually those files will be moved to the right
     places with the new build system. In addition, remove the need to
     manually export the symbols defined there, reusing existing
     machinery for that.

   - Relax restriction on configurations with Rust + GCC plugins to
     just the RANDSTRUCT plugin.

  'kernel' crate:

   - New 'list' module: doubly-linked linked list for use with
     reference counted values, which is heavily used by the upcoming
     Rust Binder.

     This includes 'ListArc' (a wrapper around 'Arc' that is guaranteed
     unique for the given ID), 'AtomicTracker' (tracks whether a
     'ListArc' exists using an atomic), 'ListLinks' (the prev/next
     pointers for an item in a linked list), 'List' (the linked list
     itself), 'Iter' (an iterator over a 'List'), 'Cursor' (a cursor
     into a 'List' that allows removing elements), 'ListArcField' (a
     field exclusively owned by a 'ListArc'), as well as support for
     heterogeneous lists.

   - New 'rbtree' module: red-black tree abstractions used by the
     upcoming Rust Binder.

     This includes 'RBTree' (the red-black tree itself), 'RBTreeNode'
     (a node), 'RBTreeNodeReservation' (a memory reservation for a
     node), 'Iter' and 'IterMut' (immutable and mutable iterators),
     'Cursor' (bidirectional cursor that allows removing elements), as
     well as an entry API similar to the Rust standard library one.

   - 'init' module: add 'write_[pin_]init' methods and the
     'InPlaceWrite' trait. Add the 'assert_pinned!' macro.

   - 'sync' module: implement the 'InPlaceInit' trait for 'Arc' by
     introducing an associated type in the trait.

   - 'alloc' module: add 'drop_contents' method to 'BoxExt'.

   - 'types' module: implement the 'ForeignOwnable' trait for
     'Pin<Box<T>>' and improve the trait's documentation. In addition,
     add the 'into_raw' method to the 'ARef' type.

   - 'error' module: in preparation for the upcoming Rust support for
     32-bit architectures, like arm, locally allow Clippy lint for
     those.

  Documentation:

   - https://rust.docs.kernel.org has been announced, so link to it.

   - Enable rustdoc's "jump to definition" feature, making its output a
     bit closer to the experience in a cross-referencer.

   - Debian Testing now also provides recent Rust releases (outside of
     the freeze period), so add it to the list.

  MAINTAINERS:

   - Trevor is joining as reviewer of the "RUST" entry.

  And a few other small bits"

* tag 'rust-6.12' of https://github.com/Rust-for-Linux/linux: (54 commits)
  kasan: rust: Add KASAN smoke test via UAF
  kbuild: rust: Enable KASAN support
  rust: kasan: Rust does not support KHWASAN
  kbuild: rust: Define probing macros for rustc
  kasan: simplify and clarify Makefile
  rust: cfi: add support for CFI_CLANG with Rust
  cfi: add CONFIG_CFI_ICALL_NORMALIZE_INTEGERS
  rust: support for shadow call stack sanitizer
  docs: rust: include other expressions in conditional compilation section
  kbuild: rust: replace proc macros dependency on `core.o` with the version text
  kbuild: rust: rebuild if the version text changes
  kbuild: rust: re-run Kconfig if the version text changes
  kbuild: rust: add `CONFIG_RUSTC_VERSION`
  rust: avoid `box_uninit_write` feature
  MAINTAINERS: add Trevor Gross as Rust reviewer
  rust: rbtree: add `RBTree::entry`
  rust: rbtree: add cursor
  rust: rbtree: add mutable iterator
  rust: rbtree: add iterator
  rust: rbtree: add red-black tree implementation backed by the C version
  ...
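The 'rbtree' entry API mentioned above mirrors the one on Rust's std::collections::BTreeMap. A minimal sketch of the usage pattern being described, with names taken from the diffstat below ('RBTree', 'RBTreeNodeReservation'); the exact signatures here are illustrative assumptions, not verbatim kernel API:

    use kernel::alloc::{AllocError, Flags};
    use kernel::rbtree::{Entry, RBTree, RBTreeNodeReservation};

    // Sketch: bump a counter stored under `key`, inserting it if absent.
    fn bump(tree: &mut RBTree<u32, u32>, key: u32, flags: Flags) -> Result<(), AllocError> {
        // Reserve node memory up front so the tree update itself cannot fail.
        let reservation = RBTreeNodeReservation::new(flags)?;
        match tree.entry(key) {
            // Key already present: update the value in place.
            Entry::Occupied(mut e) => *e.get_mut() += 1,
            // Key absent: consume the reservation to link in a new node.
            Entry::Vacant(e) => {
                e.insert(1, reservation);
            }
        }
        Ok(())
    }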
Diffstat (limited to 'rust')
-rw-r--r--  rust/Makefile                           |   56
-rw-r--r--  rust/bindings/bindings_helper.h         |    2
-rw-r--r--  rust/exports.c                          |    1
-rw-r--r--  rust/helpers.c                          |  239
-rw-r--r--  rust/helpers/blk.c                      |   14
-rw-r--r--  rust/helpers/bug.c                      |    8
-rw-r--r--  rust/helpers/build_assert.c             |   25
-rw-r--r--  rust/helpers/build_bug.c                |    9
-rw-r--r--  rust/helpers/err.c                      |   19
-rw-r--r--  rust/helpers/helpers.c                  |   26
-rw-r--r--  rust/helpers/kunit.c                    |    9
-rw-r--r--  rust/helpers/mutex.c                    |    9
-rw-r--r--  rust/helpers/page.c                     |   19
-rw-r--r--  rust/helpers/rbtree.c                   |    9
-rw-r--r--  rust/helpers/refcount.c                 |   19
-rw-r--r--  rust/helpers/signal.c                   |    9
-rw-r--r--  rust/helpers/slab.c                     |    9
-rw-r--r--  rust/helpers/spinlock.c                 |   24
-rw-r--r--  rust/helpers/task.c                     |   19
-rw-r--r--  rust/helpers/uaccess.c                  |   15
-rw-r--r--  rust/helpers/wait.c                     |    9
-rw-r--r--  rust/helpers/workqueue.c                |   15
-rw-r--r--  rust/kernel/alloc/box_ext.rs            |   33
-rw-r--r--  rust/kernel/error.rs                    |    5
-rw-r--r--  rust/kernel/init.rs                     |  191
-rw-r--r--  rust/kernel/init/__internal.rs          |   29
-rw-r--r--  rust/kernel/lib.rs                      |    2
-rw-r--r--  rust/kernel/list.rs                     |  686
-rw-r--r--  rust/kernel/list/arc.rs                 |  521
-rw-r--r--  rust/kernel/list/arc_field.rs           |   96
-rw-r--r--  rust/kernel/list/impl_list_item_mod.rs  |  274
-rw-r--r--  rust/kernel/prelude.rs                  |    2
-rw-r--r--  rust/kernel/print.rs                    |   20
-rw-r--r--  rust/kernel/rbtree.rs                   | 1278
-rw-r--r--  rust/kernel/std_vendor.rs               |    2
-rw-r--r--  rust/kernel/sync/arc.rs                 |   25
-rw-r--r--  rust/kernel/types.rs                    |   63
-rw-r--r--  rust/macros/lib.rs                      |    4
-rw-r--r--  rust/macros/module.rs                   |   12
39 files changed, 3481 insertions, 326 deletions
diff --git a/rust/Makefile b/rust/Makefile
index f168d2c98a15..b5e0a73b78f3 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -8,16 +8,16 @@ always-$(CONFIG_RUST) += exports_core_generated.h
# Missing prototypes are expected in the helpers since these are exported
# for Rust only, thus there is no header nor prototypes.
-obj-$(CONFIG_RUST) += helpers.o
-CFLAGS_REMOVE_helpers.o = -Wmissing-prototypes -Wmissing-declarations
+obj-$(CONFIG_RUST) += helpers/helpers.o
+CFLAGS_REMOVE_helpers/helpers.o = -Wmissing-prototypes -Wmissing-declarations
always-$(CONFIG_RUST) += libmacros.so
no-clean-files += libmacros.so
always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs
obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o
-always-$(CONFIG_RUST) += exports_alloc_generated.h exports_bindings_generated.h \
- exports_kernel_generated.h
+always-$(CONFIG_RUST) += exports_alloc_generated.h exports_helpers_generated.h \
+ exports_bindings_generated.h exports_kernel_generated.h
always-$(CONFIG_RUST) += uapi/uapi_generated.rs
obj-$(CONFIG_RUST) += uapi.o
@@ -63,6 +63,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
$(rustc_target_flags) -L$(objtree)/$(obj) \
+ -Zunstable-options --generate-link-to-definition \
--output $(rustdoc_output) \
--crate-name $(subst rustdoc-,,$@) \
$(if $(rustdoc_host),,--sysroot=/dev/null) \
@@ -270,7 +271,7 @@ quiet_cmd_bindgen = BINDGEN $@
cmd_bindgen = \
$(BINDGEN) $< $(bindgen_target_flags) \
--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
- --no-debug '.*' \
+ --no-debug '.*' --enable-function-attribute-detection \
-o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
@@ -299,13 +300,13 @@ $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
sed -Ei 's/pub fn rust_helper_([a-zA-Z0-9_]*)/#[link_name="rust_helper_\1"]\n pub fn \1/g' $@
-$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers/helpers.c FORCE
$(call if_changed_dep,bindgen)
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
$(NM) -p --defined-only $< \
- | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
+ | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(call if_changed,exports)
@@ -313,6 +314,18 @@ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE
$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE
$(call if_changed,exports)
+# Even though Rust kernel modules should never use the bindings directly,
+# symbols from the `bindings` crate and the C helpers need to be exported
+# because Rust generics and inlined functions may not get their code generated
+# in the crate where they are defined. Other helpers, called from non-inline
+# functions, may not be exported, in principle. However, in general, the Rust
+# compiler does not guarantee codegen will be performed for a non-inline
+# function either. Therefore, we export all symbols from helpers and bindings.
+# In the future, this may be revisited to reduce the number of exports after
+# the compiler is informed about the places codegen is required.
+$(obj)/exports_helpers_generated.h: $(obj)/helpers/helpers.o FORCE
+ $(call if_changed,exports)
+
$(obj)/exports_bindings_generated.h: $(obj)/bindings.o FORCE
$(call if_changed,exports)
@@ -329,9 +342,7 @@ quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
--crate-name $(patsubst lib%.so,%,$(notdir $@)) $<
# Procedural macros can only be used with the `rustc` that compiled it.
-# Therefore, to get `libmacros.so` automatically recompiled when the compiler
-# version changes, we add `core.o` as a dependency (even if it is not needed).
-$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE
+$(obj)/libmacros.so: $(src)/macros/lib.rs FORCE
+$(call if_changed_dep,rustc_procmacro)
quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
@@ -344,7 +355,8 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
--crate-type rlib -L$(objtree)/$(obj) \
--crate-name $(patsubst %.o,%,$(notdir $@)) $< \
--sysroot=/dev/null \
- $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
+ $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) \
+ $(cmd_objtool)
rust-analyzer:
$(Q)$(srctree)/scripts/generate_rust_analyzer.py \
@@ -366,44 +378,50 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
__ashlti3 __lshrti3
endif
+define rule_rustc_library
+ $(call cmd_and_fixdep,rustc_library)
+ $(call cmd,gen_objtooldep)
+endef
+
$(obj)/core.o: private skip_clippy = 1
$(obj)/core.o: private skip_flags = -Wunreachable_pub
$(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym))
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
-$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
- +$(call if_changed_dep,rustc_library)
+$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs \
+ $(wildcard $(objtree)/include/config/RUSTC_VERSION_TEXT) FORCE
+ +$(call if_changed_rule,rustc_library)
ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),)
$(obj)/core.o: scripts/target.json
endif
$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/alloc.o: private skip_clippy = 1
$(obj)/alloc.o: private skip_flags = -Wunreachable_pub
$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
$(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/bindings.o: $(src)/bindings/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/bindings/bindings_generated.rs \
$(obj)/bindings/bindings_helpers_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/uapi.o: $(src)/uapi/lib.rs \
$(obj)/compiler_builtins.o \
$(obj)/uapi/uapi_generated.rs FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros --extern bindings --extern uapi
$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
$(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE
- +$(call if_changed_dep,rustc_library)
+ +$(call if_changed_rule,rustc_library)
endif # CONFIG_RUST
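A note on the sed rewrite of bindings_helpers_generated.rs above: it turns each generated 'pub fn rust_helper_*' declaration into an unprefixed Rust name that still links against the exported C symbol. A hand-written sketch of the resulting shape, picking mutex_lock from the helpers below as the example ('mutex' here stands in for the bindgen-generated struct):

    // Shape of a post-sed declaration: Rust callers use `mutex_lock`,
    // while the linker resolves the C symbol `rust_helper_mutex_lock`.
    extern "C" {
        #[link_name = "rust_helper_mutex_lock"]
        pub fn mutex_lock(lock: *mut mutex);
    }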
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index b940a5777330..ae82e9c941af 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -7,8 +7,8 @@
*/
#include <kunit/test.h>
-#include <linux/blk_types.h>
#include <linux/blk-mq.h>
+#include <linux/blk_types.h>
#include <linux/blkdev.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
diff --git a/rust/exports.c b/rust/exports.c
index 3803c21d1403..e5695f3b45b7 100644
--- a/rust/exports.c
+++ b/rust/exports.c
@@ -17,6 +17,7 @@
#include "exports_core_generated.h"
#include "exports_alloc_generated.h"
+#include "exports_helpers_generated.h"
#include "exports_bindings_generated.h"
#include "exports_kernel_generated.h"
diff --git a/rust/helpers.c b/rust/helpers.c
deleted file mode 100644
index 92d3c03ae1bd..000000000000
--- a/rust/helpers.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
- * cannot be called either. This file explicitly creates functions ("helpers")
- * that wrap those so that they can be called from Rust.
- *
- * Even though Rust kernel modules should never use the bindings directly, some
- * of these helpers need to be exported because Rust generics and inlined
- * functions may not get their code generated in the crate where they are
- * defined. Other helpers, called from non-inline functions, may not be
- * exported, in principle. However, in general, the Rust compiler does not
- * guarantee codegen will be performed for a non-inline function either.
- * Therefore, this file exports all the helpers. In the future, this may be
- * revisited to reduce the number of exports after the compiler is informed
- * about the places codegen is required.
- *
- * All symbols are exported as GPL-only to guarantee no GPL-only feature is
- * accidentally exposed.
- *
- * Sorted alphabetically.
- */
-
-#include <kunit/test-bug.h>
-#include <linux/bug.h>
-#include <linux/build_bug.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/errname.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-#include <linux/mutex.h>
-#include <linux/refcount.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-__noreturn void rust_helper_BUG(void)
-{
- BUG();
-}
-EXPORT_SYMBOL_GPL(rust_helper_BUG);
-
-unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
- unsigned long n)
-{
- return copy_from_user(to, from, n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);
-
-unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
- unsigned long n)
-{
- return copy_to_user(to, from, n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);
-
-void rust_helper_mutex_lock(struct mutex *lock)
-{
- mutex_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
-
-void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
-#else
- spin_lock_init(lock);
-#endif
-}
-EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);
-
-void rust_helper_spin_lock(spinlock_t *lock)
-{
- spin_lock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_lock);
-
-void rust_helper_spin_unlock(spinlock_t *lock)
-{
- spin_unlock(lock);
-}
-EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
-
-void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
-{
- init_wait(wq_entry);
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_wait);
-
-int rust_helper_signal_pending(struct task_struct *t)
-{
- return signal_pending(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
-
-struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
-{
- return alloc_pages(gfp_mask, order);
-}
-EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
-
-void *rust_helper_kmap_local_page(struct page *page)
-{
- return kmap_local_page(page);
-}
-EXPORT_SYMBOL_GPL(rust_helper_kmap_local_page);
-
-void rust_helper_kunmap_local(const void *addr)
-{
- kunmap_local(addr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
-
-refcount_t rust_helper_REFCOUNT_INIT(int n)
-{
- return (refcount_t)REFCOUNT_INIT(n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);
-
-void rust_helper_refcount_inc(refcount_t *r)
-{
- refcount_inc(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
-
-bool rust_helper_refcount_dec_and_test(refcount_t *r)
-{
- return refcount_dec_and_test(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
-
-__force void *rust_helper_ERR_PTR(long err)
-{
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_ERR_PTR);
-
-bool rust_helper_IS_ERR(__force const void *ptr)
-{
- return IS_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);
-
-long rust_helper_PTR_ERR(__force const void *ptr)
-{
- return PTR_ERR(ptr);
-}
-EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
-
-const char *rust_helper_errname(int err)
-{
- return errname(err);
-}
-EXPORT_SYMBOL_GPL(rust_helper_errname);
-
-struct task_struct *rust_helper_get_current(void)
-{
- return current;
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_current);
-
-void rust_helper_get_task_struct(struct task_struct *t)
-{
- get_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
-
-void rust_helper_put_task_struct(struct task_struct *t)
-{
- put_task_struct(t);
-}
-EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
-
-struct kunit *rust_helper_kunit_get_current_test(void)
-{
- return kunit_get_current_test();
-}
-EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
-
-void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
- bool onstack, const char *name,
- struct lock_class_key *key)
-{
- __init_work(work, onstack);
- work->data = (atomic_long_t)WORK_DATA_INIT();
- lockdep_init_map(&work->lockdep_map, name, key, 0);
- INIT_LIST_HEAD(&work->entry);
- work->func = func;
-}
-EXPORT_SYMBOL_GPL(rust_helper_init_work_with_key);
-
-void * __must_check __realloc_size(2)
-rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
-{
- return krealloc(objp, new_size, flags);
-}
-EXPORT_SYMBOL_GPL(rust_helper_krealloc);
-
-/*
- * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
- * use it in contexts where Rust expects a `usize` like slice (array) indices.
- * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
- * pointer) but not necessarily the same as `size_t` (can hold the size of any
- * single object). Most modern platforms use the same concrete integer type for
- * both of them, but in case we find ourselves on a platform where
- * that's not true, fail early instead of risking ABI or
- * integer-overflow issues.
- *
- * If your platform fails this assertion, it means that you are in
- * danger of integer-overflow bugs (even if you attempt to add
- * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
- * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
- * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
- */
-static_assert(
- sizeof(size_t) == sizeof(uintptr_t) &&
- __alignof__(size_t) == __alignof__(uintptr_t),
- "Rust code expects C `size_t` to match Rust `usize`"
-);
-
-// This will soon be moved to a separate file, so no need to merge with above.
-#include <linux/blk-mq.h>
-#include <linux/blkdev.h>
-
-void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_to_pdu);
-
-struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
-{
- return blk_mq_rq_from_pdu(pdu);
-}
-EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_from_pdu);
diff --git a/rust/helpers/blk.c b/rust/helpers/blk.c
new file mode 100644
index 000000000000..cc9f4e6a2d23
--- /dev/null
+++ b/rust/helpers/blk.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-mq.h>
+#include <linux/blkdev.h>
+
+void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
+{
+ return blk_mq_rq_to_pdu(rq);
+}
+
+struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu)
+{
+ return blk_mq_rq_from_pdu(pdu);
+}
diff --git a/rust/helpers/bug.c b/rust/helpers/bug.c
new file mode 100644
index 000000000000..e2d13babc737
--- /dev/null
+++ b/rust/helpers/bug.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bug.h>
+
+__noreturn void rust_helper_BUG(void)
+{
+ BUG();
+}
diff --git a/rust/helpers/build_assert.c b/rust/helpers/build_assert.c
new file mode 100644
index 000000000000..6a54b2680b14
--- /dev/null
+++ b/rust/helpers/build_assert.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/build_bug.h>
+
+/*
+ * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
+ * use it in contexts where Rust expects a `usize` like slice (array) indices.
+ * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
+ * pointer) but not necessarily the same as `size_t` (can hold the size of any
+ * single object). Most modern platforms use the same concrete integer type for
+ * both of them, but in case we find ourselves on a platform where
+ * that's not true, fail early instead of risking ABI or
+ * integer-overflow issues.
+ *
+ * If your platform fails this assertion, it means that you are in
+ * danger of integer-overflow bugs (even if you attempt to add
+ * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
+ * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
+ */
+static_assert(
+ sizeof(size_t) == sizeof(uintptr_t) &&
+ __alignof__(size_t) == __alignof__(uintptr_t),
+ "Rust code expects C `size_t` to match Rust `usize`"
+);
diff --git a/rust/helpers/build_bug.c b/rust/helpers/build_bug.c
new file mode 100644
index 000000000000..e994f7b5928c
--- /dev/null
+++ b/rust/helpers/build_bug.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/errname.h>
+
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
diff --git a/rust/helpers/err.c b/rust/helpers/err.c
new file mode 100644
index 000000000000..be3d45ef78a2
--- /dev/null
+++ b/rust/helpers/err.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/err.h>
+#include <linux/export.h>
+
+__force void *rust_helper_ERR_PTR(long err)
+{
+ return ERR_PTR(err);
+}
+
+bool rust_helper_IS_ERR(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+
+long rust_helper_PTR_ERR(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
new file mode 100644
index 000000000000..30f40149f3a9
--- /dev/null
+++ b/rust/helpers/helpers.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
+ * cannot be called either. This file explicitly creates functions ("helpers")
+ * that wrap those so that they can be called from Rust.
+ *
+ * Sorted alphabetically.
+ */
+
+#include "blk.c"
+#include "bug.c"
+#include "build_assert.c"
+#include "build_bug.c"
+#include "err.c"
+#include "kunit.c"
+#include "mutex.c"
+#include "page.c"
+#include "rbtree.c"
+#include "refcount.c"
+#include "signal.c"
+#include "slab.c"
+#include "spinlock.c"
+#include "task.c"
+#include "uaccess.c"
+#include "wait.c"
+#include "workqueue.c"
diff --git a/rust/helpers/kunit.c b/rust/helpers/kunit.c
new file mode 100644
index 000000000000..9d725067eb3b
--- /dev/null
+++ b/rust/helpers/kunit.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test-bug.h>
+#include <linux/export.h>
+
+struct kunit *rust_helper_kunit_get_current_test(void)
+{
+ return kunit_get_current_test();
+}
diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c
new file mode 100644
index 000000000000..200db7e6279f
--- /dev/null
+++ b/rust/helpers/mutex.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+ mutex_lock(lock);
+}
diff --git a/rust/helpers/page.c b/rust/helpers/page.c
new file mode 100644
index 000000000000..b3f2b8fbf87f
--- /dev/null
+++ b/rust/helpers/page.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+
+void *rust_helper_kmap_local_page(struct page *page)
+{
+ return kmap_local_page(page);
+}
+
+void rust_helper_kunmap_local(const void *addr)
+{
+ kunmap_local(addr);
+}
diff --git a/rust/helpers/rbtree.c b/rust/helpers/rbtree.c
new file mode 100644
index 000000000000..6d404b84a9b5
--- /dev/null
+++ b/rust/helpers/rbtree.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/rbtree.h>
+
+void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ rb_link_node(node, parent, rb_link);
+}
diff --git a/rust/helpers/refcount.c b/rust/helpers/refcount.c
new file mode 100644
index 000000000000..f47afc148ec3
--- /dev/null
+++ b/rust/helpers/refcount.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/refcount.h>
+
+refcount_t rust_helper_REFCOUNT_INIT(int n)
+{
+ return (refcount_t)REFCOUNT_INIT(n);
+}
+
+void rust_helper_refcount_inc(refcount_t *r)
+{
+ refcount_inc(r);
+}
+
+bool rust_helper_refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_dec_and_test(r);
+}
diff --git a/rust/helpers/signal.c b/rust/helpers/signal.c
new file mode 100644
index 000000000000..63c407f80c26
--- /dev/null
+++ b/rust/helpers/signal.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/sched/signal.h>
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c
new file mode 100644
index 000000000000..f043e087f9d6
--- /dev/null
+++ b/rust/helpers/slab.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+void * __must_check __realloc_size(2)
+rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags)
+{
+ return krealloc(objp, new_size, flags);
+}
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
new file mode 100644
index 000000000000..acc1376b833c
--- /dev/null
+++ b/rust/helpers/spinlock.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/spinlock.h>
+
+void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+#else
+ spin_lock_init(lock);
+#endif
+}
+
+void rust_helper_spin_lock(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+
+void rust_helper_spin_unlock(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
diff --git a/rust/helpers/task.c b/rust/helpers/task.c
new file mode 100644
index 000000000000..7ac789232d11
--- /dev/null
+++ b/rust/helpers/task.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/sched/task.h>
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+
+void rust_helper_get_task_struct(struct task_struct *t)
+{
+ get_task_struct(t);
+}
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+ put_task_struct(t);
+}
diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c
new file mode 100644
index 000000000000..f49076f813cd
--- /dev/null
+++ b/rust/helpers/uaccess.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/uaccess.h>
+
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
diff --git a/rust/helpers/wait.c b/rust/helpers/wait.c
new file mode 100644
index 000000000000..c7336bbf2750
--- /dev/null
+++ b/rust/helpers/wait.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/wait.h>
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
diff --git a/rust/helpers/workqueue.c b/rust/helpers/workqueue.c
new file mode 100644
index 000000000000..f59427acc323
--- /dev/null
+++ b/rust/helpers/workqueue.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/workqueue.h>
+
+void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func,
+ bool onstack, const char *name,
+ struct lock_class_key *key)
+{
+ __init_work(work, onstack);
+ work->data = (atomic_long_t)WORK_DATA_INIT();
+ lockdep_init_map(&work->lockdep_map, name, key, 0);
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs
index 9f1c1c489189..7009ad78d4e0 100644
--- a/rust/kernel/alloc/box_ext.rs
+++ b/rust/kernel/alloc/box_ext.rs
@@ -4,7 +4,7 @@
use super::{AllocError, Flags};
use alloc::boxed::Box;
-use core::mem::MaybeUninit;
+use core::{mem::MaybeUninit, ptr, result::Result};
/// Extensions to [`Box`].
pub trait BoxExt<T>: Sized {
@@ -17,6 +17,24 @@ pub trait BoxExt<T>: Sized {
///
/// The allocation may fail, in which case an error is returned.
fn new_uninit(flags: Flags) -> Result<Box<MaybeUninit<T>>, AllocError>;
+
+ /// Drops the contents, but keeps the allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::alloc::{flags, box_ext::BoxExt};
+ /// let value = Box::new([0; 32], flags::GFP_KERNEL)?;
+ /// assert_eq!(*value, [0; 32]);
+ /// let mut value = Box::drop_contents(value);
+ /// // Now we can re-use `value`:
+ /// value.write([1; 32]);
+ /// // SAFETY: We just wrote to it.
+ /// let value = unsafe { value.assume_init() };
+ /// assert_eq!(*value, [1; 32]);
+ /// # Ok::<(), Error>(())
+ /// ```
+ fn drop_contents(this: Self) -> Box<MaybeUninit<T>>;
}
impl<T> BoxExt<T> for Box<T> {
@@ -55,4 +73,17 @@ impl<T> BoxExt<T> for Box<T> {
// zero-sized types, we use `NonNull::dangling`.
Ok(unsafe { Box::from_raw(ptr) })
}
+
+ fn drop_contents(this: Self) -> Box<MaybeUninit<T>> {
+ let ptr = Box::into_raw(this);
+ // SAFETY: `ptr` is valid, because it came from `Box::into_raw`.
+ unsafe { ptr::drop_in_place(ptr) };
+
+ // CAST: `MaybeUninit<T>` is a transparent wrapper of `T`.
+ let ptr = ptr.cast::<MaybeUninit<T>>();
+
+ // SAFETY: `ptr` is valid for writes, because it came from `Box::into_raw` and it is valid for
+ // reads, since the pointer came from `Box::into_raw` and the type is `MaybeUninit<T>`.
+ unsafe { Box::from_raw(ptr) }
+ }
}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 145f5c397009..6f1587a2524e 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -135,8 +135,11 @@ impl Error {
/// Returns the error encoded as a pointer.
#[allow(dead_code)]
pub(crate) fn to_ptr<T>(self) -> *mut T {
+ #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))]
// SAFETY: `self.0` is a valid error due to its invariant.
- unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
+ unsafe {
+ bindings::ERR_PTR(self.0.into()) as *mut _
+ }
}
/// Returns a string representing the error, if one exists.
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 495c09ebe3a3..a17ac8762d8f 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -213,6 +213,7 @@
use crate::{
alloc::{box_ext::BoxExt, AllocError, Flags},
error::{self, Error},
+ sync::Arc,
sync::UniqueArc,
types::{Opaque, ScopeGuard},
};
@@ -742,6 +743,74 @@ macro_rules! try_init {
};
}
+/// Asserts that a field on a struct using `#[pin_data]` is marked with `#[pin]`, i.e. that it is
+/// structurally pinned.
+///
+/// # Example
+///
+/// This will succeed:
+/// ```
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct MyStruct {
+/// #[pin]
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// This will fail:
+// TODO: replace with `compile_fail` when supported.
+/// ```ignore
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct MyStruct {
+/// some_field: u64,
+/// }
+///
+/// assert_pinned!(MyStruct, some_field, u64);
+/// ```
+///
+/// Some uses of the macro may trigger the `can't use generic parameters from outer item` error. To
+/// work around this, you may pass the `inline` parameter to the macro. The `inline` parameter can
+/// only be used when the macro is invoked from a function body.
+/// ```
+/// use kernel::assert_pinned;
+/// #[pin_data]
+/// struct Foo<T> {
+/// #[pin]
+/// elem: T,
+/// }
+///
+/// impl<T> Foo<T> {
+/// fn project(self: Pin<&mut Self>) -> Pin<&mut T> {
+/// assert_pinned!(Foo<T>, elem, T, inline);
+///
+/// // SAFETY: The field is structurally pinned.
+/// unsafe { self.map_unchecked_mut(|me| &mut me.elem) }
+/// }
+/// }
+/// ```
+#[macro_export]
+macro_rules! assert_pinned {
+ ($ty:ty, $field:ident, $field_ty:ty, inline) => {
+ let _ = move |ptr: *mut $field_ty| {
+ // SAFETY: This code is unreachable.
+ let data = unsafe { <$ty as $crate::init::__internal::HasPinData>::__pin_data() };
+ let init = $crate::init::__internal::AlwaysFail::<$field_ty>::new();
+ // SAFETY: This code is unreachable.
+ unsafe { data.$field(ptr, init) }.ok();
+ };
+ };
+
+ ($ty:ty, $field:ident, $field_ty:ty) => {
+ const _: () = {
+ $crate::assert_pinned!($ty, $field, $field_ty, inline);
+ };
+ };
+}
+
/// A pin-initializer for the type `T`.
///
/// To use this initializer, you will need a suitable memory location that can hold a `T`. This can
@@ -1107,11 +1176,17 @@ unsafe impl<T, E> PinInit<T, E> for T {
/// Smart pointer that can initialize memory in-place.
pub trait InPlaceInit<T>: Sized {
+ /// Pinned version of `Self`.
+ ///
+ /// If a type already implicitly pins its pointee, `Pin<Self>` is unnecessary. In this case use
+ /// `Self`, otherwise just use `Pin<Self>`.
+ type PinnedSelf;
+
/// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
/// type.
///
/// If `T: !Unpin` it will not be able to move afterwards.
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>;
@@ -1119,7 +1194,7 @@ pub trait InPlaceInit<T>: Sized {
/// type.
///
/// If `T: !Unpin` it will not be able to move afterwards.
- fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Pin<Self>>
+ fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self::PinnedSelf>
where
Error: From<E>,
{
@@ -1148,19 +1223,35 @@ pub trait InPlaceInit<T>: Sized {
}
}
+impl<T> InPlaceInit<T> for Arc<T> {
+ type PinnedSelf = Self;
+
+ #[inline]
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_pin_init(init, flags).map(|u| u.into())
+ }
+
+ #[inline]
+ fn try_init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ UniqueArc::try_init(init, flags).map(|u| u.into())
+ }
+}
+
impl<T> InPlaceInit<T> for Box<T> {
+ type PinnedSelf = Pin<Self>;
+
#[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>,
{
- let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
+ <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_pin_init(init)
}
#[inline]
@@ -1168,29 +1259,19 @@ impl<T> InPlaceInit<T> for Box<T> {
where
E: From<AllocError>,
{
- let mut this = <Box<_> as BoxExt<_>>::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid.
- unsafe { init.__init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
+ <Box<_> as BoxExt<_>>::new_uninit(flags)?.write_init(init)
}
}
impl<T> InPlaceInit<T> for UniqueArc<T> {
+ type PinnedSelf = Pin<Self>;
+
#[inline]
- fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Pin<Self>, E>
+ fn try_pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self::PinnedSelf, E>
where
E: From<AllocError>,
{
- let mut this = UniqueArc::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
- // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
- // slot is valid and will not be moved, because we pin it later.
- unsafe { init.__pinned_init(slot)? };
- // SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() }.into())
+ UniqueArc::new_uninit(flags)?.write_pin_init(init)
}
#[inline]
@@ -1198,13 +1279,67 @@ impl<T> InPlaceInit<T> for UniqueArc<T> {
where
E: From<AllocError>,
{
- let mut this = UniqueArc::new_uninit(flags)?;
- let slot = this.as_mut_ptr();
+ UniqueArc::new_uninit(flags)?.write_init(init)
+ }
+}
+
+/// Smart pointer containing uninitialized memory and that can write a value.
+pub trait InPlaceWrite<T> {
+ /// The type `Self` turns into when the contents are initialized.
+ type Initialized;
+
+ /// Use the given initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and considers it as uninitialized memory.
+ fn write_init<E>(self, init: impl Init<T, E>) -> Result<Self::Initialized, E>;
+
+ /// Use the given pin-initializer to write a value into `self`.
+ ///
+ /// Does not drop the current value and considers it as uninitialized memory.
+ fn write_pin_init<E>(self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E>;
+}
+
+impl<T> InPlaceWrite<T> for Box<MaybeUninit<T>> {
+ type Initialized = Box<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
// SAFETY: When init errors/panics, slot will get deallocated but not dropped,
// slot is valid.
unsafe { init.__init(slot)? };
// SAFETY: All fields have been initialized.
- Ok(unsafe { this.assume_init() })
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
+ }
+}
+
+impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> {
+ type Initialized = UniqueArc<T>;
+
+ fn write_init<E>(mut self, init: impl Init<T, E>) -> Result<Self::Initialized, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid.
+ unsafe { init.__init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() })
+ }
+
+ fn write_pin_init<E>(mut self, init: impl PinInit<T, E>) -> Result<Pin<Self::Initialized>, E> {
+ let slot = self.as_mut_ptr();
+ // SAFETY: When init errors/panics, slot will get deallocated but not dropped,
+ // slot is valid and will not be moved, because we pin it later.
+ unsafe { init.__pinned_init(slot)? };
+ // SAFETY: All fields have been initialized.
+ Ok(unsafe { self.assume_init() }.into())
}
}
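A short usage sketch of the write_init/InPlaceWrite flow introduced above, in the same doctest style the crate uses elsewhere ('zeroed' is the existing zeroing initializer in kernel::init; the error plumbing assumes the usual doctest prelude):

    use kernel::alloc::{box_ext::BoxExt, flags};
    use kernel::init::{zeroed, InPlaceWrite};

    // Allocate uninitialized memory first, then write a value in place:
    // `write_init` consumes the `Box<MaybeUninit<u64>>` and returns `Box<u64>`.
    let uninit = <Box<u64> as BoxExt<u64>>::new_uninit(flags::GFP_KERNEL)?;
    let value = uninit.write_init(zeroed::<u64>())?;
    assert_eq!(*value, 0);
    # Ok::<(), Error>(())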
diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
index db3372619ecd..13cefd37512f 100644
--- a/rust/kernel/init/__internal.rs
+++ b/rust/kernel/init/__internal.rs
@@ -228,3 +228,32 @@ impl OnlyCallFromDrop {
Self(())
}
}
+
+/// Initializer that always fails.
+///
+/// Used by [`assert_pinned!`].
+///
+/// [`assert_pinned!`]: crate::assert_pinned
+pub struct AlwaysFail<T: ?Sized> {
+ _t: PhantomData<T>,
+}
+
+impl<T: ?Sized> AlwaysFail<T> {
+ /// Creates a new initializer that always fails.
+ pub fn new() -> Self {
+ Self { _t: PhantomData }
+ }
+}
+
+impl<T: ?Sized> Default for AlwaysFail<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// SAFETY: `__pinned_init` always fails, which is always okay.
+unsafe impl<T: ?Sized> PinInit<T, ()> for AlwaysFail<T> {
+ unsafe fn __pinned_init(self, _slot: *mut T) -> Result<(), ()> {
+ Err(())
+ }
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 58ed400198bf..22a3bfa5a9e9 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -38,12 +38,14 @@ pub mod init;
pub mod ioctl;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
+pub mod list;
#[cfg(CONFIG_NET)]
pub mod net;
pub mod page;
pub mod prelude;
pub mod print;
+pub mod rbtree;
pub mod sizes;
mod static_assert;
#[doc(hidden)]
pub mod std_vendor;
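The new list module below is intrusive: a type embeds ListLinks, opts in via the impl_has_list_links!/impl_list_arc_safe!/impl_list_item! macros re-exported at the top of the file, and then moves in and out of a List as a ListArc. A compressed, assumption-level sketch of the intended shape (macro invocations elided; push_back/pop_front are the methods defined below):

    use kernel::list::{List, ListArc, ListLinks};

    #[pin_data]
    struct Work {
        value: u32,
        #[pin]
        links: ListLinks,
    }

    // impl_has_list_links!, impl_list_arc_safe! and impl_list_item!
    // invocations would go here to wire `links` up as the prev/next storage.

    fn queue_and_take(list: &mut List<Work>, item: ListArc<Work>) -> Option<ListArc<Work>> {
        // The list owns the ListArc while the item is linked in;
        // pop_front hands ownership back in FIFO order.
        list.push_back(item);
        list.pop_front()
    }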
diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs
new file mode 100644
index 000000000000..5b4aec29eb67
--- /dev/null
+++ b/rust/kernel/list.rs
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A linked list implementation.
+
+use crate::init::PinInit;
+use crate::sync::ArcBorrow;
+use crate::types::Opaque;
+use core::iter::{DoubleEndedIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::ptr;
+
+mod impl_list_item_mod;
+pub use self::impl_list_item_mod::{
+ impl_has_list_links, impl_has_list_links_self_ptr, impl_list_item, HasListLinks, HasSelfPtr,
+};
+
+mod arc;
+pub use self::arc::{impl_list_arc_safe, AtomicTracker, ListArc, ListArcSafe, TryNewListArc};
+
+mod arc_field;
+pub use self::arc_field::{define_list_arc_field_getter, ListArcField};
+
+/// A linked list.
+///
+/// All elements in this linked list will be [`ListArc`] references to the value. Since a value can
+/// only have one `ListArc` (for each pair of prev/next pointers), this ensures that the same
+/// prev/next pointers are not used for several linked lists.
+///
+/// # Invariants
+///
+/// * If the list is empty, then `first` is null. Otherwise, `first` points at the `ListLinks`
+/// field of the first element in the list.
+/// * All prev/next pointers in `ListLinks` fields of items in the list are valid and form a cycle.
+/// * For every item in the list, the list owns the associated [`ListArc`] reference and has
+/// exclusive access to the `ListLinks` field.
+pub struct List<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ first: *mut ListLinksFields,
+ _ty: PhantomData<ListArc<T, ID>>,
+}
+
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Send for List<T, ID>
+where
+ ListArc<T, ID>: Send,
+ T: ?Sized + ListItem<ID>,
+{
+}
+// SAFETY: This is a container of `ListArc<T, ID>`, and access to the container allows the same
+// type of access to the `ListArc<T, ID>` elements.
+unsafe impl<T, const ID: u64> Sync for List<T, ID>
+where
+ ListArc<T, ID>: Sync,
+ T: ?Sized + ListItem<ID>,
+{
+}
+
+/// Implemented by types where a [`ListArc<Self>`] can be inserted into a [`List`].
+///
+/// # Safety
+///
+/// Implementers must ensure that they provide the guarantees documented on methods provided by
+/// this trait.
+///
+/// [`ListArc<Self>`]: ListArc
+pub unsafe trait ListItem<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Views the [`ListLinks`] for this value.
+ ///
+ /// # Guarantees
+ ///
+ /// If there is a previous call to `prepare_to_insert` and there is no call to `post_remove`
+ /// since the most recent such call, then this returns the same pointer as the one returned by
+ /// the most recent call to `prepare_to_insert`.
+ ///
+ /// Otherwise, the returned pointer points at a read-only [`ListLinks`] with two null pointers.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid value. (It need not be in an `Arc`.)
+ unsafe fn view_links(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// View the full value given its [`ListLinks`] field.
+ ///
+ /// Can only be used when the value is in a list.
+ ///
+ /// # Guarantees
+ ///
+ /// * Returns the same pointer as the one passed to the most recent call to `prepare_to_insert`.
+ /// * The returned pointer is valid until the next call to `post_remove`.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must originate from the most recent call to `prepare_to_insert`, or
+ /// from a call to `view_links` that happened after the most recent call to
+ /// `prepare_to_insert`.
+ /// * Since the most recent call to `prepare_to_insert`, the `post_remove` method must not have
+ /// been called.
+ unsafe fn view_value(me: *mut ListLinks<ID>) -> *const Self;
+
+ /// This is called when an item is inserted into a [`List`].
+ ///
+ /// # Guarantees
+ ///
+ /// The caller is granted exclusive access to the returned [`ListLinks`] until `post_remove` is
+ /// called.
+ ///
+ /// # Safety
+ ///
+ /// * The provided pointer must point at a valid value in an [`Arc`].
+ /// * Calls to `prepare_to_insert` and `post_remove` on the same value must alternate.
+ /// * The caller must own the [`ListArc`] for this value.
+ /// * The caller must not give up ownership of the [`ListArc`] unless `post_remove` has been
+ /// called after this call to `prepare_to_insert`.
+ ///
+ /// [`Arc`]: crate::sync::Arc
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut ListLinks<ID>;
+
+ /// This undoes a previous call to `prepare_to_insert`.
+ ///
+ /// # Guarantees
+ ///
+ /// The returned pointer is the pointer that was originally passed to `prepare_to_insert`.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must be the pointer returned by the most recent call to
+ /// `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut ListLinks<ID>) -> *const Self;
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct ListLinksFields {
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+}
+
+/// The prev/next pointers for an item in a linked list.
+///
+/// # Invariants
+///
+/// The fields are null if and only if this item is not in a list.
+#[repr(transparent)]
+pub struct ListLinks<const ID: u64 = 0> {
+ // This type is `!Unpin` for aliasing reasons as the pointers are part of an intrusive linked
+ // list.
+ inner: Opaque<ListLinksFields>,
+}
+
+// SAFETY: The only way to access/modify the pointers inside of `ListLinks<ID>` is via holding the
+// associated `ListArc<T, ID>`. Since that type correctly implements `Send`, it is impossible to
+// move an instance of this type to a different thread if the pointees are `!Send`.
+unsafe impl<const ID: u64> Send for ListLinks<ID> {}
+// SAFETY: The type is opaque so immutable references to a ListLinks are useless. Therefore, it's
+// okay to have immutable access to a ListLinks from several threads at once.
+unsafe impl<const ID: u64> Sync for ListLinks<ID> {}
+
+impl<const ID: u64> ListLinks<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn fields(me: *mut Self) -> *mut ListLinksFields {
+ // SAFETY: The caller promises that the pointer is valid.
+ unsafe { Opaque::raw_get(ptr::addr_of!((*me).inner)) }
+ }
+
+ /// # Safety
+ ///
+ /// `me` must be dereferenceable.
+ #[inline]
+ unsafe fn from_fields(me: *mut ListLinksFields) -> *mut Self {
+ me.cast()
+ }
+}
+
+/// Similar to [`ListLinks`], but also contains a pointer to the full value.
+///
+/// This type can be used instead of [`ListLinks`] to support lists with trait objects.
+#[repr(C)]
+pub struct ListLinksSelfPtr<T: ?Sized, const ID: u64 = 0> {
+ /// The `ListLinks` field inside this value.
+ ///
+ /// This is public so that it can be used with `impl_has_list_links!`.
+ pub inner: ListLinks<ID>,
+ // UnsafeCell is not enough here because we use `Opaque::uninit` as a dummy value, and
+ // `ptr::null()` doesn't work for `T: ?Sized`.
+ self_ptr: Opaque<*const T>,
+}
+
+// SAFETY: The fields of a ListLinksSelfPtr can be moved across thread boundaries.
+unsafe impl<T: ?Sized + Send, const ID: u64> Send for ListLinksSelfPtr<T, ID> {}
+// SAFETY: The type is opaque so immutable references to a ListLinksSelfPtr are useless. Therefore,
+// it's okay to have immutable access to a ListLinks from several threads at once.
+//
+// Note that `inner` being a public field does not prevent this type from being opaque, since
+// `inner` is an opaque type.
+unsafe impl<T: ?Sized + Sync, const ID: u64> Sync for ListLinksSelfPtr<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
+ /// The offset from the [`ListLinks`] to the self pointer field.
+ pub const LIST_LINKS_SELF_PTR_OFFSET: usize = core::mem::offset_of!(Self, self_ptr);
+
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: ListLinks {
+ inner: Opaque::new(ListLinksFields {
+ prev: ptr::null_mut(),
+ next: ptr::null_mut(),
+ }),
+ },
+ self_ptr: Opaque::uninit(),
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {
+ /// Creates a new empty list.
+ pub const fn new() -> Self {
+ Self {
+ first: ptr::null_mut(),
+ _ty: PhantomData,
+ }
+ }
+
+ /// Returns whether this list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.first.is_null()
+ }
+
+ /// Add the provided item to the back of the list.
+ pub fn push_back(&mut self, item: ListArc<T, ID>) {
+ let raw_item = ListArc::into_raw(item);
+ // SAFETY:
+ // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+ // * Since we have ownership of the `ListArc`, `post_remove` must have been called after
+ // the most recent call to `prepare_to_insert`, if any.
+ // * We own the `ListArc`.
+ // * Removing items from this list is always done using `remove_internal_inner`, which
+ // calls `post_remove` before giving up ownership.
+ let list_links = unsafe { T::prepare_to_insert(raw_item) };
+ // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+ let item = unsafe { ListLinks::fields(list_links) };
+
+ if self.first.is_null() {
+ self.first = item;
+ // SAFETY: The caller just gave us ownership of these fields.
+ // INVARIANT: A linked list with one item should be cyclic.
+ unsafe {
+ (*item).next = item;
+ (*item).prev = item;
+ }
+ } else {
+ let next = self.first;
+ // SAFETY: By the type invariant, this pointer is valid or null. We just checked that
+ // it's not null, so it must be valid.
+ let prev = unsafe { (*next).prev };
+ // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+ // ownership of the fields on `item`.
+ // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+ unsafe {
+ (*item).next = next;
+ (*item).prev = prev;
+ (*prev).next = item;
+ (*next).prev = item;
+ }
+ }
+ }
+
+ /// Add the provided item to the front of the list.
+ pub fn push_front(&mut self, item: ListArc<T, ID>) {
+ let raw_item = ListArc::into_raw(item);
+ // SAFETY:
+ // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+ // * If this requirement is violated, then the previous caller of `prepare_to_insert`
+ // violated the safety requirement that they can't give up ownership of the `ListArc`
+ // until they call `post_remove`.
+ // * We own the `ListArc`.
+ // * Removing items from this list is always done using `remove_internal_inner`, which
+ // calls `post_remove` before giving up ownership.
+ let list_links = unsafe { T::prepare_to_insert(raw_item) };
+ // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+ let item = unsafe { ListLinks::fields(list_links) };
+
+ if self.first.is_null() {
+ // SAFETY: The caller just gave us ownership of these fields.
+ // INVARIANT: A linked list with one item should be cyclic.
+ unsafe {
+ (*item).next = item;
+ (*item).prev = item;
+ }
+ } else {
+ let next = self.first;
+ // SAFETY: We just checked that `next` is non-null.
+ let prev = unsafe { (*next).prev };
+ // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+ // ownership of the fields on `item`.
+ // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+ unsafe {
+ (*item).next = next;
+ (*item).prev = prev;
+ (*prev).next = item;
+ (*next).prev = item;
+ }
+ }
+ self.first = item;
+ }
+
+ /// Removes the last item from this list.
+ pub fn pop_back(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: We just checked that the list is not empty.
+ let last = unsafe { (*self.first).prev };
+ // SAFETY: The last item of this list is in this list.
+ Some(unsafe { self.remove_internal(last) })
+ }
+
+ /// Removes the first item from this list.
+ pub fn pop_front(&mut self) -> Option<ListArc<T, ID>> {
+ if self.first.is_null() {
+ return None;
+ }
+
+ // SAFETY: The first item of this list is in this list.
+ Some(unsafe { self.remove_internal(self.first) })
+ }
+
+ /// Removes the provided item from this list and returns it.
+ ///
+ /// This returns `None` if the item is not in the list. (Note that by the safety requirements,
+ /// this means that the item is not in any list.)
+ ///
+ /// # Safety
+ ///
+ /// `item` must not be in a different linked list (with the same id).
+ pub unsafe fn remove(&mut self, item: &T) -> Option<ListArc<T, ID>> {
+ let mut item = unsafe { ListLinks::fields(T::view_links(item)) };
+ // SAFETY: The user provided a reference, and references are never dangling.
+ //
+ // As for why this is not a data race, there are two cases:
+ //
+ // * If `item` is not in any list, then these fields are read-only and null.
+ // * If `item` is in this list, then we have exclusive access to these fields since we
+ // have a mutable reference to the list.
+ //
+ // In either case, there's no race.
+ let ListLinksFields { next, prev } = unsafe { *item };
+
+ debug_assert_eq!(next.is_null(), prev.is_null());
+ if !next.is_null() {
+ // This is really a no-op, but this ensures that `item` is a raw pointer that was
+ // obtained without going through a pointer->reference->pointer conversion roundtrip.
+ // This ensures that the list is valid under the more restrictive strict provenance
+ // ruleset.
+ //
+ // SAFETY: We just checked that `next` is not null, and it's not dangling by the
+ // list invariants.
+ unsafe {
+ debug_assert_eq!(item, (*next).prev);
+ item = (*next).prev;
+ }
+
+ // SAFETY: We just checked that `item` is in a list, so the caller guarantees that it
+ // is in this list. The pointers are in the right order.
+ Some(unsafe { self.remove_internal_inner(item, next, prev) })
+ } else {
+ None
+ }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// `item` must point at an item in this list.
+ unsafe fn remove_internal(&mut self, item: *mut ListLinksFields) -> ListArc<T, ID> {
+ // SAFETY: The caller promises that this pointer is not dangling, and there's no data race
+ // since we have a mutable reference to the list containing `item`.
+ let ListLinksFields { next, prev } = unsafe { *item };
+ // SAFETY: The pointers are ok and in the right order.
+ unsafe { self.remove_internal_inner(item, next, prev) }
+ }
+
+ /// Removes the provided item from the list.
+ ///
+ /// # Safety
+ ///
+ /// The `item` pointer must point at an item in this list, and we must have `(*item).next ==
+ /// next` and `(*item).prev == prev`.
+ unsafe fn remove_internal_inner(
+ &mut self,
+ item: *mut ListLinksFields,
+ next: *mut ListLinksFields,
+ prev: *mut ListLinksFields,
+ ) -> ListArc<T, ID> {
+ // SAFETY: We have exclusive access to the pointers of items in the list, and the prev/next
+ // pointers are always valid for items in a list.
+ //
+ // INVARIANT: There are three cases:
+ // * If the list has at least three items, then after removing the item, `prev` and `next`
+ // will be next to each other.
+ // * If the list has two items, then the remaining item will point at itself.
+ // * If the list has one item, then `next == prev == item`, so these writes have no
+ // effect. The list remains unchanged and `item` is still in the list for now.
+ unsafe {
+ (*next).prev = prev;
+ (*prev).next = next;
+ }
+ // SAFETY: We have exclusive access to items in the list.
+ // INVARIANT: `item` is being removed, so the pointers should be null.
+ unsafe {
+ (*item).prev = ptr::null_mut();
+ (*item).next = ptr::null_mut();
+ }
+ // INVARIANT: There are three cases:
+ // * If `item` was not the first item, then `self.first` should remain unchanged.
+ // * If `item` was the first item and there is another item, then we just updated
+ // `prev->next` to `next`, which is the new first item, and setting `item->next` to null
+ // did not modify `prev->next`.
+ // * If `item` was the only item in the list, then `prev == item`, and we just set
+ // `item->next` to null, so this correctly sets `first` to null now that the list is
+ // empty.
+ if self.first == item {
+ // SAFETY: The `prev` pointer is the value that `item->prev` had when it was in this
+ // list, so it must be valid. There is no race since `prev` is still in the list and we
+ // still have exclusive access to the list.
+ self.first = unsafe { (*prev).next };
+ }
+
+ // SAFETY: `item` used to be in the list, so it is dereferenceable by the type invariants
+ // of `List`.
+ let list_links = unsafe { ListLinks::from_fields(item) };
+ // SAFETY: Any pointer in the list originates from a `prepare_to_insert` call.
+ let raw_item = unsafe { T::post_remove(list_links) };
+ // SAFETY: The above call to `post_remove` guarantees that we can recreate the `ListArc`.
+ unsafe { ListArc::from_raw(raw_item) }
+ }
+
+ /// Moves all items from `other` into `self`.
+ ///
+ /// The items of `other` are added to the back of `self`, so the last item of `other` becomes
+ /// the last item of `self`.
+ pub fn push_all_back(&mut self, other: &mut List<T, ID>) {
+ // First, we insert the elements into `self`. At the end, we make `other` empty.
+ if self.is_empty() {
+ // INVARIANT: All of the elements in `other` become elements of `self`.
+ self.first = other.first;
+ } else if !other.is_empty() {
+ let other_first = other.first;
+ // SAFETY: The other list is not empty, so this pointer is valid.
+ let other_last = unsafe { (*other_first).prev };
+ let self_first = self.first;
+ // SAFETY: The self list is not empty, so this pointer is valid.
+ let self_last = unsafe { (*self_first).prev };
+
+ // SAFETY: We have exclusive access to both lists, so we can update the pointers.
+ // INVARIANT: This correctly sets the pointers to merge both lists. We do not need to
+ // update `self.first` because the first element of `self` does not change.
+ unsafe {
+ (*self_first).prev = other_last;
+ (*other_last).next = self_first;
+ (*self_last).next = other_first;
+ (*other_first).prev = self_last;
+ }
+ }
+
+ // INVARIANT: The other list is now empty, so update its pointer.
+ other.first = ptr::null_mut();
+ }
+
+ /// Returns a cursor to the first element of the list.
+ ///
+ /// If the list is empty, this returns `None`.
+ pub fn cursor_front(&mut self) -> Option<Cursor<'_, T, ID>> {
+ if self.first.is_null() {
+ None
+ } else {
+ Some(Cursor {
+ current: self.first,
+ list: self,
+ })
+ }
+ }
+
+ /// Creates an iterator over the list.
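+ ///
+ /// # Examples
+ ///
+ /// A sketch of iterating over a list, using a hypothetical `Example` item type that is wired
+ /// up via the list macros:
+ ///
+ /// ```
+ /// use kernel::alloc::flags;
+ /// use kernel::list::{
+ ///     impl_has_list_links, impl_list_arc_safe, impl_list_item, List, ListArc, ListLinks,
+ /// };
+ /// use kernel::prelude::*;
+ ///
+ /// #[pin_data]
+ /// struct Example {
+ ///     value: u32,
+ ///     #[pin]
+ ///     links: ListLinks,
+ /// }
+ ///
+ /// impl_has_list_links! { impl HasListLinks<0> for Example { self.links } }
+ /// impl_list_arc_safe! { impl ListArcSafe<0> for Example { untracked; } }
+ /// impl_list_item! { impl ListItem<0> for Example { using ListLinks; } }
+ ///
+ /// let mut list = List::<Example>::new();
+ /// list.push_front(ListArc::pin_init(try_pin_init!(Example {
+ ///     value: 42,
+ ///     links <- ListLinks::new(),
+ /// }? Error), flags::GFP_KERNEL)?);
+ ///
+ /// for item in &list {
+ ///     pr_info!("{}\n", item.value);
+ /// }
+ /// # Ok::<(), Error>(())
+ /// ```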
+ pub fn iter(&self) -> Iter<'_, T, ID> {
+ // INVARIANT: If the list is empty, both pointers are null. Otherwise, both pointers point
+ // at the first element of the same list.
+ Iter {
+ current: self.first,
+ stop: self.first,
+ _ty: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Default for List<T, ID> {
+ fn default() -> Self {
+ List::new()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Drop for List<T, ID> {
+ fn drop(&mut self) {
+ while let Some(item) = self.pop_front() {
+ drop(item);
+ }
+ }
+}
+
+/// An iterator over a [`List`].
+///
+/// # Invariants
+///
+/// * There must be a [`List`] that is immutably borrowed for the duration of `'a`.
+/// * The `current` pointer is null or points at a value in that [`List`].
+/// * The `stop` pointer is equal to the `first` field of that [`List`].
+#[derive(Clone)]
+pub struct Iter<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ current: *mut ListLinksFields,
+ stop: *mut ListLinksFields,
+ _ty: PhantomData<&'a ListArc<T, ID>>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
+ type Item = ArcBorrow<'a, T>;
+
+ fn next(&mut self) -> Option<ArcBorrow<'a, T>> {
+ if self.current.is_null() {
+ return None;
+ }
+
+ let current = self.current;
+
+ // SAFETY: We just checked that `current` is not null, so it is in a list, and hence not
+ // dangling. There's no race because the iterator holds an immutable borrow to the list.
+ let next = unsafe { (*current).next };
+ // INVARIANT: If `current` was the last element of the list, then this updates it to null.
+ // Otherwise, we update it to the next element.
+ self.current = if next != self.stop {
+ next
+ } else {
+ ptr::null_mut()
+ };
+
+ // SAFETY: The `current` pointer points at a value in the list.
+ let item = unsafe { T::view_value(ListLinks::from_fields(current)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the list. However, the `ArcBorrow` is annotated with the iterator's
+ // lifetime, and the list is immutably borrowed for that lifetime.
+ // * Values in a list never have a `UniqueArc` reference.
+ Some(unsafe { ArcBorrow::from_raw(item) })
+ }
+}
+
+/// A cursor into a [`List`].
+///
+/// # Invariants
+///
+/// The `current` pointer points at a value in `list`.
+pub struct Cursor<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ current: *mut ListLinksFields,
+ list: &'a mut List<T, ID>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Cursor<'a, T, ID> {
+ /// Access the current element of this cursor.
+ pub fn current(&self) -> ArcBorrow<'_, T> {
+ // SAFETY: The `current` pointer points at a value in the list.
+ let me = unsafe { T::view_value(ListLinks::from_fields(self.current)) };
+ // SAFETY:
+ // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated
+ // on the returned `ArcBorrow`, because removing it from the list would require mutable
+ // access to the cursor or the list. However, the `ArcBorrow` holds an immutable borrow
+ // on the cursor, which in turn holds a mutable borrow on the list, so any such
+ // mutable access requires first releasing the immutable borrow on the cursor.
+ // * Values in a list never have a `UniqueArc` reference, because the list has a `ListArc`
+ // reference, and `UniqueArc` references must be unique.
+ unsafe { ArcBorrow::from_raw(me) }
+ }
+
+ /// Move the cursor to the next element.
+ pub fn next(self) -> Option<Cursor<'a, T, ID>> {
+ // SAFETY: The `current` field is always in a list.
+ let next = unsafe { (*self.current).next };
+
+ if next == self.list.first {
+ None
+ } else {
+ // INVARIANT: Since `self.current` is in the `list`, its `next` pointer is also in the
+ // `list`.
+ Some(Cursor {
+ current: next,
+ list: self.list,
+ })
+ }
+ }
+
+ /// Move the cursor to the previous element.
+ pub fn prev(self) -> Option<Cursor<'a, T, ID>> {
+ // SAFETY: The `current` field is always in a list.
+ let prev = unsafe { (*self.current).prev };
+
+ if self.current == self.list.first {
+ None
+ } else {
+ // INVARIANT: Since `self.current` is in the `list`, its `prev` pointer is also in the
+ // `list`.
+ Some(Cursor {
+ current: prev,
+ list: self.list,
+ })
+ }
+ }
+
+ /// Remove the current element from the list.
+ pub fn remove(self) -> ListArc<T, ID> {
+ // SAFETY: The `current` pointer always points at a member of the list.
+ unsafe { self.list.remove_internal(self.current) }
+ }
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for Iter<'a, T, ID> {}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for &'a List<T, ID> {
+ type IntoIter = Iter<'a, T, ID>;
+ type Item = ArcBorrow<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T, ID> {
+ self.iter()
+ }
+}
+
+/// An owning iterator into a [`List`].
+pub struct IntoIter<T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+ list: List<T, ID>,
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Iterator for IntoIter<T, ID> {
+ type Item = ListArc<T, ID>;
+
+ fn next(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_front()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> FusedIterator for IntoIter<T, ID> {}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> DoubleEndedIterator for IntoIter<T, ID> {
+ fn next_back(&mut self) -> Option<ListArc<T, ID>> {
+ self.list.pop_back()
+ }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> IntoIterator for List<T, ID> {
+ type IntoIter = IntoIter<T, ID>;
+ type Item = ListArc<T, ID>;
+
+ fn into_iter(self) -> IntoIter<T, ID> {
+ IntoIter { list: self }
+ }
+}
diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs
new file mode 100644
index 000000000000..d801b9dc6291
--- /dev/null
+++ b/rust/kernel/list/arc.rs
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A wrapper around `Arc` for linked lists.
+
+use crate::alloc::{AllocError, Flags};
+use crate::prelude::*;
+use crate::sync::{Arc, ArcBorrow, UniqueArc};
+use core::marker::{PhantomPinned, Unsize};
+use core::ops::Deref;
+use core::pin::Pin;
+use core::sync::atomic::{AtomicBool, Ordering};
+
+/// Declares that this type has some way to ensure that there is exactly one `ListArc` instance for
+/// this id.
+///
+/// Types that implement this trait should include some kind of logic for keeping track of whether
+/// a [`ListArc`] exists or not. We refer to this logic as "the tracking inside `T`".
+///
+/// We allow the case where the tracking inside `T` thinks that a [`ListArc`] exists, but actually,
+/// there isn't a [`ListArc`]. However, we do not allow the opposite situation where a [`ListArc`]
+/// exists, but the tracking thinks it doesn't. This is because the former can at most result in us
+/// failing to create a [`ListArc`] when the operation could succeed, whereas the latter can result
+/// in the creation of two [`ListArc`] references. Only the latter situation can lead to memory
+/// safety issues.
+///
+/// A consequence of the above is that you may implement the tracking inside `T` by not actually
+/// keeping track of anything. To do this, you always claim that a [`ListArc`] exists, even if
+/// there isn't one. This implementation is allowed by the above rule, but it means that
+/// [`ListArc`] references can only be created if you have ownership of *all* references to the
+/// refcounted object, as you otherwise have no way of knowing whether a [`ListArc`] exists.
+pub trait ListArcSafe<const ID: u64 = 0> {
+ /// Informs the tracking inside this type that it now has a [`ListArc`] reference.
+ ///
+ /// This method may be called even if the tracking inside this type thinks that a `ListArc`
+ /// reference exists. (But only if that's not actually the case.)
+ ///
+ /// # Safety
+ ///
+ /// Must not be called if a [`ListArc`] already exists for this value.
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>);
+
+ /// Informs the tracking inside this type that there is no [`ListArc`] reference anymore.
+ ///
+ /// # Safety
+ ///
+ /// Must only be called if there is no [`ListArc`] reference, but the tracking thinks there is.
+ unsafe fn on_drop_list_arc(&self);
+}
+
+/// Declares that this type is able to safely attempt to create `ListArc`s at any time.
+///
+/// # Safety
+///
+/// The guarantees of `try_new_list_arc` must be upheld.
+pub unsafe trait TryNewListArc<const ID: u64 = 0>: ListArcSafe<ID> {
+ /// Attempts to convert an `Arc<Self>` into a `ListArc<Self>`. Returns `true` if the
+ /// conversion was successful.
+ ///
+ /// This method should not be called directly. Use [`ListArc::try_from_arc`] instead.
+ ///
+ /// # Guarantees
+ ///
+ /// If this call returns `true`, then there is no [`ListArc`] pointing to this value.
+ /// Additionally, this call will have transitioned the tracking inside `Self` from not thinking
+ /// that a [`ListArc`] exists, to thinking that a [`ListArc`] exists.
+ fn try_new_list_arc(&self) -> bool;
+}
+
+/// Declares that this type supports [`ListArc`].
+///
+/// This macro supports a few different strategies for implementing the tracking inside the type:
+///
+/// * The `untracked` strategy does not actually keep track of whether a [`ListArc`] exists. When
+/// using this strategy, the only way to create a [`ListArc`] is using a [`UniqueArc`].
+/// * The `tracked_by` strategy defers the tracking to a field of the struct. The user must specify
+/// which field to defer the tracking to. The field must implement [`ListArcSafe`]. If the field
+/// implements [`TryNewListArc`], then the type will also implement [`TryNewListArc`].
+///
+/// The `tracked_by` strategy is usually used by deferring to a field of type
+/// [`AtomicTracker`]. However, it is also possible to defer the tracking to another struct
+/// that also uses this macro.
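+///
+/// # Examples
+///
+/// A sketch of the `tracked_by` strategy, using a hypothetical `Example` struct that defers the
+/// tracking to an [`AtomicTracker`] field:
+///
+/// ```
+/// use kernel::alloc::flags;
+/// use kernel::list::{impl_list_arc_safe, AtomicTracker, ListArc};
+/// use kernel::prelude::*;
+/// use kernel::sync::Arc;
+///
+/// #[pin_data]
+/// struct Example {
+///     #[pin]
+///     tracker: AtomicTracker,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Example { tracked_by tracker: AtomicTracker; }
+/// }
+///
+/// let arc = Arc::pin_init(pin_init!(Example {
+///     tracker <- AtomicTracker::new(),
+/// }), flags::GFP_KERNEL)?;
+///
+/// // `AtomicTracker` implements `TryNewListArc`, so a `ListArc` can be created from the `Arc`.
+/// let _list_arc = ListArc::<Example>::try_from_arc(arc).ok().unwrap();
+/// # Ok::<(), Error>(())
+/// ```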
+#[macro_export]
+macro_rules! impl_list_arc_safe {
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty { untracked; } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {}
+ unsafe fn on_drop_list_arc(&self) {}
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty {
+ tracked_by $field:ident : $fty:ty;
+ } $($rest:tt)*) => {
+ impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+ unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {
+ $crate::assert_pinned!($t, $field, $fty, inline);
+
+ // SAFETY: This field is structurally pinned as per the above assertion.
+ let field = unsafe {
+ ::core::pin::Pin::map_unchecked_mut(self, |me| &mut me.$field)
+ };
+ // SAFETY: The caller promises that there is no `ListArc`.
+ unsafe {
+ <$fty as $crate::list::ListArcSafe<$num>>::on_create_list_arc_from_unique(field)
+ };
+ }
+ unsafe fn on_drop_list_arc(&self) {
+ // SAFETY: The caller promises that there is no `ListArc` reference, and also
+ // promises that the tracking thinks there is a `ListArc` reference.
+ unsafe { <$fty as $crate::list::ListArcSafe<$num>>::on_drop_list_arc(&self.$field) };
+ }
+ }
+ // SAFETY: The guarantees of `try_new_list_arc` are upheld by forwarding the call to the
+ // field, whose `TryNewListArc` implementation promises the same guarantees.
+ unsafe impl$(<$($generics)*>)? $crate::list::TryNewListArc<$num> for $t
+ where
+ $fty: $crate::list::TryNewListArc<$num>,
+ {
+ fn try_new_list_arc(&self) -> bool {
+ <$fty as $crate::list::TryNewListArc<$num>>::try_new_list_arc(&self.$field)
+ }
+ }
+ $crate::list::impl_list_arc_safe! { $($rest)* }
+ };
+
+ () => {};
+}
+pub use impl_list_arc_safe;
+
+/// A wrapper around [`Arc`] that's guaranteed unique for the given id.
+///
+/// The `ListArc` type can be thought of as a special reference to a refcounted object that owns the
+/// permission to manipulate the `next`/`prev` pointers stored in the refcounted object. By ensuring
+/// that each object has only one `ListArc` reference, the owner of that reference is assured
+/// exclusive access to the `next`/`prev` pointers. When a `ListArc` is inserted into a [`List`],
+/// the [`List`] takes ownership of the `ListArc` reference.
+///
+/// There are various strategies to ensuring that a value has only one `ListArc` reference. The
+/// simplest is to convert a [`UniqueArc`] into a `ListArc`. However, the refcounted object could
+/// also keep track of whether a `ListArc` exists using a boolean, which could allow for the
+/// creation of new `ListArc` references from an [`Arc`] reference. Whatever strategy is used, the
+/// relevant tracking is referred to as "the tracking inside `T`", and the [`ListArcSafe`] trait
+/// (and its subtraits) are used to update the tracking when a `ListArc` is created or destroyed.
+///
+/// Note that we allow the case where the tracking inside `T` thinks that a `ListArc` exists, but
+/// actually, there isn't a `ListArc`. However, we do not allow the opposite situation where a
+/// `ListArc` exists, but the tracking thinks it doesn't. This is because the former can at most
+/// result in us failing to create a `ListArc` when the operation could succeed, whereas the latter
+/// can result in the creation of two `ListArc` references.
+///
+/// While this `ListArc` is unique for the given id, there still might exist normal `Arc`
+/// references to the object.
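+///
+/// # Examples
+///
+/// A sketch of the simplest strategy, using a hypothetical `Example` type that does no tracking,
+/// so a `ListArc` can only be created from a [`UniqueArc`]:
+///
+/// ```
+/// use kernel::alloc::flags;
+/// use kernel::list::{impl_list_arc_safe, ListArc};
+/// use kernel::prelude::*;
+/// use kernel::sync::UniqueArc;
+///
+/// struct Example(u32);
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Example { untracked; }
+/// }
+///
+/// let unique = UniqueArc::new(Example(42), flags::GFP_KERNEL)?;
+/// let arc: ListArc<Example> = ListArc::from(unique);
+/// assert_eq!(arc.0, 42);
+/// # Ok::<(), Error>(())
+/// ```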
+///
+/// # Invariants
+///
+/// * Each reference counted object has at most one `ListArc` for each value of `ID`.
+/// * The tracking inside `T` is aware that a `ListArc` reference exists.
+///
+/// [`List`]: crate::list::List
+#[repr(transparent)]
+pub struct ListArc<T, const ID: u64 = 0>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ arc: Arc<T>,
+}
+
+impl<T: ListArcSafe<ID>, const ID: u64> ListArc<T, ID> {
+ /// Constructs a new reference counted instance of `T`.
+ #[inline]
+ pub fn new(contents: T, flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self::from(UniqueArc::new(contents, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// If `T: !Unpin` it will not be able to move afterwards.
+ // We don't implement `InPlaceInit` because `ListArc` is implicitly pinned. This is similar to
+ // what we do for `Arc`.
+ #[inline]
+ pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_pin_init(init, flags)?))
+ }
+
+ /// Use the given initializer to in-place initialize a `T`.
+ ///
+ /// This is equivalent to [`ListArc<T>::pin_init`], since a [`ListArc`] is always pinned.
+ #[inline]
+ pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> Result<Self, E>
+ where
+ E: From<AllocError>,
+ {
+ Ok(Self::from(UniqueArc::try_init(init, flags)?))
+ }
+}
+
+impl<T, const ID: u64> From<UniqueArc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(unique: UniqueArc<T>) -> Self {
+ Self::from(Pin::from(unique))
+ }
+}
+
+impl<T, const ID: u64> From<Pin<UniqueArc<T>>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Convert a pinned [`UniqueArc`] into a [`ListArc`].
+ #[inline]
+ fn from(mut unique: Pin<UniqueArc<T>>) -> Self {
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { T::on_create_list_arc_from_unique(unique.as_mut()) };
+ let arc = Arc::from(unique);
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`,
+ // so we can create a `ListArc`.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+}
+
+impl<T, const ID: u64> ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ /// Creates two `ListArc`s from a [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_unique<const ID2: u64>(unique: UniqueArc<T>) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ Self::pair_from_pin_unique(Pin::from(unique))
+ }
+
+ /// Creates two `ListArc`s from a pinned [`UniqueArc`].
+ ///
+ /// The two ids must be different.
+ #[inline]
+ pub fn pair_from_pin_unique<const ID2: u64>(
+ mut unique: Pin<UniqueArc<T>>,
+ ) -> (Self, ListArc<T, ID2>)
+ where
+ T: ListArcSafe<ID2>,
+ {
+ build_assert!(ID != ID2);
+
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID>>::on_create_list_arc_from_unique(unique.as_mut()) };
+ // SAFETY: We have a `UniqueArc`, so there is no `ListArc`.
+ unsafe { <T as ListArcSafe<ID2>>::on_create_list_arc_from_unique(unique.as_mut()) };
+
+ let arc1 = Arc::from(unique);
+ let arc2 = Arc::clone(&arc1);
+
+ // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`
+ // for both IDs (which are different), so we can create two `ListArc`s.
+ unsafe {
+ (
+ Self::transmute_from_arc(arc1),
+ ListArc::transmute_from_arc(arc2),
+ )
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc(arc: Arc<T>) -> Result<Self, Arc<T>>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Ok(unsafe { Self::transmute_from_arc(arc) })
+ } else {
+ Err(arc)
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// This fails if this value already has a `ListArc`.
+ pub fn try_from_arc_borrow(arc: ArcBorrow<'_, T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ if arc.try_new_list_arc() {
+ // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think
+ // that a `ListArc` exists. This lets us create a `ListArc`.
+ Some(unsafe { Self::transmute_from_arc(Arc::from(arc)) })
+ } else {
+ None
+ }
+ }
+
+ /// Try to create a new `ListArc`.
+ ///
+ /// If it's not possible to create a new `ListArc`, then the `Arc` is dropped. This will never
+ /// run the destructor of the value.
+ pub fn try_from_arc_or_drop(arc: Arc<T>) -> Option<Self>
+ where
+ T: TryNewListArc<ID>,
+ {
+ match Self::try_from_arc(arc) {
+ Ok(list_arc) => Some(list_arc),
+ Err(arc) => Arc::into_unique_or_drop(arc).map(Self::from),
+ }
+ }
+
+ /// Transmutes an [`Arc`] into a `ListArc` without updating the tracking inside `T`.
+ ///
+ /// # Safety
+ ///
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ unsafe fn transmute_from_arc(arc: Arc<T>) -> Self {
+ // INVARIANT: By the safety requirements, the invariants on `ListArc` are satisfied.
+ Self { arc }
+ }
+
+ /// Transmutes a `ListArc` into an [`Arc`] without updating the tracking inside `T`.
+ ///
+ /// After this call, the tracking inside `T` will still think that there is a `ListArc`
+ /// reference.
+ #[inline]
+ fn transmute_to_arc(self) -> Arc<T> {
+ // Use a transmute to skip destructor.
+ //
+ // SAFETY: ListArc is repr(transparent).
+ unsafe { core::mem::transmute(self) }
+ }
+
+ /// Convert ownership of this `ListArc` into a raw pointer.
+ ///
+ /// The returned pointer is indistinguishable from pointers returned by [`Arc::into_raw`]. The
+ /// tracking inside `T` will still think that a `ListArc` exists after this call.
+ #[inline]
+ pub fn into_raw(self) -> *const T {
+ Arc::into_raw(Self::transmute_to_arc(self))
+ }
+
+ /// Take ownership of the `ListArc` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must satisfy the safety requirements of [`Arc::from_raw`].
+ /// * The value must not already have a `ListArc` reference.
+ /// * The tracking inside `T` must think that there is a `ListArc` reference.
+ #[inline]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY: The pointer satisfies the safety requirements for `Arc::from_raw`.
+ let arc = unsafe { Arc::from_raw(ptr) };
+ // SAFETY: The value doesn't already have a `ListArc` reference, but the tracking thinks it
+ // does.
+ unsafe { Self::transmute_from_arc(arc) }
+ }
+
+ /// Converts the `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn into_arc(self) -> Arc<T> {
+ let arc = Self::transmute_to_arc(self);
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is.
+ unsafe { T::on_drop_list_arc(&arc) };
+ arc
+ }
+
+ /// Clone a `ListArc` into an [`Arc`].
+ #[inline]
+ pub fn clone_arc(&self) -> Arc<T> {
+ self.arc.clone()
+ }
+
+ /// Returns a reference to an [`Arc`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`&Arc`] (e.g., in a method
+ /// receiver), but we have a [`ListArc`] instead.
+ ///
+ /// [`&Arc`]: Arc
+ #[inline]
+ pub fn as_arc(&self) -> &Arc<T> {
+ &self.arc
+ }
+
+ /// Returns an [`ArcBorrow`] from the given [`ListArc`].
+ ///
+ /// This is useful when the argument of a function call is an [`ArcBorrow`] (e.g., in a method
+ /// receiver), but we have a [`ListArc`] instead. Getting an [`ArcBorrow`] is free when optimised.
+ #[inline]
+ pub fn as_arc_borrow(&self) -> ArcBorrow<'_, T> {
+ self.arc.as_arc_borrow()
+ }
+
+ /// Compare whether two [`ListArc`] pointers reference the same underlying object.
+ #[inline]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ Arc::ptr_eq(&this.arc, &other.arc)
+ }
+}
+
+impl<T, const ID: u64> Deref for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.arc.deref()
+ }
+}
+
+impl<T, const ID: u64> Drop for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is by the type
+ // invariants on `Self`.
+ unsafe { T::on_drop_list_arc(&self.arc) };
+ }
+}
+
+impl<T, const ID: u64> AsRef<Arc<T>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + ?Sized,
+{
+ #[inline]
+ fn as_ref(&self) -> &Arc<T> {
+ self.as_arc()
+ }
+}
+
+// This is to allow [`ListArc`] (and variants) to be used as the type of `self`.
+impl<T, const ID: u64> core::ops::Receiver for ListArc<T, ID> where T: ListArcSafe<ID> + ?Sized {}
+
+// This is to allow coercion from `ListArc<T>` to `ListArc<U>` if `T` can be converted to the
+// dynamically-sized type (DST) `U`.
+impl<T, U, const ID: u64> core::ops::CoerceUnsized<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+// This is to allow `ListArc<U>` to be dispatched on when `ListArc<T>` can be coerced into
+// `ListArc<U>`.
+impl<T, U, const ID: u64> core::ops::DispatchFromDyn<ListArc<U, ID>> for ListArc<T, ID>
+where
+ T: ListArcSafe<ID> + Unsize<U> + ?Sized,
+ U: ListArcSafe<ID> + ?Sized,
+{
+}
+
+/// A utility for tracking whether a [`ListArc`] exists using an atomic.
+///
+/// # Invariant
+///
+/// If the boolean is `false`, then there is no [`ListArc`] for this value.
+#[repr(transparent)]
+pub struct AtomicTracker<const ID: u64 = 0> {
+ inner: AtomicBool,
+ // This value needs to be pinned to justify the INVARIANT: comment in `AtomicTracker::new`.
+ _pin: PhantomPinned,
+}
+
+impl<const ID: u64> AtomicTracker<ID> {
+ /// Creates a new initializer for this type.
+ pub fn new() -> impl PinInit<Self> {
+ // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+ // not be constructed in an `Arc` that already has a `ListArc`.
+ Self {
+ inner: AtomicBool::new(false),
+ _pin: PhantomPinned,
+ }
+ }
+
+ fn project_inner(self: Pin<&mut Self>) -> &mut AtomicBool {
+ // SAFETY: The `inner` field is not structurally pinned, so we may obtain a mutable
+ // reference to it even if we only have a pinned reference to `self`.
+ unsafe { &mut Pin::into_inner_unchecked(self).inner }
+ }
+}
+
+impl<const ID: u64> ListArcSafe<ID> for AtomicTracker<ID> {
+ unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>) {
+ // INVARIANT: We just created a ListArc, so the boolean should be true.
+ *self.project_inner().get_mut() = true;
+ }
+
+ unsafe fn on_drop_list_arc(&self) {
+ // INVARIANT: We just dropped a ListArc, so the boolean should be false.
+ self.inner.store(false, Ordering::Release);
+ }
+}
+
+// SAFETY: If this method returns `true`, then by the type invariant there is no `ListArc` before
+// this call, so it is okay to create a new `ListArc`.
+//
+// The acquire ordering will synchronize with the release store from the destruction of any
+// previous `ListArc`, so if there was a previous `ListArc`, then the destruction of the previous
+// `ListArc` happens-before the creation of the new `ListArc`.
+unsafe impl<const ID: u64> TryNewListArc<ID> for AtomicTracker<ID> {
+ fn try_new_list_arc(&self) -> bool {
+ // INVARIANT: If this method returns true, then the boolean used to be false, and is no
+ // longer false, so it is okay for the caller to create a new [`ListArc`].
+ self.inner
+ .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ }
+}
diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs
new file mode 100644
index 000000000000..2330f673427a
--- /dev/null
+++ b/rust/kernel/list/arc_field.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A field that is exclusively owned by a [`ListArc`].
+//!
+//! This can be used to have a reference counted struct where one of the reference counted pointers
+//! has exclusive access to a field of the struct.
+//!
+//! [`ListArc`]: crate::list::ListArc
+
+use core::cell::UnsafeCell;
+
+/// A field owned by a specific [`ListArc`].
+///
+/// [`ListArc`]: crate::list::ListArc
+pub struct ListArcField<T, const ID: u64 = 0> {
+ value: UnsafeCell<T>,
+}
+
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Send for ListArcField<T, ID> {}
+// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArcField` to be thread-safe.
+unsafe impl<T: Send + Sync, const ID: u64> Sync for ListArcField<T, ID> {}
+
+impl<T, const ID: u64> ListArcField<T, ID> {
+ /// Creates a new `ListArcField`.
+ pub fn new(value: T) -> Self {
+ Self {
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Access the value when we have exclusive access to the `ListArcField`.
+ ///
+ /// This allows access to the field using a `UniqueArc` instead of a `ListArc`.
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+ /// Unsafely assert that you have shared access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have shared access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ pub unsafe fn assert_ref(&self) -> &T {
+ // SAFETY: The caller has shared access to the `ListArc`, so they also have shared access
+ // to this field.
+ unsafe { &*self.value.get() }
+ }
+
+ /// Unsafely assert that you have mutable access to the `ListArc` for this field.
+ ///
+ /// # Safety
+ ///
+ /// The caller must have mutable access to the `ListArc<ID>` containing the struct with this
+ /// field for the duration of the returned reference.
+ #[allow(clippy::mut_from_ref)]
+ pub unsafe fn assert_mut(&self) -> &mut T {
+ // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive
+ // access to this field.
+ unsafe { &mut *self.value.get() }
+ }
+}
+
+/// Defines getters for a [`ListArcField`].
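+///
+/// # Examples
+///
+/// A sketch with a hypothetical `Example` struct; the generated getters can only be called
+/// through a `ListArc`:
+///
+/// ```
+/// use kernel::alloc::flags;
+/// use kernel::list::{define_list_arc_field_getter, impl_list_arc_safe, ListArc, ListArcField};
+/// use kernel::prelude::*;
+/// use kernel::sync::UniqueArc;
+///
+/// struct Example {
+///     count: ListArcField<u32>,
+/// }
+///
+/// impl_list_arc_safe! {
+///     impl ListArcSafe<0> for Example { untracked; }
+/// }
+///
+/// impl Example {
+///     define_list_arc_field_getter! {
+///         pub fn count(&self) -> &u32 { count }
+///         pub fn count_mut(&mut self) -> &mut u32 { count }
+///     }
+/// }
+///
+/// let unique = UniqueArc::new(Example { count: ListArcField::new(0) }, flags::GFP_KERNEL)?;
+/// let mut arc: ListArc<Example> = ListArc::from(unique);
+/// *arc.count_mut() += 1;
+/// assert_eq!(*arc.count(), 1);
+/// # Ok::<(), Error>(())
+/// ```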
+#[macro_export]
+macro_rules! define_list_arc_field_getter {
+ ($pub:vis fn $name:ident(&self $(<$id:tt>)?) -> &$typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a $crate::list::ListArc<Self $(, $id)?>) -> &'a $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a shared reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_ref(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ ($pub:vis fn $name:ident(&mut self $(<$id:tt>)?) -> &mut $typ:ty { $field:ident }
+ $($rest:tt)*
+ ) => {
+ $pub fn $name<'a>(self: &'a mut $crate::list::ListArc<Self $(, $id)?>) -> &'a mut $typ {
+ let field = &(&**self).$field;
+ // SAFETY: We have a mutable reference to the `ListArc`.
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_mut(field) }
+ }
+
+ $crate::list::define_list_arc_field_getter!($($rest)*);
+ };
+
+ () => {};
+}
+pub use define_list_arc_field_getter;
diff --git a/rust/kernel/list/impl_list_item_mod.rs b/rust/kernel/list/impl_list_item_mod.rs
new file mode 100644
index 000000000000..a0438537cee1
--- /dev/null
+++ b/rust/kernel/list/impl_list_item_mod.rs
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Helpers for implementing list traits safely.
+
+use crate::list::ListLinks;
+
+/// Declares that this type has a `ListLinks<ID>` field at a fixed offset.
+///
+/// This trait is only used to help implement `ListItem` safely. If `ListItem` is implemented
+/// manually, then this trait is not needed. Use the [`impl_has_list_links!`] macro to implement
+/// this trait.
+///
+/// # Safety
+///
+/// All values of this type must have a `ListLinks<ID>` field at the given offset.
+///
+/// The behavior of `raw_get_list_links` must not be changed.
+pub unsafe trait HasListLinks<const ID: u64 = 0> {
+ /// The offset of the `ListLinks` field.
+ const OFFSET: usize;
+
+ /// Returns a pointer to the [`ListLinks<ID>`] field.
+ ///
+ /// # Safety
+ ///
+ /// The provided pointer must point at a valid struct of type `Self`.
+ ///
+ /// [`ListLinks<ID>`]: ListLinks
+ // We don't really need this method, but it's necessary for the implementation of
+ // `impl_has_list_links!` to be correct.
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut ListLinks<ID> {
+ // SAFETY: The caller promises that the pointer is valid. The implementer promises that the
+ // `OFFSET` constant is correct.
+ unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut ListLinks<ID> }
+ }
+}
+
+/// Implements the [`HasListLinks`] trait for the given type.
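+///
+/// # Examples
+///
+/// A sketch with a hypothetical `Example` struct that embeds a [`ListLinks`] field:
+///
+/// ```
+/// use kernel::alloc::flags;
+/// use kernel::list::{impl_has_list_links, ListLinks};
+/// use kernel::prelude::*;
+///
+/// #[pin_data]
+/// struct Example {
+///     #[pin]
+///     links: ListLinks,
+/// }
+///
+/// impl_has_list_links! {
+///     impl HasListLinks<0> for Example { self.links }
+/// }
+///
+/// let _example = Box::pin_init(pin_init!(Example {
+///     links <- ListLinks::new(),
+/// }), flags::GFP_KERNEL)?;
+/// # Ok::<(), Error>(())
+/// ```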
+#[macro_export]
+macro_rules! impl_has_list_links {
+ ($(impl$(<$($implarg:ident),*>)?
+ HasListLinks$(<$id:tt>)?
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self$(.$field:ident)* }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ //
+ // The behavior of `raw_get_list_links` is not changed since the `addr_of_mut!` macro is
+ // equivalent to the pointer offset operation in the trait definition.
+ unsafe impl$(<$($implarg),*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $($field).*) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling. We know that this
+ // expression doesn't follow any pointers, as the `offset_of!` invocation above
+ // would otherwise not compile.
+ unsafe { ::core::ptr::addr_of_mut!((*ptr)$(.$field)*) }
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links;
+
+/// Declares that the `ListLinks<ID>` field in this struct is inside a `ListLinksSelfPtr<T, ID>`.
+///
+/// # Safety
+///
+/// The `ListLinks<ID>` field of this struct at the offset `HasListLinks<ID>::OFFSET` must be
+/// inside a `ListLinksSelfPtr<T, ID>`.
+pub unsafe trait HasSelfPtr<T: ?Sized, const ID: u64 = 0>
+where
+ Self: HasListLinks<ID>,
+{
+}
+
+/// Implements the [`HasListLinks`] and [`HasSelfPtr`] traits for the given type.
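+///
+/// # Examples
+///
+/// A sketch with a hypothetical `Example` struct whose links are embedded in a
+/// `ListLinksSelfPtr`:
+///
+/// ```
+/// use kernel::alloc::flags;
+/// use kernel::list::{impl_has_list_links_self_ptr, ListLinksSelfPtr};
+/// use kernel::prelude::*;
+///
+/// #[pin_data]
+/// struct Example {
+///     #[pin]
+///     links: ListLinksSelfPtr<Example>,
+/// }
+///
+/// impl_has_list_links_self_ptr! {
+///     impl HasSelfPtr<Example> for Example { self.links }
+/// }
+///
+/// let _example = Box::pin_init(pin_init!(Example {
+///     links <- ListLinksSelfPtr::new(),
+/// }), flags::GFP_KERNEL)?;
+/// # Ok::<(), Error>(())
+/// ```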
+#[macro_export]
+macro_rules! impl_has_list_links_self_ptr {
+ ($(impl$({$($implarg:tt)*})?
+ HasSelfPtr<$item_type:ty $(, $id:tt)?>
+ for $self:ident $(<$($selfarg:ty),*>)?
+ { self.$field:ident }
+ )*) => {$(
+ // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the
+ // right type.
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for
+ $self $(<$($selfarg),*>)?
+ {}
+
+ unsafe impl$(<$($implarg)*>)? $crate::list::HasListLinks$(<$id>)? for
+ $self $(<$($selfarg),*>)?
+ {
+ const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize;
+
+ #[inline]
+ unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? {
+ // SAFETY: The caller promises that the pointer is not dangling.
+ let ptr: *mut $crate::list::ListLinksSelfPtr<$item_type $(, $id)?> =
+ unsafe { ::core::ptr::addr_of_mut!((*ptr).$field) };
+ ptr.cast()
+ }
+ }
+ )*};
+}
+pub use impl_has_list_links_self_ptr;
+
+/// Implements the [`ListItem`] trait for the given type.
+///
+/// Requires that the type implements [`HasListLinks`]. Use the [`impl_has_list_links!`] macro to
+/// implement that trait.
+///
+/// [`ListItem`]: crate::list::ListItem
+#[macro_export]
+macro_rules! impl_list_item {
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinks;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // is implemented in terms of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller guarantees that `me` points at a valid value of type `Self`.
+ unsafe {
+ <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut())
+ }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) }
+ }
+
+ // GUARANTEES:
+ // * `me` originates from the most recent call to `prepare_to_insert`, which just added
+ // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts
+ // `offset` from `me` so it returns the pointer originally passed to
+ // `prepare_to_insert`.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let offset = <Self as $crate::list::HasListLinks<$num>>::OFFSET;
+ // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it
+ // points at the field at offset `offset` in a value of type `Self`. Thus,
+ // subtracting `offset` from `me` is still in-bounds of the allocation.
+ unsafe { (me as *const u8).sub(offset) as *const Self }
+ }
+ }
+ )*};
+
+ (
+ $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty {
+ using ListLinksSelfPtr;
+ })*
+ ) => {$(
+ // SAFETY: See GUARANTEES comment on each method.
+ unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t {
+ // GUARANTEES:
+ // This implementation of `ListItem` will not give out exclusive access to the same
+ // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove`
+ // must alternate and exclusive access is given up when `post_remove` is called.
+ //
+ // Other invocations of `impl_list_item!` also cannot give out exclusive access to the
+ // same `ListLinks` because you can only implement `ListItem` once for each value of
+ // `ID`, and the `ListLinks` fields only work with the specified `ID`.
+ unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+ let links_field = unsafe { <Self as $crate::list::ListItem<$num>>::view_links(me) };
+
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // Goes via the offset as the field is private.
+ //
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const $crate::types::Opaque<*const Self>;
+ let cell_inner = $crate::types::Opaque::raw_get(self_ptr);
+
+ // SAFETY: This value is not accessed in any other places than `prepare_to_insert`,
+ // `post_remove`, or `view_value`. By the safety requirements of those methods,
+ // none of these three methods may be called in parallel with this call to
+ // `prepare_to_insert`, so this write will not race with any other access to the
+ // value.
+ unsafe { ::core::ptr::write(cell_inner, me) };
+
+ links_field
+ }
+
+ // GUARANTEES:
+ // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert`
+ // returns the return value of `view_links`.
+ // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when
+ // this value is not in a list.
+ unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> {
+ // SAFETY: The caller promises that `me` points at a valid value of type `Self`.
+ unsafe { <Self as $crate::list::HasListLinks<$num>>::raw_get_list_links(me.cast_mut()) }
+ }
+
+ // This function is also used as the implementation of `post_remove`, so the caller
+ // may choose to satisfy the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ //
+ // GUARANTEES: (always)
+ // * This returns the same pointer as the one passed to the most recent call to
+ // `prepare_to_insert` since that call wrote that pointer to this location. The value
+ // is only modified in `prepare_to_insert`, so it has not been modified since the
+ // most recent call.
+ //
+ // GUARANTEES: (only when using the `view_value` safety requirements)
+ // * The pointer remains valid until the next call to `post_remove` because the caller
+ // of the most recent call to `prepare_to_insert` promised to retain ownership of the
+ // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot
+ // be destroyed while a `ListArc` reference exists.
+ unsafe fn view_value(links_field: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ let spoff = $crate::list::ListLinksSelfPtr::<Self, $num>::LIST_LINKS_SELF_PTR_OFFSET;
+ // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so
+ // the pointer stays in bounds of the allocation.
+ let self_ptr = unsafe { (links_field as *const u8).add(spoff) }
+ as *const ::core::cell::UnsafeCell<*const Self>;
+ let cell_inner = ::core::cell::UnsafeCell::raw_get(self_ptr);
+ // SAFETY: This is not a data race, because the only function that writes to this
+ // value is `prepare_to_insert`, but by the safety requirements the
+ // `prepare_to_insert` method may not be called in parallel with `view_value` or
+ // `post_remove`.
+ unsafe { ::core::ptr::read(cell_inner) }
+ }
+
+ // GUARANTEES:
+ // The first guarantee of `view_value` is exactly what `post_remove` guarantees.
+ unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self {
+ // SAFETY: This specific implementation of `view_value` allows the caller to
+ // promise the safety requirements of `post_remove` instead of the safety
+ // requirements for `view_value`.
+ unsafe { <Self as $crate::list::ListItem<$num>>::view_value(me) }
+ }
+ }
+ )*};
+}
+pub use impl_list_item;
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index b37a0b3180fb..4571daec0961 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -37,6 +37,6 @@ pub use super::error::{code::*, Error, Result};
pub use super::{str::CStr, ThisModule};
-pub use super::init::{InPlaceInit, Init, PinInit};
+pub use super::init::{InPlaceInit, InPlaceWrite, Init, PinInit};
pub use super::current;
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index a78aa3514a0a..508b0221256c 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -4,7 +4,7 @@
//!
//! C header: [`include/linux/printk.h`](srctree/include/linux/printk.h)
//!
-//! Reference: <https://www.kernel.org/doc/html/latest/core-api/printk-basics.html>
+//! Reference: <https://docs.kernel.org/core-api/printk-basics.html>
use core::{
ffi::{c_char, c_void},
@@ -197,7 +197,7 @@ macro_rules! print_macro (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_emerg`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_emerg
+/// [`pr_emerg`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_emerg
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -221,7 +221,7 @@ macro_rules! pr_emerg (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_alert`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_alert
+/// [`pr_alert`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_alert
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -245,7 +245,7 @@ macro_rules! pr_alert (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_crit`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_crit
+/// [`pr_crit`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_crit
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -269,7 +269,7 @@ macro_rules! pr_crit (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_err`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_err
+/// [`pr_err`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_err
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -293,7 +293,7 @@ macro_rules! pr_err (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_warn`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_warn
+/// [`pr_warn`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_warn
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -317,7 +317,7 @@ macro_rules! pr_warn (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_notice`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_notice
+/// [`pr_notice`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_notice
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -341,7 +341,7 @@ macro_rules! pr_notice (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_info`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_info
+/// [`pr_info`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_info
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -367,7 +367,7 @@ macro_rules! pr_info (
/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
/// `alloc::format!` for information about the formatting syntax.
///
-/// [`pr_debug`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_debug
+/// [`pr_debug`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_debug
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
@@ -395,7 +395,7 @@ macro_rules! pr_debug (
/// `alloc::format!` for information about the formatting syntax.
///
/// [`pr_info!`]: crate::pr_info!
-/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
+/// [`pr_cont`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_cont
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
new file mode 100644
index 000000000000..25eb36fd1cdc
--- /dev/null
+++ b/rust/kernel/rbtree.rs
@@ -0,0 +1,1278 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Red-black trees.
+//!
+//! C header: [`include/linux/rbtree.h`](srctree/include/linux/rbtree.h)
+//!
+//! Reference: <https://docs.kernel.org/core-api/rbtree.html>
+
+use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*};
+use alloc::boxed::Box;
+use core::{
+ cmp::{Ord, Ordering},
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ptr::{addr_of_mut, from_mut, NonNull},
+};
+
+/// A red-black tree with owned nodes.
+///
+/// It is backed by the kernel C red-black trees.
+///
+/// # Examples
+///
+/// In the example below we do several operations on a tree. We note that insertions may fail if
+/// the system is out of memory.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// assert_eq!(tree.get(&10).unwrap(), &100);
+/// assert_eq!(tree.get(&20).unwrap(), &200);
+/// assert_eq!(tree.get(&30).unwrap(), &300);
+/// }
+///
+/// // Iterate over the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Print all elements.
+/// for (key, value) in &tree {
+/// pr_info!("{} = {}\n", key, value);
+/// }
+///
+/// // Replace one of the elements.
+/// tree.try_create_and_insert(10, 1000, flags::GFP_KERNEL)?;
+///
+/// // Check that the tree reflects the replacement.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Change the value of one of the elements.
+/// *tree.get_mut(&30).unwrap() = 3000;
+///
+/// // Check that the tree reflects the update.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove an element.
+/// tree.remove(&10);
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// In the example below, we first allocate a node, acquire a spinlock, then insert the node into
+/// the tree. This is useful when the insertion context does not allow sleeping, for example, when
+/// holding a spinlock.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}, sync::SpinLock};
+///
+/// fn insert_test(tree: &SpinLock<RBTree<u32, u32>>) -> Result {
+/// // Pre-allocate node. This may fail (as it allocates memory).
+/// let node = RBTreeNode::new(10, 100, flags::GFP_KERNEL)?;
+///
+/// // Insert node while holding the lock. It is guaranteed to succeed with no allocation
+/// // attempts.
+/// let mut guard = tree.lock();
+/// guard.insert(node);
+/// Ok(())
+/// }
+/// ```
+///
+/// In the example below, we reuse an existing node allocation from an element we removed.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNodeReservation}};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove a node, getting back ownership of it.
+/// let existing = tree.remove(&30).unwrap();
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Create a preallocated reservation that we can re-use later.
+/// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+///
+/// // Insert a new node into the tree, reusing the previous allocation. This is guaranteed to
+/// // succeed (no memory allocations).
+/// tree.insert(reservation.into_node(15, 150));
+///
+/// // Check that the tree reflects the new insertion.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&15, &150));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// Non-null parent/children pointers stored in instances of the `rb_node` C struct are always
+/// valid, and point to a field of our internal representation of a node.
+pub struct RBTree<K, V> {
+ root: bindings::rb_root,
+ _p: PhantomData<Node<K, V>>,
+}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Send condition as would be used for a struct with K and V fields.
+unsafe impl<K: Send, V: Send> Send for RBTree<K, V> {}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Sync condition as would be used for a struct with K and V fields.
+unsafe impl<K: Sync, V: Sync> Sync for RBTree<K, V> {}
+
+impl<K, V> RBTree<K, V> {
+ /// Creates a new and empty tree.
+ pub fn new() -> Self {
+ Self {
+ // INVARIANT: There are no nodes in the tree, so the invariant holds vacuously.
+ root: bindings::rb_root::default(),
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns an iterator over the tree nodes, sorted by key.
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(&self.root) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns a mutable iterator over the tree nodes, sorted by key.
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut {
+ _tree: PhantomData,
+ // INVARIANT:
+ // - `self.root` is a valid pointer to a tree root.
+ // - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+ iter_raw: IterRaw {
+ // SAFETY: by the invariants, all pointers are valid.
+ next: unsafe { bindings::rb_first(from_mut(&mut self.root)) },
+ _phantom: PhantomData,
+ },
+ }
+ }
+
+ /// Returns an iterator over the keys of the nodes in the tree, in sorted order.
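+    ///
+    /// A short sketch of iterating over the keys ([`RBTree::values`] works analogously):
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTree};
+    ///
+    /// let mut tree = RBTree::new();
+    /// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+    /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// // Keys are yielded in sorted order, regardless of insertion order.
+    /// let mut keys = tree.keys();
+    /// assert_eq!(keys.next(), Some(&10));
+    /// assert_eq!(keys.next(), Some(&20));
+    /// assert!(keys.next().is_none());
+    /// # Ok::<(), Error>(())
+    /// ```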
+ pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+ self.iter().map(|(k, _)| k)
+ }
+
+ /// Returns an iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+ self.iter().map(|(_, v)| v)
+ }
+
+ /// Returns a mutable iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+ self.iter_mut().map(|(_, v)| v)
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the smallest key.
+ pub fn cursor_front(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_first(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+
+ /// Returns a cursor over the tree nodes, starting with the largest key.
+ pub fn cursor_back(&mut self) -> Option<Cursor<'_, K, V>> {
+ let root = addr_of_mut!(self.root);
+ // SAFETY: `self.root` is always a valid root node
+ let current = unsafe { bindings::rb_last(root) };
+ NonNull::new(current).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> RBTree<K, V>
+where
+ K: Ord,
+{
+ /// Tries to insert a new value into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// Returns an error if it cannot allocate memory for the new node.
+ pub fn try_create_and_insert(
+ &mut self,
+ key: K,
+ value: V,
+ flags: Flags,
+ ) -> Result<Option<RBTreeNode<K, V>>> {
+ Ok(self.insert(RBTreeNode::new(key, value, flags)?))
+ }
+
+ /// Inserts a new node into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// This function always succeeds.
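+    ///
+    /// A short sketch of the overwrite behaviour:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}};
+    ///
+    /// let mut tree = RBTree::new();
+    ///
+    /// // Inserting a new key returns `None`.
+    /// assert!(tree.insert(RBTreeNode::new(10, 100, flags::GFP_KERNEL)?).is_none());
+    ///
+    /// // Inserting an existing key replaces the node and returns the old one.
+    /// let old = tree.insert(RBTreeNode::new(10, 111, flags::GFP_KERNEL)?).unwrap();
+    /// assert_eq!(old.to_key_value(), (10, 100));
+    /// assert_eq!(tree.get(&10), Some(&111));
+    /// # Ok::<(), Error>(())
+    /// ```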
+ pub fn insert(&mut self, node: RBTreeNode<K, V>) -> Option<RBTreeNode<K, V>> {
+ match self.raw_entry(&node.node.key) {
+ RawEntry::Occupied(entry) => Some(entry.replace(node)),
+ RawEntry::Vacant(entry) => {
+ entry.insert(node);
+ None
+ }
+ }
+ }
+
+ fn raw_entry(&mut self, key: &K) -> RawEntry<'_, K, V> {
+ let raw_self: *mut RBTree<K, V> = self;
+ // The returned `RawEntry` is used to call either `rb_link_node` or `rb_replace_node`.
+ // The parameters of `bindings::rb_link_node` are as follows:
+ // - `node`: A pointer to an uninitialized node being inserted.
+ // - `parent`: A pointer to an existing node in the tree. One of its child pointers must be
+ // null, and `node` will become a child of `parent` by replacing that child pointer
+ // with a pointer to `node`.
+ // - `rb_link`: A pointer to either the left-child or right-child field of `parent`. This
+ // specifies which child of `parent` should hold `node` after this call. The
+ // value of `*rb_link` must be null before the call to `rb_link_node`. If the
+ // red/black tree is empty, then it’s also possible for `parent` to be null. In
+ // this case, `rb_link` is a pointer to the `root` field of the red/black tree.
+ //
+ // We will traverse the tree looking for a node that has a null pointer as its child,
+ // representing an empty subtree where we can insert our new node. We need to make sure
+ // that we preserve the ordering of the nodes in the tree. In each iteration of the loop
+ // we store `parent` and `child_field_of_parent`, and the new `node` will go somewhere
+ // in the subtree of `parent` that `child_field_of_parent` points at. Once
+ // we find an empty subtree, we can insert the new node using `rb_link_node`.
+ let mut parent = core::ptr::null_mut();
+ let mut child_field_of_parent: &mut *mut bindings::rb_node =
+ // SAFETY: `raw_self` is a valid pointer to the `RBTree` (created from `self` above).
+ unsafe { &mut (*raw_self).root.rb_node };
+ while !(*child_field_of_parent).is_null() {
+ let curr = *child_field_of_parent;
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let node = unsafe { container_of!(curr, Node<K, V>, links) };
+
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ match key.cmp(unsafe { &(*node).key }) {
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => child_field_of_parent = unsafe { &mut (*curr).rb_left },
+ // SAFETY: `curr` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => child_field_of_parent = unsafe { &mut (*curr).rb_right },
+ Ordering::Equal => {
+ return RawEntry::Occupied(OccupiedEntry {
+ rbtree: self,
+ node_links: curr,
+ })
+ }
+ }
+ parent = curr;
+ }
+
+ RawEntry::Vacant(RawVacantEntry {
+ rbtree: raw_self,
+ parent,
+ child_field_of_parent,
+ _phantom: PhantomData,
+ })
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
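+    ///
+    /// A minimal sketch covering both the vacant and the occupied case:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::{Entry, RBTree, RBTreeNodeReservation}};
+    ///
+    /// let mut tree = RBTree::new();
+    /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// // The reservation is only consumed if the entry turns out to be vacant.
+    /// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+    /// match tree.entry(10) {
+    ///     Entry::Occupied(mut entry) => *entry.get_mut() += 1,
+    ///     Entry::Vacant(entry) => {
+    ///         entry.insert(0, reservation);
+    ///     }
+    /// }
+    /// assert_eq!(tree.get(&10), Some(&101));
+    /// # Ok::<(), Error>(())
+    /// ```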
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ match self.raw_entry(&key) {
+ RawEntry::Occupied(entry) => Entry::Occupied(entry),
+ RawEntry::Vacant(entry) => Entry::Vacant(VacantEntry { raw: entry, key }),
+ }
+ }
+
+    /// Returns an [`OccupiedEntry`] for the node with the given key, if one exists.
+ pub fn find_mut(&mut self, key: &K) -> Option<OccupiedEntry<'_, K, V>> {
+ match self.raw_entry(key) {
+ RawEntry::Occupied(entry) => Some(entry),
+ RawEntry::Vacant(_entry) => None,
+ }
+ }
+
+ /// Returns a reference to the value corresponding to the key.
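+    ///
+    /// A short sketch:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTree};
+    ///
+    /// let mut tree = RBTree::new();
+    /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// assert_eq!(tree.get(&10), Some(&100));
+    /// assert_eq!(tree.get(&20), None);
+    /// # Ok::<(), Error>(())
+    /// ```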
+ pub fn get(&self, key: &K) -> Option<&V> {
+ let mut node = self.root.rb_node;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) };
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ node = match key.cmp(unsafe { &(*this).key }) {
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => unsafe { (*node).rb_left },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => unsafe { (*node).rb_right },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Equal => return Some(unsafe { &(*this).value }),
+ }
+ }
+ None
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
+ self.find_mut(key).map(|node| node.into_mut())
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the node that was removed if one exists, or [`None`] otherwise.
+ pub fn remove_node(&mut self, key: &K) -> Option<RBTreeNode<K, V>> {
+ self.find_mut(key).map(OccupiedEntry::remove_node)
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the value that was removed if one exists, or [`None`] otherwise.
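+    ///
+    /// A short sketch:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTree};
+    ///
+    /// let mut tree = RBTree::new();
+    /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// assert_eq!(tree.remove(&10), Some(100));
+    /// // The node is gone, so removing the same key again finds nothing.
+    /// assert_eq!(tree.remove(&10), None);
+    /// # Ok::<(), Error>(())
+    /// ```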
+ pub fn remove(&mut self, key: &K) -> Option<V> {
+ self.find_mut(key).map(OccupiedEntry::remove)
+ }
+
+ /// Returns a cursor over the tree nodes based on the given key.
+ ///
+ /// If the given key exists, the cursor starts there.
+ /// Otherwise it starts with the first larger key in sort order.
+ /// If there is no larger key, it returns [`None`].
+ pub fn cursor_lower_bound(&mut self, key: &K) -> Option<Cursor<'_, K, V>>
+ where
+ K: Ord,
+ {
+ let mut node = self.root.rb_node;
+ let mut best_match: Option<NonNull<Node<K, V>>> = None;
+ while !node.is_null() {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ let this_key = unsafe { &(*this).key };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let left_child = unsafe { (*node).rb_left };
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let right_child = unsafe { (*node).rb_right };
+ match key.cmp(this_key) {
+ Ordering::Equal => {
+ best_match = NonNull::new(this);
+ break;
+ }
+ Ordering::Greater => {
+ node = right_child;
+ }
+ Ordering::Less => {
+ let is_better_match = match best_match {
+ None => true,
+ Some(best) => {
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let best_key = unsafe { &(*best.as_ptr()).key };
+ best_key > this_key
+ }
+ };
+ if is_better_match {
+ best_match = NonNull::new(this);
+ }
+ node = left_child;
+ }
+ };
+ }
+
+ let best = best_match?;
+
+ // SAFETY: `best` is a non-null node so it is valid by the type invariants.
+ let links = unsafe { addr_of_mut!((*best.as_ptr()).links) };
+
+ NonNull::new(links).map(|current| {
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self`.
+ Cursor {
+ current,
+ tree: self,
+ }
+ })
+ }
+}
+
+impl<K, V> Default for RBTree<K, V> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<K, V> Drop for RBTree<K, V> {
+ fn drop(&mut self) {
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ let mut next = unsafe { bindings::rb_first_postorder(&self.root) };
+
+ // INVARIANT: The loop invariant is that all tree nodes from `next` in postorder are valid.
+ while !next.is_null() {
+ // SAFETY: All links fields we create are in a `Node<K, V>`.
+ let this = unsafe { container_of!(next, Node<K, V>, links) };
+
+ // Find out what the next node is before disposing of the current one.
+ // SAFETY: `next` and all nodes in postorder are still valid.
+ next = unsafe { bindings::rb_next_postorder(next) };
+
+ // INVARIANT: This is the destructor, so we break the type invariant during clean-up,
+ // but it is not observable. The loop invariant is still maintained.
+
+ // SAFETY: `this` is valid per the loop invariant.
+ unsafe { drop(Box::from_raw(this.cast_mut())) };
+ }
+ }
+}
+
+/// A bidirectional cursor over the tree nodes, sorted by key.
+///
+/// # Examples
+///
+/// In the following example, we obtain a cursor to the first element in the tree.
+/// The cursor allows us to iterate bidirectionally over key/value pairs in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Move the cursor, updating it to the 2nd element.
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Peek at the next element without impacting the cursor.
+/// let next = cursor.peek_next().unwrap();
+/// assert_eq!(next, (&30, &300));
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Moving past the last element causes the cursor to return [`None`].
+/// cursor = cursor.move_next().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+/// let cursor = cursor.move_next();
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// A cursor can also be obtained at the last element in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// let mut cursor = tree.cursor_back().unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Obtaining a cursor returns [`None`] if the tree is empty.
+///
+/// ```
+/// use kernel::rbtree::RBTree;
+///
+/// let mut tree: RBTree<u16, u16> = RBTree::new();
+/// assert!(tree.cursor_front().is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// [`RBTree::cursor_lower_bound`] can be used to start at an arbitrary node in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert five elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(40, 400, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(50, 500, flags::GFP_KERNEL)?;
+///
+/// // If the provided key exists, a cursor to that key is returned.
+/// let cursor = tree.cursor_lower_bound(&20).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // If the provided key doesn't exist, a cursor to the first larger element in sort order is returned.
+/// let cursor = tree.cursor_lower_bound(&25).unwrap();
+/// let current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // If there is no larger key, [`None`] is returned.
+/// let cursor = tree.cursor_lower_bound(&55);
+/// assert!(cursor.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// The cursor allows mutation of values in the tree.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Retrieve a cursor.
+/// let mut cursor = tree.cursor_front().unwrap();
+///
+/// // Get a mutable reference to the current value.
+/// let (_, v) = cursor.current_mut();
+/// *v = 1000;
+///
+/// // The updated value is reflected in the tree.
+/// let updated = tree.get(&10).unwrap();
+/// assert_eq!(updated, &1000);
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// It also allows node removal. The following examples demonstrate the behavior of removing the current node.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Remove the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+/// cursor = cursor.remove_current().0.unwrap();
+///
+/// // If a node exists after the current element, it is returned.
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Get a cursor to the last element, and remove it.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Since there is no next node, the previous node is returned.
+/// cursor = cursor.remove_current().0.unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&20, &200));
+///
+/// // Removing the last element in the tree returns [`None`].
+/// assert!(cursor.remove_current().0.is_none());
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// Nodes adjacent to the current node can also be removed.
+///
+/// ```
+/// use kernel::{alloc::flags, rbtree::RBTree};
+///
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Get a cursor to the first element.
+/// let mut cursor = tree.cursor_front().unwrap();
+/// let mut current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_prev` from the first element returns [`None`].
+/// assert!(cursor.remove_prev().is_none());
+///
+/// // Get a cursor to the last element.
+/// cursor = tree.cursor_back().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&30, &300));
+///
+/// // Calling `remove_prev` removes and returns the middle element.
+/// assert_eq!(cursor.remove_prev().unwrap().to_key_value(), (20, 200));
+///
+/// // Calling `remove_next` from the last element returns [`None`].
+/// assert!(cursor.remove_next().is_none());
+///
+/// // Move to the first element
+/// cursor = cursor.move_prev().unwrap();
+/// current = cursor.current();
+/// assert_eq!(current, (&10, &100));
+///
+/// // Calling `remove_next` removes and returns the last element.
+/// assert_eq!(cursor.remove_next().unwrap().to_key_value(), (30, 300));
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+/// - `current` points to a node that is in the same [`RBTree`] as `tree`.
+pub struct Cursor<'a, K, V> {
+ tree: &'a mut RBTree<K, V>,
+ current: NonNull<bindings::rb_node>,
+}
+
+// SAFETY: The [`Cursor`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
+// The cursor only gives out immutable references to the keys, but since it has exclusive access to those same
+// keys, `Send` is sufficient. `Sync` would be okay, but it is more restrictive to the user.
+unsafe impl<'a, K: Send, V: Send> Send for Cursor<'a, K, V> {}
+
+// SAFETY: The [`Cursor`] gives out immutable references to K and mutable references to V,
+// so it has the same thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Cursor<'a, K, V> {}
+
+impl<'a, K, V> Cursor<'a, K, V> {
+ /// The current node
+ pub fn current(&self) -> (&K, &V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+ // - We have an immutable reference by the function signature.
+ unsafe { Self::to_key_value(self.current) }
+ }
+
+ /// The current node, with a mutable value
+ pub fn current_mut(&mut self) -> (&K, &mut V) {
+ // SAFETY:
+ // - `self.current` is a valid node by the type invariants.
+        // - We have a mutable reference by the function signature.
+ unsafe { Self::to_key_value_mut(self.current) }
+ }
+
+ /// Remove the current node from the tree.
+ ///
+ /// Returns a tuple where the first element is a cursor to the next node, if it exists,
+ /// else the previous node, else [`None`] (if the tree becomes empty). The second element
+ /// is the removed node.
+ pub fn remove_current(self) -> (Option<Self>, RBTreeNode<K, V>) {
+ let prev = self.get_neighbor_raw(Direction::Prev);
+ let next = self.get_neighbor_raw(Direction::Next);
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(self.current.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { Box::from_raw(this) };
+ let node = RBTreeNode { node };
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(&mut (*this).links, addr_of_mut!(self.tree.root)) };
+
+ let current = match (prev, next) {
+ (_, Some(next)) => next,
+ (Some(prev), None) => prev,
+ (None, None) => {
+ return (None, node);
+ }
+ };
+
+ (
+ // INVARIANT:
+ // - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ Some(Self {
+ current,
+ tree: self.tree,
+ }),
+ node,
+ )
+ }
+
+ /// Remove the previous node, returning it if it exists.
+ pub fn remove_prev(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Prev)
+ }
+
+ /// Remove the next node, returning it if it exists.
+ pub fn remove_next(&mut self) -> Option<RBTreeNode<K, V>> {
+ self.remove_neighbor(Direction::Next)
+ }
+
+ fn remove_neighbor(&mut self, direction: Direction) -> Option<RBTreeNode<K, V>> {
+ if let Some(neighbor) = self.get_neighbor_raw(direction) {
+ let neighbor = neighbor.as_ptr();
+ // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ unsafe { bindings::rb_erase(neighbor, addr_of_mut!(self.tree.root)) };
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(neighbor, Node<K, V>, links) }.cast_mut();
+ // SAFETY: `this` is valid by the type invariants as described above.
+ let node = unsafe { Box::from_raw(this) };
+ return Some(RBTreeNode { node });
+ }
+ None
+ }
+
+ /// Move the cursor to the previous node, returning [`None`] if it doesn't exist.
+ pub fn move_prev(self) -> Option<Self> {
+ self.mv(Direction::Prev)
+ }
+
+ /// Move the cursor to the next node, returning [`None`] if it doesn't exist.
+ pub fn move_next(self) -> Option<Self> {
+ self.mv(Direction::Next)
+ }
+
+ fn mv(self, direction: Direction) -> Option<Self> {
+ // INVARIANT:
+ // - `neighbor` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+ self.get_neighbor_raw(direction).map(|neighbor| Self {
+ tree: self.tree,
+ current: neighbor,
+ })
+ }
+
+ /// Access the previous node without moving the cursor.
+ pub fn peek_prev(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Prev)
+ }
+
+    /// Access the next node without moving the cursor.
+ pub fn peek_next(&self) -> Option<(&K, &V)> {
+ self.peek(Direction::Next)
+ }
+
+ fn peek(&self, direction: Direction) -> Option<(&K, &V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have an immutable reference to `self`.
+ unsafe { Self::to_key_value(neighbor) }
+ })
+ }
+
+ /// Access the previous node mutably without moving the cursor.
+ pub fn peek_prev_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Prev)
+ }
+
+ /// Access the next node mutably without moving the cursor.
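+    ///
+    /// A minimal sketch; note that the cursor itself does not move:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTree};
+    ///
+    /// let mut tree = RBTree::new();
+    /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+    /// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+    ///
+    /// let mut cursor = tree.cursor_front().unwrap();
+    /// if let Some((_, v)) = cursor.peek_next_mut() {
+    ///     *v += 1;
+    /// }
+    /// assert_eq!(cursor.current(), (&10, &100));
+    /// assert_eq!(cursor.peek_next().unwrap(), (&20, &201));
+    /// # Ok::<(), Error>(())
+    /// ```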
+ pub fn peek_next_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.peek_mut(Direction::Next)
+ }
+
+ fn peek_mut(&mut self, direction: Direction) -> Option<(&K, &mut V)> {
+ self.get_neighbor_raw(direction).map(|neighbor| {
+ // SAFETY:
+ // - `neighbor` is a valid tree node.
+ // - By the function signature, we have a mutable reference to `self`.
+ unsafe { Self::to_key_value_mut(neighbor) }
+ })
+ }
+
+ fn get_neighbor_raw(&self, direction: Direction) -> Option<NonNull<bindings::rb_node>> {
+ // SAFETY: `self.current` is valid by the type invariants.
+ let neighbor = unsafe {
+ match direction {
+ Direction::Prev => bindings::rb_prev(self.current.as_ptr()),
+ Direction::Next => bindings::rb_next(self.current.as_ptr()),
+ }
+ };
+
+ NonNull::new(neighbor)
+ }
+
+    /// # Safety
+    ///
+    /// - `node` must be a valid pointer to a node in an [`RBTree`].
+    /// - The caller has immutable access to `node` for the duration of 'b.
+ unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees immutable access to `node`.
+ (k, unsafe { &*v })
+ }
+
+    /// # Safety
+    ///
+    /// - `node` must be a valid pointer to a node in an [`RBTree`].
+    /// - The caller has mutable access to `node` for the duration of 'b.
+ unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b mut V) {
+ // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+ let (k, v) = unsafe { Self::to_key_value_raw(node) };
+ // SAFETY: the caller guarantees mutable access to `node`.
+ (k, unsafe { &mut *v })
+ }
+
+    /// # Safety
+    ///
+    /// - `node` must be a valid pointer to a node in an [`RBTree`].
+    /// - The caller has immutable access to the key for the duration of 'b.
+ unsafe fn to_key_value_raw<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, *mut V) {
+ // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
+ // point to the links field of `Node<K, V>` objects.
+ let this = unsafe { container_of!(node.as_ptr(), Node<K, V>, links) }.cast_mut();
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let k = unsafe { &(*this).key };
+ // SAFETY: The passed `node` is the current node or a non-null neighbor,
+ // thus `this` is valid by the type invariants.
+ let v = unsafe { addr_of_mut!((*this).value) };
+ (k, v)
+ }
+}
+
+/// Direction for [`Cursor`] operations.
+enum Direction {
+    /// The node immediately before, in sort order.
+    Prev,
+    /// The node immediately after, in sort order.
+    Next,
+}
+
+impl<'a, K, V> IntoIterator for &'a RBTree<K, V> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+/// An iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter`].
+pub struct Iter<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}
+
+// SAFETY: The [`Iter`] gives out immutable references to K and V, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // SAFETY: Due to `self._tree`, `k` and `v` are valid for the lifetime of `'a`.
+ self.iter_raw.next().map(|(k, v)| unsafe { (&*k, &*v) })
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut RBTree<K, V> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// A mutable iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter_mut`].
+pub struct IterMut<'a, K, V> {
+ _tree: PhantomData<&'a mut RBTree<K, V>>,
+ iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
+// The iterator only gives out immutable references to the keys, but since the iterator has exclusive access to those same
+// keys, `Send` is sufficient. `Sync` would be okay, but it is more restrictive to the user.
+unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
+
+// SAFETY: The [`IterMut`] gives out immutable references to K and mutable references to V, so it has the same
+// thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter_raw.next().map(|(k, v)|
+ // SAFETY: Due to `&mut self`, we have exclusive access to `k` and `v`, for the lifetime of `'a`.
+ unsafe { (&*k, &mut *v) })
+ }
+}
+
+/// A raw iterator over the nodes of a [`RBTree`].
+///
+/// # Invariants
+/// - `self.next` is a valid pointer.
+/// - `self.next` points to a node stored inside of a valid `RBTree`.
+struct IterRaw<K, V> {
+ next: *mut bindings::rb_node,
+ _phantom: PhantomData<fn() -> (K, V)>,
+}
+
+impl<K, V> Iterator for IterRaw<K, V> {
+ type Item = (*mut K, *mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ // SAFETY: By the type invariant of `IterRaw`, `self.next` is a valid node in an `RBTree`,
+ // and by the type invariant of `RBTree`, all nodes point to the links field of `Node<K, V>` objects.
+ let cur = unsafe { container_of!(self.next, Node<K, V>, links) }.cast_mut();
+
+ // SAFETY: `self.next` is a valid tree node by the type invariants.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning above, it is safe to dereference the node.
+ Some(unsafe { (addr_of_mut!((*cur).key), addr_of_mut!((*cur).value)) })
+ }
+}
+
+/// A memory reservation for a red-black tree node.
+///
+/// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+/// can be obtained by directly allocating it ([`RBTreeNodeReservation::new`]).
+pub struct RBTreeNodeReservation<K, V> {
+ node: Box<MaybeUninit<Node<K, V>>>,
+}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Allocates memory for a node to be eventually initialised and inserted into the tree via a
+ /// call to [`RBTree::insert`].
+ pub fn new(flags: Flags) -> Result<RBTreeNodeReservation<K, V>> {
+ Ok(RBTreeNodeReservation {
+ node: <Box<_> as BoxExt<_>>::new_uninit(flags)?,
+ })
+ }
+}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation. Those can always
+// be moved across threads.
+unsafe impl<K, V> Send for RBTreeNodeReservation<K, V> {}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation.
+unsafe impl<K, V> Sync for RBTreeNodeReservation<K, V> {}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Initialises a node reservation.
+ ///
+ /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+ pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+ self.node.write(Node {
+ key,
+ value,
+ links: bindings::rb_node::default(),
+ });
+ // SAFETY: We just wrote to it.
+ let node = unsafe { self.node.assume_init() };
+ RBTreeNode { node }
+ }
+}
+
+/// A red-black tree node.
+///
+/// The node is fully initialised (with key and value) and can be inserted into a tree without any
+/// extra allocations or failure paths.
+pub struct RBTreeNode<K, V> {
+ node: Box<Node<K, V>>,
+}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Allocates and initialises a node that can be inserted into the tree via
+ /// [`RBTree::insert`].
+ pub fn new(key: K, value: V, flags: Flags) -> Result<RBTreeNode<K, V>> {
+ Ok(RBTreeNodeReservation::new(flags)?.into_node(key, value))
+ }
+
+ /// Get the key and value from inside the node.
+ pub fn to_key_value(self) -> (K, V) {
+ (self.node.key, self.node.value)
+ }
+}
+
+// SAFETY: If K and V can be sent across threads, then it's also okay to send [`RBTreeNode`] across
+// threads.
+unsafe impl<K: Send, V: Send> Send for RBTreeNode<K, V> {}
+
+// SAFETY: If K and V can be accessed without synchronization, then it's also okay to access
+// [`RBTreeNode`] without synchronization.
+unsafe impl<K: Sync, V: Sync> Sync for RBTreeNode<K, V> {}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// Drop the key and value, but keep the allocation.
+ ///
+ /// It then becomes a reservation that can be re-initialised into a different node (i.e., with
+ /// a different key and/or value).
+ ///
+ /// The existing key and value are dropped in-place as part of this operation, that is, memory
+ /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).
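+    ///
+    /// A short sketch of recycling a node's allocation:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTreeNode};
+    ///
+    /// let node = RBTreeNode::new(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// // The key and value are dropped here, but the allocation is kept.
+    /// let reservation = node.into_reservation();
+    ///
+    /// // Re-initialise the reservation with a different key/value; no allocation happens.
+    /// let node = reservation.into_node(20, 200);
+    /// assert_eq!(node.to_key_value(), (20, 200));
+    /// # Ok::<(), Error>(())
+    /// ```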
+ pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {
+ RBTreeNodeReservation {
+ node: Box::drop_contents(self.node),
+ }
+ }
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This enum is constructed from the [`entry`] method on [`RBTree`].
+///
+/// [`entry`]: fn@RBTree::entry
+pub enum Entry<'a, K, V> {
+ /// This [`RBTree`] does not have a node with this key.
+ Vacant(VacantEntry<'a, K, V>),
+ /// This [`RBTree`] already has a node with this key.
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// Like [`Entry`], except that it doesn't have ownership of the key.
+enum RawEntry<'a, K, V> {
+ Vacant(RawVacantEntry<'a, K, V>),
+ Occupied(OccupiedEntry<'a, K, V>),
+}
+
+/// A view into a vacant entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+pub struct VacantEntry<'a, K, V> {
+ key: K,
+ raw: RawVacantEntry<'a, K, V>,
+}
+
+/// Like [`VacantEntry`], but doesn't hold on to the key.
+///
+/// # Invariants
+/// - `parent` may be null if the new node becomes the root.
+/// - `child_field_of_parent` is a valid pointer to the left-child or right-child of `parent`. If `parent` is
+/// null, it is a pointer to the root of the [`RBTree`].
+struct RawVacantEntry<'a, K, V> {
+ rbtree: *mut RBTree<K, V>,
+ /// The node that will become the parent of the new node if we insert one.
+ parent: *mut bindings::rb_node,
+ /// This points to the left-child or right-child field of `parent`, or `root` if `parent` is
+ /// null.
+ child_field_of_parent: *mut *mut bindings::rb_node,
+ _phantom: PhantomData<&'a mut RBTree<K, V>>,
+}
+
+impl<'a, K, V> RawVacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ ///
+ /// The `node` must have a key such that inserting it here does not break the ordering of this
+ /// [`RBTree`].
+ fn insert(self, node: RBTreeNode<K, V>) -> &'a mut V {
+ let node = Box::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // INVARIANT: We are linking in a new node, which is valid. It remains valid because we
+ // "forgot" it with `Box::into_raw`.
+ // SAFETY: The type invariants of `RawVacantEntry` are exactly the safety requirements of `rb_link_node`.
+ unsafe { bindings::rb_link_node(node_links, self.parent, self.child_field_of_parent) };
+
+ // SAFETY: All pointers are valid. `node` has just been inserted into the tree.
+ unsafe { bindings::rb_insert_color(node_links, addr_of_mut!((*self.rbtree).root)) };
+
+ // SAFETY: The node is valid until we remove it from the tree.
+ unsafe { &mut (*node).value }
+ }
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Inserts the given node into the [`RBTree`] at this entry.
+ pub fn insert(self, value: V, reservation: RBTreeNodeReservation<K, V>) -> &'a mut V {
+ self.raw.insert(reservation.into_node(self.key, value))
+ }
+}
+
+/// A view into an occupied entry in a [`RBTree`]. It is part of the [`Entry`] enum.
+///
+/// # Invariants
+/// - `node_links` is a valid, non-null pointer to a tree node in `self.rbtree`.
+pub struct OccupiedEntry<'a, K, V> {
+ rbtree: &'a mut RBTree<K, V>,
+ /// The node that this entry corresponds to.
+ node_links: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the value in the entry.
+ pub fn get(&self) -> &V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have shared access to the underlying tree, and can thus give out a shared reference.
+ unsafe { &(*container_of!(self.node_links, Node<K, V>, links)).value }
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ pub fn get_mut(&mut self) -> &mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - We have exclusive access to the underlying tree, and can thus give out a mutable reference.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Converts the entry into a mutable reference to its value.
+ ///
+    /// If you need multiple references to the `OccupiedEntry`, see [`Self::get_mut`].
+ pub fn into_mut(self) -> &'a mut V {
+ // SAFETY:
+ // - `self.node_links` is a valid pointer to a node in the tree.
+ // - This consumes the `&'a mut RBTree<K, V>`, therefore it can give out a mutable reference that lives for `'a`.
+ unsafe { &mut (*(container_of!(self.node_links, Node<K, V>, links).cast_mut())).value }
+ }
+
+ /// Remove this entry from the [`RBTree`].
+ pub fn remove_node(self) -> RBTreeNode<K, V> {
+ // SAFETY: The node is a node in the tree, so it is valid.
+ unsafe { bindings::rb_erase(self.node_links, &mut self.rbtree.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it, however, it was
+ // removed from the tree. So the invariants still hold.
+ RBTreeNode {
+ // SAFETY: The node was a node in the tree, but we removed it, so we can convert it
+ // back into a box.
+ node: unsafe {
+ Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut())
+ },
+ }
+ }
+
+ /// Takes the value of the entry out of the map, and returns it.
+ pub fn remove(self) -> V {
+ self.remove_node().node.value
+ }
+
+ /// Swap the current node for the provided node.
+ ///
+ /// The key of both nodes must be equal.
+ fn replace(self, node: RBTreeNode<K, V>) -> RBTreeNode<K, V> {
+ let node = Box::into_raw(node.node);
+
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let new_node_links = unsafe { addr_of_mut!((*node).links) };
+
+ // SAFETY: This updates the pointers so that `new_node_links` is in the tree where
+ // `self.node_links` used to be.
+ unsafe {
+ bindings::rb_replace_node(self.node_links, new_node_links, &mut self.rbtree.root)
+ };
+
+ // SAFETY:
+        // - `self.node_links` is a valid pointer to a node in the tree.
+ // - Now that we removed this entry from the tree, we can convert the node to a box.
+ let old_node =
+ unsafe { Box::from_raw(container_of!(self.node_links, Node<K, V>, links).cast_mut()) };
+
+ RBTreeNode { node: old_node }
+ }
+}
+
+struct Node<K, V> {
+ links: bindings::rb_node,
+ key: K,
+ value: V,
+}
diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
index 39679a960c1a..67bf9d37ddb5 100644
--- a/rust/kernel/std_vendor.rs
+++ b/rust/kernel/std_vendor.rs
@@ -136,7 +136,7 @@
///
/// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html
/// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html
-/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
+/// [`printk`]: https://docs.kernel.org/core-api/printk-basics.html
/// [`pr_info`]: crate::pr_info!
/// [`pr_debug`]: crate::pr_debug!
#[macro_export]
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 3673496c2363..3021f30fd822 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -12,12 +12,13 @@
//! 2. It does not support weak references, which allows it to be half the size.
//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned.
+//! 5. The object in [`Arc`] is pinned implicitly.
//!
//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
use crate::{
alloc::{box_ext::BoxExt, AllocError, Flags},
- error::{self, Error},
+ bindings,
init::{self, InPlaceInit, Init, PinInit},
try_init,
types::{ForeignOwnable, Opaque},
@@ -209,28 +210,6 @@ impl<T> Arc<T> {
// `Arc` object.
Ok(unsafe { Self::from_inner(Box::leak(inner).into()) })
}
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// If `T: !Unpin` it will not be able to move afterwards.
- #[inline]
- pub fn pin_init<E>(init: impl PinInit<T, E>, flags: Flags) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::pin_init(init, flags).map(|u| u.into())
- }
-
- /// Use the given initializer to in-place initialize a `T`.
- ///
- /// This is equivalent to [`Arc<T>::pin_init`], since an [`Arc`] is always pinned.
- #[inline]
- pub fn init<E>(init: impl Init<T, E>, flags: Flags) -> error::Result<Self>
- where
- Error: From<E>,
- {
- UniqueArc::init(init, flags).map(|u| u.into())
- }
}
impl<T: ?Sized> Arc<T> {
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index bd189d646adb..9e7ca066355c 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -7,8 +7,9 @@ use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
marker::{PhantomData, PhantomPinned},
- mem::MaybeUninit,
+ mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
+ pin::Pin,
ptr::NonNull,
};
@@ -26,7 +27,10 @@ pub trait ForeignOwnable: Sized {
/// Converts a Rust-owned object to a foreign-owned one.
///
- /// The foreign representation is a pointer to void.
+ /// The foreign representation is a pointer to void. There are no guarantees for this pointer.
+ /// For example, it might be invalid, dangling or pointing to uninitialized memory. Using it in
+ /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`],
+    /// or [`ForeignOwnable::try_from_foreign`] can result in undefined behavior.
fn into_foreign(self) -> *const core::ffi::c_void;
/// Borrows a foreign-owned object.
@@ -89,6 +93,32 @@ impl<T: 'static> ForeignOwnable for Box<T> {
}
}
+impl<T: 'static> ForeignOwnable for Pin<Box<T>> {
+ type Borrowed<'a> = Pin<&'a T>;
+
+ fn into_foreign(self) -> *const core::ffi::c_void {
+ // SAFETY: We are still treating the box as pinned.
+ Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements of `from_foreign` also ensure that the object remains alive for
+ // the lifetime of the returned value.
+ let r = unsafe { &*ptr.cast() };
+
+ // SAFETY: This pointer originates from a `Pin<Box<T>>`.
+ unsafe { Pin::new_unchecked(r) }
+ }
+
+ unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self {
+ // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous
+ // call to `Self::into_foreign`.
+ unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) }
+ }
+}
+
impl ForeignOwnable for () {
type Borrowed<'a> = ();
@@ -366,6 +396,35 @@ impl<T: AlwaysRefCounted> ARef<T> {
_p: PhantomData,
}
}
+
+ /// Consumes the `ARef`, returning a raw pointer.
+ ///
+ /// This function does not change the refcount. After calling this function, the caller is
+ /// responsible for the refcount previously managed by the `ARef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::ptr::NonNull;
+ /// use kernel::types::{ARef, AlwaysRefCounted};
+ ///
+ /// struct Empty {}
+ ///
+ /// unsafe impl AlwaysRefCounted for Empty {
+ /// fn inc_ref(&self) {}
+ /// unsafe fn dec_ref(_obj: NonNull<Self>) {}
+ /// }
+ ///
+ /// let mut data = Empty {};
+ /// let ptr = NonNull::<Empty>::new(&mut data as *mut _).unwrap();
+ /// let data_ref: ARef<Empty> = unsafe { ARef::from_raw(ptr) };
+ /// let raw_ptr: NonNull<Empty> = ARef::into_raw(data_ref);
+ ///
+ /// assert_eq!(ptr, raw_ptr);
+ /// ```
+ pub fn into_raw(me: Self) -> NonNull<T> {
+ ManuallyDrop::new(me).ptr
+ }
}
impl<T: AlwaysRefCounted> Clone for ARef<T> {
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 5be0cb9db3ee..a626b1145e5c 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -2,6 +2,10 @@
//! Crate for all kernel procedural macros.
+// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT`
+// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is
+// touched by Kconfig when the version string from the compiler changes.
+
#[macro_use]
mod quote;
mod concat_idents;
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 7a5b899e47b7..aef3b132f32b 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -262,6 +262,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(MODULE)]
#[doc(hidden)]
+ #[used]
+ #[link_section = \".init.data\"]
+ static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module;
+
+ #[cfg(MODULE)]
+ #[doc(hidden)]
#[no_mangle]
pub extern \"C\" fn cleanup_module() {{
// SAFETY:
@@ -273,6 +279,12 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
unsafe {{ __exit() }}
}}
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[used]
+ #[link_section = \".exit.data\"]
+ static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module;
+
// Built-in modules are initialized through an initcall pointer
// and the identifiers need to be unique.
#[cfg(not(MODULE))]