Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug          |  11
-rw-r--r--  lib/alloc_tag.c            | 109
-rw-r--r--  lib/bootconfig.c           |   2
-rw-r--r--  lib/crc/Kconfig            |   2
-rw-r--r--  lib/crypto/mpi/mpicoder.c  |   2
-rw-r--r--  lib/fonts/font_rotate.c    |   2
-rw-r--r--  lib/kunit/Kconfig          |   5
-rw-r--r--  lib/linear_ranges.c        |  36
-rw-r--r--  lib/maple_tree.c           |   2
-rw-r--r--  lib/rhashtable.c           |  36
-rw-r--r--  lib/test_hmm.c             | 130
-rw-r--r--  lib/test_kho.c             |   5
-rw-r--r--  lib/test_maple_tree.c      |   4
-rw-r--r--  lib/tests/Makefile         |   2
-rw-r--r--  lib/tests/liveupdate.c     |  18
-rw-r--r--  lib/tests/printf_kunit.c   |  22
-rw-r--r--  lib/tests/string_kunit.c   | 274
-rw-r--r--  lib/tests/test_kprobes.c   |  29
-rw-r--r--  lib/vsprintf.c             |   4
19 files changed, 595 insertions, 100 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 77c3774c1c49..8ff5adcfe1e0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2526,6 +2526,17 @@ config STRING_HELPERS_KUNIT_TEST
depends on KUNIT
default KUNIT_ALL_TESTS
+config STRING_KUNIT_BENCH
+ bool "Benchmark string functions at runtime"
+ depends on STRING_KUNIT_TEST
+ help
+ Enable performance measurement for string functions.
+
+	  This measures the throughput and per-call latency of string
+	  functions during the KUnit test run.
+
+ If unsure, say N.
+
config FFS_KUNIT_TEST
tristate "KUnit test ffs-family functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 58991ab09d84..ed1bdcf1f8ab 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -6,7 +6,9 @@
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
+#include <linux/pgalloc_tag.h>
#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/string_choices.h>
@@ -758,8 +760,115 @@ static __init bool need_page_alloc_tagging(void)
return mem_profiling_support;
}
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+/*
+ * Track page allocations before page_ext is initialized.
+ * Some pages are allocated before page_ext becomes available, leaving
+ * their codetag uninitialized. Track these early PFNs so we can clear
+ * their codetag refs later to avoid warnings when they are freed.
+ *
+ * Early allocations include:
+ * - Base allocations independent of CPU count
+ * - Per-CPU allocations (e.g., CPU hotplug callbacks during smp_init,
+ * such as trace ring buffers, scheduler per-cpu data)
+ *
+ * For simplicity, we fix the size to 8192.
+ * If insufficient, a warning will be triggered to alert the user.
+ *
+ * TODO: Replace fixed-size array with dynamic allocation using
+ * a GFP flag similar to ___GFP_NO_OBJ_EXT to avoid recursion.
+ */
+#define EARLY_ALLOC_PFN_MAX 8192
+
+static unsigned long early_pfns[EARLY_ALLOC_PFN_MAX] __initdata;
+static atomic_t early_pfn_count __initdata = ATOMIC_INIT(0);
+
+static void __init __alloc_tag_add_early_pfn(unsigned long pfn)
+{
+ int old_idx, new_idx;
+
+ do {
+ old_idx = atomic_read(&early_pfn_count);
+ if (old_idx >= EARLY_ALLOC_PFN_MAX) {
+ pr_warn_once("Early page allocations before page_ext init exceeded EARLY_ALLOC_PFN_MAX (%d)\n",
+ EARLY_ALLOC_PFN_MAX);
+ return;
+ }
+ new_idx = old_idx + 1;
+ } while (!atomic_try_cmpxchg(&early_pfn_count, &old_idx, new_idx));
+
+ early_pfns[old_idx] = pfn;
+}
+
+typedef void alloc_tag_add_func(unsigned long pfn);
+static alloc_tag_add_func __rcu *alloc_tag_add_early_pfn_ptr __refdata =
+ RCU_INITIALIZER(__alloc_tag_add_early_pfn);
+
+void alloc_tag_add_early_pfn(unsigned long pfn)
+{
+ alloc_tag_add_func *alloc_tag_add;
+
+ if (static_key_enabled(&mem_profiling_compressed))
+ return;
+
+ rcu_read_lock();
+ alloc_tag_add = rcu_dereference(alloc_tag_add_early_pfn_ptr);
+ if (alloc_tag_add)
+ alloc_tag_add(pfn);
+ rcu_read_unlock();
+}
+
+static void __init clear_early_alloc_pfn_tag_refs(void)
+{
+ unsigned int i;
+
+ if (static_key_enabled(&mem_profiling_compressed))
+ return;
+
+ rcu_assign_pointer(alloc_tag_add_early_pfn_ptr, NULL);
+ /* Make sure we are not racing with __alloc_tag_add_early_pfn() */
+ synchronize_rcu();
+
+ for (i = 0; i < atomic_read(&early_pfn_count); i++) {
+ unsigned long pfn = early_pfns[i];
+
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(page, &ref, &handle)) {
+ /*
+ * An early-allocated page could be freed and reallocated
+ * after its page_ext is initialized but before we clear it.
+ * In that case, it already has a valid tag set.
+ * We should not overwrite that valid tag with CODETAG_EMPTY.
+ *
+ * Note: there is still a small race window between checking
+ * ref.ct and calling set_codetag_empty(). We accept this
+ * race as it's unlikely and the extra complexity of atomic
+ * cmpxchg is not worth it for this debug-only code path.
+ */
+ if (ref.ct) {
+ put_page_tag_ref(handle);
+ continue;
+ }
+
+ set_codetag_empty(&ref);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+ }
+ }
+
+ }
+}
+#else /* !CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+static inline void __init clear_early_alloc_pfn_tag_refs(void) {}
+#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+
static __init void init_page_alloc_tagging(void)
{
+ clear_early_alloc_pfn_tag_refs();
}
struct page_ext_operations page_alloc_tagging_ops = {
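A note on the slot-reservation loop in __alloc_tag_add_early_pfn() above: each caller claims a unique array index with an atomic compare-and-swap instead of a lock, and overflow simply drops the entry after a one-time warning. The same idea, reduced to a standalone userspace sketch (C11 stdatomic, invented names, not the kernel code itself):

/*
 * Userspace sketch of the lock-free slot reservation used above: a CAS loop
 * reserves one index per caller, so concurrent recorders never overwrite
 * each other; once the table is full, new entries are silently dropped.
 */
#include <stdatomic.h>
#include <stdio.h>

#define SLOT_MAX 8192

static unsigned long slots[SLOT_MAX];
static atomic_int slot_count;

static void record_value(unsigned long v)
{
	int old_idx = atomic_load(&slot_count);

	do {
		if (old_idx >= SLOT_MAX)
			return;	/* table full: drop, mirroring the pr_warn_once() path */
	} while (!atomic_compare_exchange_weak(&slot_count, &old_idx, old_idx + 1));

	slots[old_idx] = v;	/* old_idx is now exclusively ours */
}

int main(void)
{
	record_value(42);
	printf("recorded %d entries\n", atomic_load(&slot_count));
	return 0;
}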
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 01966ab9a8b5..c470b93d5dbc 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -66,7 +66,7 @@ static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
if (early)
memblock_free(addr, size);
else if (addr)
- memblock_free_late(__pa(addr), size);
+ memblock_free(addr, size);
}
#else /* !__KERNEL__ */
diff --git a/lib/crc/Kconfig b/lib/crc/Kconfig
index 31038c8d111a..f47bb4c706fb 100644
--- a/lib/crc/Kconfig
+++ b/lib/crc/Kconfig
@@ -65,7 +65,7 @@ config CRC32_ARCH
depends on CRC32 && CRC_OPTIMIZATIONS
default y if ARM && KERNEL_MODE_NEON
default y if ARM64
- default y if LOONGARCH
+ default y if LOONGARCH && 64BIT
default y if MIPS && CPU_MIPSR6
default y if PPC64 && ALTIVEC
default y if RISCV && RISCV_ISA_ZBC
diff --git a/lib/crypto/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c
index bf716a03c704..9359a58c29ec 100644
--- a/lib/crypto/mpi/mpicoder.c
+++ b/lib/crypto/mpi/mpicoder.c
@@ -347,7 +347,7 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
lzeros = 0;
len = 0;
while (nbytes > 0) {
- while (len && !*buff) {
+ while (len && !*buff && lzeros < nbytes) {
lzeros++;
len--;
buff++;
diff --git a/lib/fonts/font_rotate.c b/lib/fonts/font_rotate.c
index 065e0fc0667b..275406008823 100644
--- a/lib/fonts/font_rotate.c
+++ b/lib/fonts/font_rotate.c
@@ -106,7 +106,7 @@ static void __font_glyph_rotate_180(const unsigned char *glyph,
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
if (font_glyph_test_bit(glyph, x, y, bit_pitch)) {
- font_glyph_set_bit(out, width - (1 + x + shift), height - (1 + y),
+ font_glyph_set_bit(out, bit_pitch - 1 - x - shift, height - 1 - y,
bit_pitch);
}
}
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 498cc51e493d..94ff8e4089bf 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -16,8 +16,9 @@ menuconfig KUNIT
if KUNIT
config KUNIT_DEBUGFS
- bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" if !KUNIT_ALL_TESTS
- default KUNIT_ALL_TESTS
+ bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation"
+ depends on DEBUG_FS
+ default y
help
Enable debugfs representation for kunit. Currently this consists
of /sys/kernel/debug/kunit/<test_suite>/results files for each
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
index a1a7dfa881de..c85583678f6b 100644
--- a/lib/linear_ranges.c
+++ b/lib/linear_ranges.c
@@ -242,6 +242,42 @@ int linear_range_get_selector_high(const struct linear_range *r,
EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
/**
+ * linear_range_get_selector_high_array - return linear range selector for value
+ * @r: pointer to array of linear ranges where selector is looked from
+ * @ranges: amount of ranges to scan from array
+ * @val: value for which the selector is searched
+ * @selector: address where found selector value is updated
+ * @found: flag to indicate that given value was in the range
+ *
+ * Scan the array of ranges for a selector whose range value matches the
+ * given input value. A value matches if it is equal to or higher than the
+ * given value. If the given value falls within one of the ranges, scanning
+ * stops and @found is set true. If a range is found whose minimum is greater
+ * than the given value, that range's lowest selector is written to @selector
+ * and scanning stops.
+ *
+ * Return: 0 on success, -EINVAL if the range array is invalid or contains no
+ * range with a value greater than or equal to the given value
+ */
+int linear_range_get_selector_high_array(const struct linear_range *r,
+ int ranges, unsigned int val,
+ unsigned int *selector, bool *found)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ranges; i++) {
+ ret = linear_range_get_selector_high(&r[i], val, selector,
+ found);
+ if (!ret)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_high_array);
+
+/**
* linear_range_get_selector_within - return linear range selector for value
* @r: pointer to linear range where selector is looked from
* @val: value for which the selector is searched
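For context, a minimal (hypothetical) caller of the new helper might look like the following; the two-range voltage map is invented purely for illustration and is not part of this patch:

#include <linux/kernel.h>
#include <linux/linear_range.h>

/* Hypothetical regulator-style map: 1000..1300 in steps of 100, then 1500..2000 in steps of 250 */
static const struct linear_range vranges[] = {
	{ .min = 1000, .min_sel = 0, .max_sel = 3, .step = 100 },
	{ .min = 1500, .min_sel = 4, .max_sel = 6, .step = 250 },
};

static int pick_selector(unsigned int val)
{
	unsigned int sel;
	bool found;
	int ret;

	/* Lowest selector whose value is >= val, searched across both ranges */
	ret = linear_range_get_selector_high_array(vranges, ARRAY_SIZE(vranges),
						   val, &sel, &found);
	if (ret)
		return ret;	/* no range reaches val */

	/* found == true means val sat inside one of the ranges */
	return sel;
}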
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d18d7ed9ab67..60ae5e6fc1ee 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2,7 +2,7 @@
/*
* Maple Tree implementation
* Copyright (c) 2018-2022 Oracle Corporation
- * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
+ * Authors: Liam R. Howlett <liam@infradead.org>
* Matthew Wilcox <willy@infradead.org>
* Copyright (c) 2023 ByteDance
* Author: Peng Zhang <zhangpeng.00@bytedance.com>
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6074ed5f66f3..7a67ef5b67b6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -441,10 +441,33 @@ static void rht_deferred_worker(struct work_struct *work)
mutex_unlock(&ht->mutex);
+ /*
+ * Re-arm via @run_work, not @run_irq_work.
+ * rhashtable_free_and_destroy() drains async work as irq_work_sync()
+ * followed by cancel_work_sync(). If this site queued irq_work while
+ * cancel_work_sync() was waiting for us, irq_work_sync() would already
+ * have returned and the stale irq_work could fire post-teardown.
+ * cancel_work_sync() natively handles self-requeue on @run_work.
+ */
if (err)
schedule_work(&ht->run_work);
}
+/*
+ * Insert-path callers can run under a raw spinlock (e.g. an insecure_elasticity
+ * user). Calling schedule_work() under that lock records caller_lock ->
+ * pool->lock -> pi_lock -> rq->__lock, closing a locking cycle if any of
+ * these is acquired in the reverse direction elsewhere. Bounce through
+ * irq_work so the schedule_work() runs with the caller's lock no longer held.
+ */
+static void rht_deferred_irq_work(struct irq_work *irq_work)
+{
+ struct rhashtable *ht = container_of(irq_work, struct rhashtable,
+ run_irq_work);
+
+ schedule_work(&ht->run_work);
+}
+
static int rhashtable_insert_rehash(struct rhashtable *ht,
struct bucket_table *tbl)
{
@@ -477,7 +500,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
if (err == -EEXIST)
err = 0;
} else
- schedule_work(&ht->run_work);
+ irq_work_queue(&ht->run_irq_work);
return err;
@@ -488,7 +511,7 @@ fail:
/* Schedule async rehash to retry allocation in process context. */
if (err == -ENOMEM)
- schedule_work(&ht->run_work);
+ irq_work_queue(&ht->run_irq_work);
return err;
}
@@ -538,7 +561,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
return NULL;
}
- if (elasticity <= 0)
+ if (elasticity <= 0 && !ht->p.insecure_elasticity)
return ERR_PTR(-EAGAIN);
return ERR_PTR(-ENOENT);
@@ -568,7 +591,8 @@ static struct bucket_table *rhashtable_insert_one(
if (unlikely(rht_grow_above_max(ht, tbl)))
return ERR_PTR(-E2BIG);
- if (unlikely(rht_grow_above_100(ht, tbl)))
+ if (unlikely(rht_grow_above_100(ht, tbl)) &&
+ !ht->p.insecure_elasticity)
return ERR_PTR(-EAGAIN);
head = rht_ptr(bkt, tbl, hash);
@@ -629,7 +653,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
rht_unlock(tbl, bkt, flags);
if (inserted && rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
+ irq_work_queue(&ht->run_irq_work);
}
} while (!IS_ERR_OR_NULL(new_tbl));
@@ -1084,6 +1108,7 @@ int rhashtable_init_noprof(struct rhashtable *ht,
RCU_INIT_POINTER(ht->tbl, tbl);
INIT_WORK(&ht->run_work, rht_deferred_worker);
+ init_irq_work(&ht->run_irq_work, rht_deferred_irq_work);
return 0;
}
@@ -1149,6 +1174,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
struct bucket_table *tbl, *next_tbl;
unsigned int i;
+ irq_work_sync(&ht->run_irq_work);
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex);
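The irq_work indirection added here is a general pattern for deferring work that cannot be scheduled from the current context (here: insert paths that may hold a raw spinlock). A stripped-down sketch of the same two-stage deferral using the real irq_work/workqueue APIs, with an invented 'foo' object standing in for struct rhashtable:

#include <linux/irq_work.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct run_work;	/* heavy work, process context */
	struct irq_work run_irq_work;	/* cheap to queue under raw locks */
};

static void foo_worker(struct work_struct *work)
{
	/* may sleep, take mutexes, allocate with GFP_KERNEL, ... */
}

/* Stage 1: fires in hard-IRQ context shortly after irq_work_queue() */
static void foo_irq_work(struct irq_work *iw)
{
	struct foo *f = container_of(iw, struct foo, run_irq_work);

	schedule_work(&f->run_work);	/* caller's raw lock no longer held */
}

static void foo_init(struct foo *f)
{
	INIT_WORK(&f->run_work, foo_worker);
	init_irq_work(&f->run_irq_work, foo_irq_work);
}

/* Hot path, possibly under a raw spinlock: only queues the irq_work */
static void foo_kick(struct foo *f)
{
	irq_work_queue(&f->run_irq_work);
}

/* Teardown mirrors rhashtable_free_and_destroy(): drain the irq_work first
 * so it cannot re-queue run_work after cancel_work_sync() has returned. */
static void foo_teardown(struct foo *f)
{
	irq_work_sync(&f->run_irq_work);
	cancel_work_sync(&f->run_work);
}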
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 0964d53365e6..213504915737 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -185,11 +185,73 @@ static int dmirror_fops_open(struct inode *inode, struct file *filp)
return 0;
}
+static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+{
+ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+ unsigned long npages = end_pfn - start_pfn + 1;
+ unsigned long i;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+ unsigned int order = 0;
+
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+
+ order = folio_order(page_folio(spage));
+ spage = BACKING_PAGE(spage);
+ if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
+ dpage = folio_page(folio_alloc(GFP_HIGHUSER_MOVABLE,
+ order), 0);
+ } else {
+ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+ order = 0;
+ }
+
+ /* TODO Support splitting here */
+ lock_page(dpage);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ if (src_pfns[i] & MIGRATE_PFN_WRITE)
+ dst_pfns[i] |= MIGRATE_PFN_WRITE;
+ if (order)
+ dst_pfns[i] |= MIGRATE_PFN_COMPOUND;
+ folio_copy(page_folio(dpage), page_folio(spage));
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
+}
+
static int dmirror_fops_release(struct inode *inode, struct file *filp)
{
struct dmirror *dmirror = filp->private_data;
+ struct dmirror_device *mdevice = dmirror->mdevice;
+ int i;
mmu_interval_notifier_remove(&dmirror->notifier);
+
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ dmirror_device_evict_chunk(devmem);
+ }
+ }
+
xa_destroy(&dmirror->pt);
kfree(dmirror);
return 0;
@@ -1377,56 +1439,6 @@ static int dmirror_snapshot(struct dmirror *dmirror,
return ret;
}
-static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
-{
- unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
- unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
- unsigned long npages = end_pfn - start_pfn + 1;
- unsigned long i;
- unsigned long *src_pfns;
- unsigned long *dst_pfns;
- unsigned int order = 0;
-
- src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
- dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
-
- migrate_device_range(src_pfns, start_pfn, npages);
- for (i = 0; i < npages; i++) {
- struct page *dpage, *spage;
-
- spage = migrate_pfn_to_page(src_pfns[i]);
- if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- if (WARN_ON(!is_device_private_page(spage) &&
- !is_device_coherent_page(spage)))
- continue;
-
- order = folio_order(page_folio(spage));
- spage = BACKING_PAGE(spage);
- if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
- dpage = folio_page(folio_alloc(GFP_HIGHUSER_MOVABLE,
- order), 0);
- } else {
- dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
- order = 0;
- }
-
- /* TODO Support splitting here */
- lock_page(dpage);
- dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
- if (src_pfns[i] & MIGRATE_PFN_WRITE)
- dst_pfns[i] |= MIGRATE_PFN_WRITE;
- if (order)
- dst_pfns[i] |= MIGRATE_PFN_COMPOUND;
- folio_copy(page_folio(dpage), page_folio(spage));
- }
- migrate_device_pages(src_pfns, dst_pfns, npages);
- migrate_device_finalize(src_pfns, dst_pfns, npages);
- kvfree(src_pfns);
- kvfree(dst_pfns);
-}
-
/* Removes free pages from the free list so they can't be re-allocated */
static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
{
@@ -1726,6 +1738,13 @@ static const struct dev_pagemap_ops dmirror_devmem_ops = {
.folio_split = dmirror_devmem_folio_split,
};
+static void dmirror_device_release(struct device *dev)
+{
+ struct dmirror_device *mdevice = container_of(dev, struct dmirror_device, device);
+
+ dmirror_device_remove_chunks(mdevice);
+}
+
static int dmirror_device_init(struct dmirror_device *mdevice, int id)
{
dev_t dev;
@@ -1737,6 +1756,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
cdev_init(&mdevice->cdevice, &dmirror_fops);
mdevice->cdevice.owner = THIS_MODULE;
+ mdevice->device.release = dmirror_device_release;
+
device_initialize(&mdevice->device);
mdevice->device.devt = dev;
@@ -1744,12 +1765,16 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
if (ret)
goto put_device;
+ /* Build a list of free ZONE_DEVICE struct pages */
+ ret = dmirror_allocate_chunk(mdevice, NULL, false);
+ if (ret)
+ goto put_device;
+
ret = cdev_device_add(&mdevice->cdevice, &mdevice->device);
if (ret)
goto put_device;
- /* Build a list of free ZONE_DEVICE struct pages */
- return dmirror_allocate_chunk(mdevice, NULL, false);
+ return 0;
put_device:
put_device(&mdevice->device);
@@ -1758,7 +1783,6 @@ put_device:
static void dmirror_device_remove(struct dmirror_device *mdevice)
{
- dmirror_device_remove_chunks(mdevice);
cdev_device_del(&mdevice->cdevice, &mdevice->device);
put_device(&mdevice->device);
}
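The reordering above moves chunk teardown out of dmirror_device_remove() and into the device's release callback, relying on the driver-core rule that ->release runs once, after the last reference is dropped. A generic sketch of that refcounted-teardown pattern (invented names and dynamic allocation; test_hmm itself uses static devices and frees chunks rather than the device):

#include <linux/device.h>
#include <linux/slab.h>

struct mydev {
	struct device device;
	/* resources that must outlive every open file handle */
};

static void mydev_release(struct device *dev)
{
	struct mydev *md = container_of(dev, struct mydev, device);

	/* Runs exactly once, when the final put_device() drops the refcount */
	kfree(md);
}

static struct mydev *mydev_create(void)
{
	struct mydev *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return NULL;

	md->device.release = mydev_release;
	device_initialize(&md->device);		/* refcount starts at 1 */
	return md;
}

/* Users pair get_device()/put_device(); the last put_device() frees md. */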
diff --git a/lib/test_kho.c b/lib/test_kho.c
index 7ef9e4061869..aa6a0956bb8b 100644
--- a/lib/test_kho.c
+++ b/lib/test_kho.c
@@ -143,7 +143,8 @@ static int kho_test_preserve(struct kho_test_state *state)
if (err)
goto err_unpreserve_data;
- err = kho_add_subtree(KHO_TEST_FDT, folio_address(state->fdt));
+ err = kho_add_subtree(KHO_TEST_FDT, folio_address(state->fdt),
+ fdt_totalsize(folio_address(state->fdt)));
if (err)
goto err_unpreserve_data;
@@ -318,7 +319,7 @@ static int __init kho_test_init(void)
if (!kho_is_enabled())
return 0;
- err = kho_retrieve_subtree(KHO_TEST_FDT, &fdt_phys);
+ err = kho_retrieve_subtree(KHO_TEST_FDT, &fdt_phys, NULL);
if (!err) {
err = kho_test_restore(fdt_phys);
if (err)
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 434d8a2fdd99..b9367c61e8b5 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2,7 +2,7 @@
/*
* test_maple_tree.c: Test the maple tree API
* Copyright (c) 2018-2022 Oracle Corporation
- * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Author: Liam R. Howlett <liam@infradead.org>
*
* Any tests that only require the interface of the tree.
*/
@@ -4021,6 +4021,6 @@ static void __exit maple_tree_harvest(void)
module_init(maple_tree_seed);
module_exit(maple_tree_harvest);
-MODULE_AUTHOR("Liam R. Howlett <Liam.Howlett@Oracle.com>");
+MODULE_AUTHOR("Liam R. Howlett <liam@infradead.org>");
MODULE_DESCRIPTION("maple tree API test module");
MODULE_LICENSE("GPL");
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 05f74edbc62b..7e9c2fa52e35 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -40,6 +40,8 @@ obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
obj-$(CONFIG_MIN_HEAP_KUNIT_TEST) += min_heap_kunit.o
CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+# GCC < 12.1 can miscompile the errptr() test when branch profiling is enabled.
+CFLAGS_printf_kunit.o += -DDISABLE_BRANCH_PROFILING
obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
obj-$(CONFIG_RANDSTRUCT_KUNIT_TEST) += randstruct_kunit.o
obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
diff --git a/lib/tests/liveupdate.c b/lib/tests/liveupdate.c
index 496d6ef91a30..e4b0ecbee32f 100644
--- a/lib/tests/liveupdate.c
+++ b/lib/tests/liveupdate.c
@@ -135,24 +135,6 @@ void liveupdate_test_register(struct liveupdate_file_handler *fh)
TEST_NFLBS, fh->compatible);
}
-void liveupdate_test_unregister(struct liveupdate_file_handler *fh)
-{
- int err, i;
-
- for (i = 0; i < TEST_NFLBS; i++) {
- struct liveupdate_flb *flb = &test_flbs[i];
-
- err = liveupdate_unregister_flb(fh, flb);
- if (err) {
- pr_err("Failed to unregister %s %pe\n",
- flb->compatible, ERR_PTR(err));
- }
- }
-
- pr_info("Unregistered %d FLBs from file handler: [%s]\n",
- TEST_NFLBS, fh->compatible);
-}
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pasha Tatashin <pasha.tatashin@soleen.com>");
MODULE_DESCRIPTION("In-kernel test for LUO mechanism");
diff --git a/lib/tests/printf_kunit.c b/lib/tests/printf_kunit.c
index f6f21b445ece..bb70b9cddadd 100644
--- a/lib/tests/printf_kunit.c
+++ b/lib/tests/printf_kunit.c
@@ -17,6 +17,7 @@
#include <linux/dcache.h>
#include <linux/socket.h>
#include <linux/in.h>
+#include <linux/in6.h>
#include <linux/gfp.h>
#include <linux/mm.h>
@@ -437,6 +438,27 @@ ip4(struct kunit *kunittest)
static void
ip6(struct kunit *kunittest)
{
+ const struct in6_addr addr = {
+ .s6_addr = { 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04,
+ 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, 0x08 }
+ };
+ const struct in6_addr single_zero = {
+ .s6_addr = { 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x04,
+ 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, 0x08 }
+ };
+ struct sockaddr_in6 sa = {
+ .sin6_family = AF_INET6,
+ .sin6_port = cpu_to_be16(12345),
+ .sin6_addr = addr,
+ };
+
+ test("00010002000300040005000600070008|0001:0002:0003:0004:0005:0006:0007:0008",
+ "%pi6|%pI6", &addr, &addr);
+ test("00010002000300040005000600070008|0001:0002:0003:0004:0005:0006:0007:0008",
+ "%piS|%pIS", &sa, &sa);
+ test("1:2:3:4:5:6:7:8", "%pI6c", &addr);
+ test("1:0:3:4:5:6:7:8", "%pI6c", &single_zero);
+ test("[1:2:3:4:5:6:7:8]:12345", "%pISpc", &sa);
}
static void
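The new assertions pin down the documented IPv6 pointer extensions: %pi6 (raw hex), %pI6 (colon-separated), %pI6c (RFC 5952 compressed) and %pIS/%pISpc (struct sockaddr, 'p' adding the port, 'c' compressing). A hypothetical caller would use them like this:

#include <linux/in6.h>
#include <linux/printk.h>
#include <linux/socket.h>

/* Hypothetical helper: log an IPv6 peer using the specifiers tested above */
static void log_peer(const struct in6_addr *addr, const struct sockaddr_in6 *sa)
{
	pr_info("peer %pI6c, endpoint %pISpc\n", addr, sa);
}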
diff --git a/lib/tests/string_kunit.c b/lib/tests/string_kunit.c
index f9a8e557ba77..0819ace5b027 100644
--- a/lib/tests/string_kunit.c
+++ b/lib/tests/string_kunit.c
@@ -6,10 +6,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
+#include <linux/ktime.h>
+#include <linux/math64.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/prandom.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/time64.h>
+#include <linux/units.h>
+#include <linux/vmalloc.h>
#define STRCMP_LARGE_BUF_LEN 2048
#define STRCMP_CHANGE_POINT 1337
@@ -17,6 +25,12 @@
#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
+#define STRING_TEST_MAX_LEN 128
+#define STRING_TEST_MAX_OFFSET 16
+
+#define STRING_BENCH_SEED 888
+#define STRING_BENCH_WORKLOAD (1 * MEGA)
+
static void string_test_memset16(struct kunit *test)
{
unsigned i, j, k;
@@ -104,6 +118,64 @@ static void string_test_memset64(struct kunit *test)
}
}
+static void string_test_strlen(struct kunit *test)
+{
+ size_t buf_size;
+ char *buf, *s;
+
+ buf_size = PAGE_ALIGN(STRING_TEST_MAX_LEN + STRING_TEST_MAX_OFFSET + 1);
+ buf = vmalloc(buf_size);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ memset(buf, 'A', buf_size);
+
+ for (size_t offset = 0; offset < STRING_TEST_MAX_OFFSET; offset++) {
+ for (size_t len = 0; len <= STRING_TEST_MAX_LEN; len++) {
+ s = buf + buf_size - 1 - offset - len;
+ s[len] = '\0';
+ KUNIT_EXPECT_EQ_MSG(test, strlen(s), len,
+ "offset:%zu len:%zu", offset, len);
+ s[len] = 'A';
+ }
+ }
+
+ vfree(buf);
+}
+
+static void string_test_strnlen(struct kunit *test)
+{
+ size_t buf_size;
+ char *buf, *s;
+
+ buf_size = PAGE_ALIGN(STRING_TEST_MAX_LEN + STRING_TEST_MAX_OFFSET + 1);
+ buf = vmalloc(buf_size);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ memset(buf, 'A', buf_size);
+
+ for (size_t offset = 0; offset < STRING_TEST_MAX_OFFSET; offset++) {
+ for (size_t len = 0; len <= STRING_TEST_MAX_LEN; len++) {
+ s = buf + buf_size - 1 - offset - len;
+ s[len] = '\0';
+
+ if (len > 0)
+ KUNIT_EXPECT_EQ(test, strnlen(s, len - 1), len - 1);
+ if (len > 1)
+ KUNIT_EXPECT_EQ(test, strnlen(s, len - 2), len - 2);
+
+ KUNIT_EXPECT_EQ(test, strnlen(s, len), len);
+
+ KUNIT_EXPECT_EQ(test, strnlen(s, len + 1), len);
+ KUNIT_EXPECT_EQ(test, strnlen(s, len + 2), len);
+ KUNIT_EXPECT_EQ(test, strnlen(s, len + 10), len);
+
+ s[len] = 'A';
+ }
+ }
+
+ vfree(buf);
+}
+
static void string_test_strchr(struct kunit *test)
{
const char *test_string = "abcdefghijkl";
@@ -127,6 +199,36 @@ static void string_test_strchr(struct kunit *test)
KUNIT_ASSERT_NULL(test, result);
}
+static void string_test_strrchr(struct kunit *test)
+{
+ size_t buf_size;
+ char *buf, *s;
+
+ buf_size = PAGE_ALIGN(STRING_TEST_MAX_LEN + STRING_TEST_MAX_OFFSET + 1);
+ buf = vmalloc(buf_size);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ memset(buf, 'A', buf_size);
+
+ for (size_t offset = 0; offset < STRING_TEST_MAX_OFFSET; offset++) {
+ for (size_t len = 0; len <= STRING_TEST_MAX_LEN; len++) {
+ s = buf + buf_size - 1 - offset - len;
+ s[len] = '\0';
+
+ KUNIT_EXPECT_PTR_EQ(test, strrchr(s, 'Z'), NULL);
+
+ if (len > 0)
+ KUNIT_EXPECT_PTR_EQ(test, strrchr(s, 'A'), s + len - 1);
+ else
+ KUNIT_EXPECT_PTR_EQ(test, strrchr(s, 'A'), NULL);
+
+ s[len] = 'A';
+ }
+ }
+
+ vfree(buf);
+}
+
static void string_test_strnchr(struct kunit *test)
{
const char *test_string = "abcdefghijkl";
@@ -614,12 +716,180 @@ static void string_test_strends(struct kunit *test)
KUNIT_EXPECT_TRUE(test, strends("", ""));
}
+#if IS_ENABLED(CONFIG_STRING_KUNIT_BENCH)
+/* Target string lengths for benchmarking */
+static const size_t bench_lens[] = {
+ 0, 1, 7, 8, 16, 31, 64, 127, 512, 1024, 3173, 4096,
+};
+
+/**
+ * alloc_max_bench_buffer() - Allocate buffer for the max test case.
+ * @test: KUnit context for managed allocation.
+ * @lens: Array of lengths used in the benchmark cases.
+ * @count: Number of elements in the @lens array.
+ * @buf_len: [out] Pointer to store the actually allocated buffer
+ * size (including NUL character).
+ *
+ * Return: Pointer to the allocated memory, or NULL on failure.
+ */
+static void *alloc_max_bench_buffer(struct kunit *test, const size_t *lens,
+ size_t count, size_t *buf_len)
+{
+ size_t max_len = 0;
+ void *buf;
+
+ for (size_t i = 0; i < count; i++)
+ max_len = max(lens[i], max_len);
+
+ /* Add space for NUL character */
+ max_len += 1;
+
+ buf = kunit_kzalloc(test, max_len, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ if (buf_len)
+ *buf_len = max_len;
+
+ return buf;
+}
+
+/**
+ * fill_random_string() - Populate a buffer with a random NUL-terminated string.
+ * @buf: Buffer to fill.
+ * @len: Length of the buffer in bytes.
+ *
+ * Fills the buffer with random non-NUL bytes and ensures the string is
+ * properly NUL-terminated.
+ */
+static void fill_random_string(char *buf, size_t len)
+{
+ struct rnd_state state;
+
+ if (!buf || !len)
+ return;
+
+ /* Use a fixed seed to ensure deterministic benchmark results */
+ prandom_seed_state(&state, STRING_BENCH_SEED);
+ prandom_bytes_state(&state, buf, len);
+
+ /* Replace NUL characters to avoid early string termination */
+ for (size_t i = 0; i < len; i++) {
+ if (buf[i] == '\0')
+ buf[i] = 0x01;
+ }
+
+ buf[len - 1] = '\0';
+}
+
+/**
+ * STRING_BENCH() - Benchmark string functions.
+ * @iters: Number of iterations to run.
+ * @func: Function to benchmark.
+ * @...: Variable arguments passed to @func.
+ *
+ * Runs a short warm-up phase, then disables preemption and measures the total
+ * time in nanoseconds to execute @func(@__VA_ARGS__) @iters times.
+ *
+ * Context: Disables preemption during measurement.
+ * Return: Total execution time in nanoseconds (u64).
+ */
+#define STRING_BENCH(iters, func, ...) \
+({ \
+ /* Volatile function pointer prevents dead code elimination */ \
+ typeof(func) (* volatile __func) = (func); \
+ size_t __bn_iters = (iters); \
+ size_t __bn_warm_iters; \
+ u64 __bn_t; \
+ \
+	/* Warm up with 10% of the given iterations, but at least 50 */	\
+ __bn_warm_iters = max(__bn_iters / 10, 50U); \
+ \
+ for (size_t __bn_i = 0; __bn_i < __bn_warm_iters; __bn_i++) \
+ (void)__func(__VA_ARGS__); \
+ \
+ preempt_disable(); \
+ __bn_t = ktime_get_ns(); \
+ for (size_t __bn_i = 0; __bn_i < __bn_iters; __bn_i++) \
+ (void)__func(__VA_ARGS__); \
+ __bn_t = ktime_get_ns() - __bn_t; \
+ preempt_enable(); \
+ __bn_t; \
+})
+
+/**
+ * STRING_BENCH_BUF() - Benchmark harness for single-buffer functions.
+ * @test: KUnit context.
+ * @buf_name: Local char * variable name to be defined.
+ * @buf_size: Local size_t variable name to be defined.
+ * @func: Function to benchmark.
+ * @...: Extra arguments for @func.
+ *
+ * Prepares a randomized, NUL-terminated buffer and iterates over the lengths
+ * in bench_lens, defining @buf_name and @buf_size for each case.
+ */
+#define STRING_BENCH_BUF(test, buf_name, buf_size, func, ...) \
+do { \
+ size_t _bn_i, _bn_iters, _bn_size = 0; \
+ u64 _bn_t, _bn_mbps = 0, _bn_lat = 0; \
+ char *_bn_buf; \
+ \
+ _bn_buf = alloc_max_bench_buffer(test, bench_lens, \
+ ARRAY_SIZE(bench_lens), &_bn_size); \
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, _bn_buf); \
+ \
+ fill_random_string(_bn_buf, _bn_size); \
+ \
+ for (_bn_i = 0; _bn_i < ARRAY_SIZE(bench_lens); _bn_i++) { \
+ size_t buf_size = bench_lens[_bn_i]; \
+ char *buf_name = _bn_buf + _bn_size - buf_size - 1; \
+ _bn_iters = STRING_BENCH_WORKLOAD / max(buf_size, 1U); \
+ \
+ _bn_t = STRING_BENCH(_bn_iters, func, ##__VA_ARGS__); \
+ if (_bn_t > 0) { \
+ _bn_mbps = (u64)(buf_size) * _bn_iters * \
+ (NSEC_PER_SEC / MEGA); \
+ _bn_mbps = div64_u64(_bn_mbps, _bn_t); \
+ _bn_lat = div64_u64(_bn_t, _bn_iters); \
+ } \
+ kunit_info(test, "len=%zu: %llu MB/s (%llu ns/call)\n", \
+ buf_size, _bn_mbps, _bn_lat); \
+ } \
+} while (0)
+#else
+#define STRING_BENCH_BUF(test, buf_name, buf_size, func, ...) \
+ kunit_skip(test, "not enabled")
+#endif /* IS_ENABLED(CONFIG_STRING_KUNIT_BENCH) */
+
+static void string_bench_strlen(struct kunit *test)
+{
+ STRING_BENCH_BUF(test, buf, len, strlen, buf);
+}
+
+static void string_bench_strnlen(struct kunit *test)
+{
+ STRING_BENCH_BUF(test, buf, len, strnlen, buf, len);
+}
+
+static void string_bench_strchr(struct kunit *test)
+{
+ STRING_BENCH_BUF(test, buf, len, strchr, buf, '\0');
+}
+
+static void string_bench_strrchr(struct kunit *test)
+{
+ STRING_BENCH_BUF(test, buf, len, strrchr, buf, '\0');
+}
+
static struct kunit_case string_test_cases[] = {
KUNIT_CASE(string_test_memset16),
KUNIT_CASE(string_test_memset32),
KUNIT_CASE(string_test_memset64),
+ KUNIT_CASE(string_test_strlen),
+ KUNIT_CASE(string_test_strnlen),
KUNIT_CASE(string_test_strchr),
KUNIT_CASE(string_test_strnchr),
+ KUNIT_CASE(string_test_strrchr),
KUNIT_CASE(string_test_strspn),
KUNIT_CASE(string_test_strcmp),
KUNIT_CASE(string_test_strcmp_long_strings),
@@ -636,6 +906,10 @@ static struct kunit_case string_test_cases[] = {
KUNIT_CASE(string_test_strtomem),
KUNIT_CASE(string_test_memtostr),
KUNIT_CASE(string_test_strends),
+ KUNIT_CASE(string_bench_strlen),
+ KUNIT_CASE(string_bench_strnlen),
+ KUNIT_CASE(string_bench_strchr),
+ KUNIT_CASE(string_bench_strrchr),
{}
};
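To make the reported figures concrete: with STRING_BENCH_WORKLOAD = 1 * MEGA, the 1024-byte case runs _bn_iters = 1000000 / 1024 = 976 calls. If those calls hypothetically took 200,000 ns in total, the kunit_info() line would show roughly 1024 * 976 * (NSEC_PER_SEC / MEGA) / 200000 = 4997 MB/s and 200000 / 976 = 204 ns/call; the timing figure is invented for illustration, only the formula comes from the macro above.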
diff --git a/lib/tests/test_kprobes.c b/lib/tests/test_kprobes.c
index b7582010125c..06e729e4de05 100644
--- a/lib/tests/test_kprobes.c
+++ b/lib/tests/test_kprobes.c
@@ -12,6 +12,12 @@
#define div_factor 3
+#define KP_CLEAR(_kp) \
+do { \
+ (_kp).addr = NULL; \
+ (_kp).flags = 0; \
+} while (0)
+
static u32 rand1, preh_val, posth_val;
static u32 (*target)(u32 value);
static u32 (*recursed_target)(u32 value);
@@ -125,10 +131,6 @@ static void test_kprobes(struct kunit *test)
current_test = test;
- /* addr and flags should be cleard for reusing kprobe. */
- kp.addr = NULL;
- kp.flags = 0;
-
KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
preh_val = 0;
posth_val = 0;
@@ -226,9 +228,6 @@ static void test_kretprobes(struct kunit *test)
struct kretprobe *rps[2] = {&rp, &rp2};
current_test = test;
- /* addr and flags should be cleard for reusing kprobe. */
- rp.kp.addr = NULL;
- rp.kp.flags = 0;
KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
krph_val = 0;
@@ -290,8 +289,6 @@ static void test_stacktrace_on_kretprobe(struct kunit *test)
unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
current_test = test;
- rp3.kp.addr = NULL;
- rp3.kp.flags = 0;
/*
* Run the stacktrace_driver() to record correct return address in
@@ -352,8 +349,6 @@ static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
struct kretprobe *rps[2] = {&rp3, &rp4};
current_test = test;
- rp3.kp.addr = NULL;
- rp3.kp.flags = 0;
//KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
@@ -367,6 +362,18 @@ static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
static int kprobes_test_init(struct kunit *test)
{
+ KP_CLEAR(kp);
+ KP_CLEAR(kp2);
+ KP_CLEAR(kp_missed);
+#ifdef CONFIG_KRETPROBES
+ KP_CLEAR(rp.kp);
+ KP_CLEAR(rp2.kp);
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ KP_CLEAR(rp3.kp);
+ KP_CLEAR(rp4.kp);
+#endif
+#endif
+
target = kprobe_target;
target2 = kprobe_target2;
recursed_target = kprobe_recursed_target;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 800b8ac49f53..9f359b31c8d1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1107,7 +1107,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
char *p = sym, *pend = sym + sizeof(sym);
- int decode = (fmt[0] == 'R') ? 1 : 0;
+ bool decode = fmt[0] == 'R';
const struct printf_spec *specp;
if (check_pointer(&buf, end, res, spec))
@@ -1132,7 +1132,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
} else {
p = string_nocheck(p, pend, "??? ", str_spec);
specp = &mem_spec;
- decode = 0;
+ decode = false;
}
if (decode && res->flags & IORESOURCE_UNSET) {
p = string_nocheck(p, pend, "size ", str_spec);