summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexander Potapenko <glider@google.com>2026-01-13 12:11:50 +0300
committerAndrew Morton <akpm@linux-foundation.org>2026-01-27 07:02:32 +0300
commit79ffad20ebc05eb4e5dc942cdedbfbf0796c18c9 (patch)
tree45f5758bcf40f8ec0a06422d9dbcd4c6b18666ac
parented581147a417940857eeea609229de0f5de5617f (diff)
downloadlinux-79ffad20ebc05eb4e5dc942cdedbfbf0796c18c9.tar.xz
mm: kmsan: add tests for high-order page freeing
Add regression tests to verify that KMSAN correctly poisons the full memory range when freeing pages. Specifically, verify that accessing the tail pages of a high-order non-compound allocation triggers a use-after-free report. This ensures that the fix "mm: kmsan: Fix poisoning of high-order non-compound pages" is working as expected. Also add a test for standard order-0 pages for completeness. Link: https://lore.kernel.org/all/20260104134348.3544298-1-ryan.roberts@arm.com/ Link: https://lkml.kernel.org/r/20260113091151.4035013-1-glider@google.com Signed-off-by: Alexander Potapenko <glider@google.com> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Marco Elver <elver@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--mm/kmsan/kmsan_test.c49
1 files changed, 48 insertions, 1 deletions
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 902ec48b1e3e..ba44bf2072bb 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -361,7 +361,7 @@ static void test_init_vmalloc(struct kunit *test)
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
-/* Test case: ensure that use-after-free reporting works. */
+/* Test case: ensure that use-after-free reporting works for kmalloc. */
static void test_uaf(struct kunit *test)
{
EXPECTATION_USE_AFTER_FREE(expect);
@@ -378,6 +378,51 @@ static void test_uaf(struct kunit *test)
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
+static volatile char *test_uaf_pages_helper(int order, int offset)
+{
+ struct page *page;
+ volatile char *var;
+
+ /* Memory is initialized up until __free_pages() thanks to __GFP_ZERO. */
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ var = page_address(page) + offset;
+ __free_pages(page, order);
+
+ return var;
+}
+
+/* Test case: ensure that use-after-free reporting works for a freed page. */
+static void test_uaf_pages(struct kunit *test)
+{
+ EXPECTATION_USE_AFTER_FREE(expect);
+ volatile char value;
+
+ kunit_info(test, "use-after-free on a freed page (UMR report)\n");
+ /* Allocate a single page, free it, then try to access it. */
+ value = *test_uaf_pages_helper(0, 3);
+ USE(value);
+
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/* Test case: ensure that UAF reporting works for high order pages. */
+static void test_uaf_high_order_pages(struct kunit *test)
+{
+ EXPECTATION_USE_AFTER_FREE(expect);
+ volatile char value;
+
+ kunit_info(test,
+ "use-after-free on a freed high-order page (UMR report)\n");
+ /*
+ * Create a high-order non-compound page, free it, then try to access
+ * its tail page.
+ */
+ value = *test_uaf_pages_helper(1, PAGE_SIZE + 3);
+ USE(value);
+
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
/*
* Test case: ensure that uninitialized values are propagated through per-CPU
* memory.
@@ -683,6 +728,8 @@ static struct kunit_case kmsan_test_cases[] = {
KUNIT_CASE(test_init_kmsan_vmap_vunmap),
KUNIT_CASE(test_init_vmalloc),
KUNIT_CASE(test_uaf),
+ KUNIT_CASE(test_uaf_pages),
+ KUNIT_CASE(test_uaf_high_order_pages),
KUNIT_CASE(test_percpu_propagate),
KUNIT_CASE(test_printk),
KUNIT_CASE(test_init_memcpy),