mm: kmsan: add tests for high-order page freeing
author     Alexander Potapenko <glider@google.com>    Tue, 13 Jan 2026 09:11:50 +0000 (10:11 +0100)
committer  Andrew Morton <akpm@linux-foundation.org>  Tue, 27 Jan 2026 04:02:32 +0000 (20:02 -0800)
Add regression tests to verify that KMSAN correctly poisons the full
memory range when freeing pages.

Specifically, verify that accessing the tail pages of a high-order
non-compound allocation triggers a use-after-free report.  This ensures
that the fix "mm: kmsan: Fix poisoning of high-order non-compound pages"
is working as expected.
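
(Illustrative aside, not part of this patch: the sketch below shows why a
non-compound high-order allocation cannot report its own size.  Without
__GFP_COMP the tail pages carry no order information, compound_order() is 0,
and page_size() accounts only for the head page, so free-time poisoning must
be told the order explicitly to cover the whole range.  The demo function
name is hypothetical.)

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/printk.h>

	/* Hypothetical demo, not present in the kernel tree. */
	static void noncompound_size_demo(void)
	{
		unsigned int order = 1;
		/* No __GFP_COMP: order-1 allocation, non-compound. */
		struct page *page = alloc_pages(GFP_KERNEL, order);

		if (!page)
			return;
		/* Prints PAGE_SIZE: only the head page is accounted for. */
		pr_info("page_size()        = %lu\n", page_size(page));
		/* Prints 2 * PAGE_SIZE: the range poisoning must cover. */
		pr_info("PAGE_SIZE << order = %lu\n", PAGE_SIZE << order);
		__free_pages(page, order);
	}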

Also add a test for standard order-0 pages for completeness.
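
As a side note on running these cases (the Kconfig fragment below uses the
standard KMSAN KUnit symbols and is assumed rather than taken from this
patch; KMSAN itself needs a Clang-built kernel), the new tests are built
into kmsan_test.c alongside the existing suite:

	CONFIG_KUNIT=y
	CONFIG_KMSAN=y
	CONFIG_KMSAN_KUNIT_TEST=y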

Link: https://lore.kernel.org/all/20260104134348.3544298-1-ryan.roberts@arm.com/
Link: https://lkml.kernel.org/r/20260113091151.4035013-1-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 902ec48b1e3e6a44c0529c5bb020929ecb96fedd..ba44bf2072bbefa17ff76f7f793a91886e13ede1 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -361,7 +361,7 @@ static void test_init_vmalloc(struct kunit *test)
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
-/* Test case: ensure that use-after-free reporting works. */
+/* Test case: ensure that use-after-free reporting works for kmalloc. */
 static void test_uaf(struct kunit *test)
 {
        EXPECTATION_USE_AFTER_FREE(expect);
@@ -378,6 +378,51 @@ static void test_uaf(struct kunit *test)
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
+static volatile char *test_uaf_pages_helper(int order, int offset)
+{
+       struct page *page;
+       volatile char *var;
+
+       /* Memory is initialized up until __free_pages() thanks to __GFP_ZERO. */
+       page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+       var = page_address(page) + offset;
+       __free_pages(page, order);
+
+       return var;
+}
+
+/* Test case: ensure that use-after-free reporting works for a freed page. */
+static void test_uaf_pages(struct kunit *test)
+{
+       EXPECTATION_USE_AFTER_FREE(expect);
+       volatile char value;
+
+       kunit_info(test, "use-after-free on a freed page (UMR report)\n");
+       /* Allocate a single page, free it, then try to access it. */
+       value = *test_uaf_pages_helper(0, 3);
+       USE(value);
+
+       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/* Test case: ensure that UAF reporting works for high order pages. */
+static void test_uaf_high_order_pages(struct kunit *test)
+{
+       EXPECTATION_USE_AFTER_FREE(expect);
+       volatile char value;
+
+       kunit_info(test,
+                  "use-after-free on a freed high-order page (UMR report)\n");
+       /*
+        * Create a high-order non-compound page, free it, then try to access
+        * its tail page.
+        */
+       value = *test_uaf_pages_helper(1, PAGE_SIZE + 3);
+       USE(value);
+
+       KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
 /*
  * Test case: ensure that uninitialized values are propagated through per-CPU
  * memory.
@@ -683,6 +728,8 @@ static struct kunit_case kmsan_test_cases[] = {
        KUNIT_CASE(test_init_kmsan_vmap_vunmap),
        KUNIT_CASE(test_init_vmalloc),
        KUNIT_CASE(test_uaf),
+       KUNIT_CASE(test_uaf_pages),
+       KUNIT_CASE(test_uaf_high_order_pages),
        KUNIT_CASE(test_percpu_propagate),
        KUNIT_CASE(test_printk),
        KUNIT_CASE(test_init_memcpy),