kmsan: fix out-of-bounds access to shadow memory
author    Eric Biggers <ebiggers@kernel.org>
          Thu, 11 Sep 2025 19:58:58 +0000 (12:58 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 2 Oct 2025 11:42:53 +0000 (13:42 +0200)
commit 85e1ff61060a765d91ee62dc5606d4d547d9d105 upstream.

Running sha224_kunit on a KMSAN-enabled kernel results in a crash in
kmsan_internal_set_shadow_origin():

    BUG: unable to handle page fault for address: ffffbc3840291000
    #PF: supervisor read access in kernel mode
    #PF: error_code(0x0000) - not-present page
    PGD 1810067 P4D 1810067 PUD 192d067 PMD 3c17067 PTE 0
    Oops: 0000 [#1] SMP NOPTI
    CPU: 0 UID: 0 PID: 81 Comm: kunit_try_catch Tainted: G                 N  6.17.0-rc3 #10 PREEMPT(voluntary)
    Tainted: [N]=TEST
    Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
    RIP: 0010:kmsan_internal_set_shadow_origin+0x91/0x100
    [...]
    Call Trace:
    <TASK>
    __msan_memset+0xee/0x1a0
    sha224_final+0x9e/0x350
    test_hash_buffer_overruns+0x46f/0x5f0
    ? kmsan_get_shadow_origin_ptr+0x46/0xa0
    ? __pfx_test_hash_buffer_overruns+0x10/0x10
    kunit_try_run_case+0x198/0xa00

This occurs when memset() is called on a buffer that is not 4-byte aligned
and extends to the end of a guard page, i.e.  the next page is unmapped.
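
For example (a minimal sketch, not code from this patch; the function name is
made up, and it relies on the unmapped guard page that follows a vmalloc()
allocation, which is also what the new kunit test added here uses):

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    /*
     * Sketch of the triggering pattern: the memset() ends exactly at the end
     * of the vmalloc() allocation, so the following guard page is unmapped,
     * and its start, buf + PAGE_SIZE - 5, is not 4-byte aligned.
     */
    static void kmsan_guard_page_repro(void)
    {
            void *buf = vmalloc(PAGE_SIZE);

            if (!buf)
                    return;
            memset(buf + PAGE_SIZE - 5, 0xff, 5);
            vfree(buf);
    }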

The bug is that the loop at the end of kmsan_internal_set_shadow_origin()
accesses the wrong shadow memory bytes when the address is not 4-byte
aligned.  Since each 4 bytes are associated with an origin, it rounds the
address and size so that it can access all the origins that contain the
buffer.  However, when it checks the corresponding shadow bytes for a
particular origin, it incorrectly uses the original unrounded shadow
address.  This results in reads from shadow memory beyond the end of the
buffer's shadow memory, which crashes when that memory is not mapped.

To fix this, correctly align the shadow address before accessing the 4
shadow bytes corresponding to each origin.
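
To see the arithmetic concretely, here is a small user-space model of the
indexing (a sketch only, assuming KMSAN_ORIGIN_SIZE is 4 and that shadow
offsets mirror data offsets byte for byte; the offsets are made up for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define KMSAN_ORIGIN_SIZE 4
    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            /* memset() of the last 5 bytes of a page: unaligned start, guard page next. */
            uint64_t address = 0x1000 - 5;          /* 0xffb */
            size_t size = 5;
            uint64_t shadow_start = address;        /* model: shadow offset == data offset */

            size_t pad = address % KMSAN_ORIGIN_SIZE;       /* 3 */
            uint64_t aligned_shadow = shadow_start - pad;   /* the fix: step back by pad */
            size_t slots = ALIGN_UP(size + pad, KMSAN_ORIGIN_SIZE) / KMSAN_ORIGIN_SIZE; /* 2 */

            /* Last shadow byte read by the origin loop, before and after the fix. */
            printf("buggy: 0x%llx\n",
                   (unsigned long long)(shadow_start + slots * KMSAN_ORIGIN_SIZE - 1));  /* 0x1002: past the page */
            printf("fixed: 0x%llx\n",
                   (unsigned long long)(aligned_shadow + slots * KMSAN_ORIGIN_SIZE - 1)); /* 0xfff: within the page */
            return 0;
    }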

Link: https://lkml.kernel.org/r/20250911195858.394235-1-ebiggers@kernel.org
Fixes: 2ef3cec44c60 ("kmsan: do not wipe out origin when doing partial unpoisoning")
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Tested-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/kmsan/core.c
mm/kmsan/kmsan_test.c

diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 38a3bff23e8d00bd6f2920b4d0b8082c8dd5c220..38155d6982152acd242cf5d4b4065bf09cd76e24 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -262,7 +262,8 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
                                      u32 origin, bool checked)
 {
        u64 address = (u64)addr;
-       u32 *shadow_start, *origin_start;
+       void *shadow_start;
+       u32 *aligned_shadow, *origin_start;
        size_t pad = 0;
 
        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -281,9 +282,12 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
        }
        __memset(shadow_start, b, size);
 
-       if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
+       if (IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
+               aligned_shadow = shadow_start;
+       } else {
                pad = address % KMSAN_ORIGIN_SIZE;
                address -= pad;
+               aligned_shadow = shadow_start - pad;
                size += pad;
        }
        size = ALIGN(size, KMSAN_ORIGIN_SIZE);
@@ -297,7 +301,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
         * corresponding shadow slot is zero.
         */
        for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
-               if (origin || !shadow_start[i])
+               if (origin || !aligned_shadow[i])
                        origin_start[i] = origin;
        }
 }
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 312989aa2865c4992a86f4caa3dd1e91e2b7e7a6..e98c89d3a73aa4cb8cad62a4eaa2cf2384f57d58 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -523,6 +523,21 @@ DEFINE_TEST_MEMSETXX(16)
 DEFINE_TEST_MEMSETXX(32)
 DEFINE_TEST_MEMSETXX(64)
 
+/* Test case: ensure that KMSAN does not access shadow memory out of bounds. */
+static void test_memset_on_guarded_buffer(struct kunit *test)
+{
+       void *buf = vmalloc(PAGE_SIZE);
+
+       kunit_info(test,
+                  "memset() on ends of guarded buffer should not crash\n");
+
+       for (size_t size = 0; size <= 128; size++) {
+               memset(buf, 0xff, size);
+               memset(buf + PAGE_SIZE - size, 0xff, size);
+       }
+       vfree(buf);
+}
+
 static noinline void fibonacci(int *array, int size, int start)
 {
        if (start < 2 || (start == size))
@@ -602,6 +617,7 @@ static struct kunit_case kmsan_test_cases[] = {
        KUNIT_CASE(test_memset16),
        KUNIT_CASE(test_memset32),
        KUNIT_CASE(test_memset64),
+       KUNIT_CASE(test_memset_on_guarded_buffer),
        KUNIT_CASE(test_long_origin_chain),
        KUNIT_CASE(test_stackdepot_roundtrip),
        {},