mm/kasan: fix KASAN poisoning in vrealloc()
author     Andrey Ryabinin <ryabinin.a.a@gmail.com>
           Tue, 13 Jan 2026 19:15:15 +0000 (20:15 +0100)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 27 Jan 2026 03:03:45 +0000 (19:03 -0800)
A KASAN warning can be triggered when vrealloc() changes the requested
size to a value that is not aligned to KASAN_GRANULE_SIZE.

    ------------[ cut here ]------------
    WARNING: CPU: 2 PID: 1 at mm/kasan/shadow.c:174 kasan_unpoison+0x40/0x48
    ...
    pc : kasan_unpoison+0x40/0x48
    lr : __kasan_unpoison_vmalloc+0x40/0x68
    Call trace:
     kasan_unpoison+0x40/0x48 (P)
     vrealloc_node_align_noprof+0x200/0x320
     bpf_patch_insn_data+0x90/0x2f0
     convert_ctx_accesses+0x8c0/0x1158
     bpf_check+0x1488/0x1900
     bpf_prog_load+0xd20/0x1258
     __sys_bpf+0x96c/0xdf0
     __arm64_sys_bpf+0x50/0xa0
     invoke_syscall+0x90/0x160
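
A minimal sketch of the kind of sequence that reaches the broken path
(hypothetical illustration only; the trace above got there via BPF's
instruction patching):

	void *p = vmalloc(64);

	p = vrealloc(p, 10, GFP_KERNEL); /* shrink: requested size is now
					    10, not a multiple of the
					    8-byte KASAN granule */
	p = vrealloc(p, 32, GFP_KERNEL); /* grow in place: unpoisoning
					    used to start at p + 10, an
					    unaligned address, hitting
					    the WARN in kasan_unpoison() */
	vfree(p);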

Introduce a dedicated kasan_vrealloc() helper that centralizes KASAN
handling for vmalloc reallocations.  The helper accounts for KASAN granule
alignment when growing or shrinking an allocation and ensures that partial
granules are handled correctly.

Use this helper from vrealloc_node_align_noprof() to fix the poisoning
logic.
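
For illustration, with the generic KASAN granule size of 8 bytes, the
rounding the helper performs looks like this (worked example, not part
of the patch):

    Shrink old_size = 20 -> new_size = 10:
        kasan_poison_last_granule(addr, 10);    /* bytes 10..15 of granule
                                                   [8, 16) become invalid */
        round_up(10, 8) == 16, round_up(20, 8) == 24
        __kasan_poison_vmalloc(addr + 16, 8);   /* poison granule [16, 24) */

    Grow old_size = 10 -> new_size = 20:
        round_down(10, 8) == 8
        __kasan_unpoison_vmalloc(addr + 8, 12, ...); /* unpoison [8, 20)
                                                        from a granule-
                                                        aligned start */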

[ryabinin.a.a@gmail.com: move kasan_enabled() check, fix build]
Link: https://lkml.kernel.org/r/20260119144509.32767-1-ryabinin.a.a@gmail.com
Link: https://lkml.kernel.org/r/20260113191516.31015-1-ryabinin.a.a@gmail.com
Fixes: d699440f58ce ("mm: fix vrealloc()'s KASAN poisoning logic")
Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Reported-by: Maciej Żenczykowski <maze@google.com>
Reported-by: <joonki.min@samsung-slsi.corp-partner.google.com>
Closes: https://lkml.kernel.org/r/CANP3RGeuRW53vukDy7WDO3FiVgu34-xVJYkfpm08oLO3odYFrA@mail.gmail.com
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Tested-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/kasan.h
mm/kasan/common.c
mm/vmalloc.c

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9c6ac4b62eb9955c2841cf1fc5d926df9cc778dc..338a1921a50ad4c2f72e5ea1d9937bfff6af3a33 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -641,6 +641,17 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
                __kasan_unpoison_vmap_areas(vms, nr_vms, flags);
 }
 
+void __kasan_vrealloc(const void *start, unsigned long old_size,
+               unsigned long new_size);
+
+static __always_inline void kasan_vrealloc(const void *start,
+                                       unsigned long old_size,
+                                       unsigned long new_size)
+{
+       if (kasan_enabled())
+               __kasan_vrealloc(start, old_size, new_size);
+}
+
 #else /* CONFIG_KASAN_VMALLOC */
 
 static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -670,6 +681,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
                          kasan_vmalloc_flags_t flags)
 { }
 
+static inline void kasan_vrealloc(const void *start, unsigned long old_size,
+                               unsigned long new_size) { }
+
 #endif /* CONFIG_KASAN_VMALLOC */
 
 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ed489a14dddf74827fdc4edec812224d6a8314d9..b7d05c2a6d93dde936faf075b0c9694f6c0b0f9b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -606,4 +606,25 @@ void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
                        __kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
        }
 }
+
+void __kasan_vrealloc(const void *addr, unsigned long old_size,
+               unsigned long new_size)
+{
+       if (new_size < old_size) {
+               kasan_poison_last_granule(addr, new_size);
+
+               new_size = round_up(new_size, KASAN_GRANULE_SIZE);
+               old_size = round_up(old_size, KASAN_GRANULE_SIZE);
+               if (new_size < old_size)
+                       __kasan_poison_vmalloc(addr + new_size,
+                                       old_size - new_size);
+       } else if (new_size > old_size) {
+               old_size = round_down(old_size, KASAN_GRANULE_SIZE);
+               __kasan_unpoison_vmalloc(addr + old_size,
+                                       new_size - old_size,
+                                       KASAN_VMALLOC_PROT_NORMAL |
+                                       KASAN_VMALLOC_VM_ALLOC |
+                                       KASAN_VMALLOC_KEEP_TAG);
+       }
+}
 #endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 628f96e83b11878db05d9a8b4008c68fa15f6e20..e286c2d2068cbd4ad5735a970cf7b2ea2662db50 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4322,7 +4322,7 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
                if (want_init_on_free() || want_init_on_alloc(flags))
                        memset((void *)p + size, 0, old_size - size);
                vm->requested_size = size;
-               kasan_poison_vmalloc(p + size, old_size - size);
+               kasan_vrealloc(p, old_size, size);
                return (void *)p;
        }
 
@@ -4330,16 +4330,13 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
         * We already have the bytes available in the allocation; use them.
         */
        if (size <= alloced_size) {
-               kasan_unpoison_vmalloc(p + old_size, size - old_size,
-                                      KASAN_VMALLOC_PROT_NORMAL |
-                                      KASAN_VMALLOC_VM_ALLOC |
-                                      KASAN_VMALLOC_KEEP_TAG);
                /*
                 * No need to zero memory here, as unused memory will have
                 * already been zeroed at initial allocation time or during
                 * realloc shrink time.
                 */
                vm->requested_size = size;
+               kasan_vrealloc(p, old_size, size);
                return (void *)p;
        }