arm64: mm: Fix incomplete tag reset in change_memory_common()
author Jiayuan Chen <jiayuan.chen@shopee.com>
Sun, 4 Jan 2026 12:35:27 +0000 (20:35 +0800)
committer Catalin Marinas <catalin.marinas@arm.com>
Thu, 8 Jan 2026 19:47:59 +0000 (19:47 +0000)
Running KASAN KUnit tests with {HW,SW}_TAGS mode triggers a fault in
change_memory_common():

  Call trace:
   change_memory_common+0x168/0x210 (P)
   set_memory_ro+0x20/0x48
   vmalloc_helpers_tags+0xe8/0x338
   kunit_try_run_case+0x74/0x188
   kunit_generic_run_threadfn_adapter+0x30/0x70
   kthread+0x11c/0x200
   ret_from_fork+0x10/0x20
  ---[ end trace 0000000000000000 ]---
      # vmalloc_helpers_tags: try faulted
      not ok 67 vmalloc_helpers_tags

Commit a06494adb7ef ("arm64: mm: use untagged address to calculate page index")
fixed a KASAN warning in the BPF subsystem by adding kasan_reset_tag() to
the index calculation. In the execmem flow:

    bpf_prog_pack_alloc()
      -> bpf_jit_alloc_exec()
        -> execmem_alloc()

The returned address from execmem_vmalloc/execmem_cache_alloc is passed
through kasan_reset_tag(), so start has no tag while area->addr still
retains the original tag. The fix correctly handled this case by resetting
the tag on area->addr:

    (start - (unsigned long)kasan_reset_tag(area->addr)) >> PAGE_SHIFT
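
That case can be modelled with a small userspace sketch. Here reset_tag()
is only a stand-in for kasan_reset_tag() (the kernel restores the canonical
top byte rather than clearing it), and 4K pages (PAGE_SHIFT == 12) are
assumed:

    #include <stdint.h>
    #include <stdio.h>

    /* Fake tag in the top byte; reset_tag() stands in for kasan_reset_tag(). */
    #define TAG(a)        ((0x2aULL << 56) | (a))
    #define reset_tag(a)  ((a) & ~(0xffULL << 56))

    int main(void)
    {
        uint64_t area_addr = TAG(0x8000000ULL);          /* area->addr: tagged */
        uint64_t start = reset_tag(area_addr) + 0x3000;  /* execmem: untagged  */

        /* Both operands end up untagged, so the index comes out right (3). */
        printf("idx = %llu\n",
               (unsigned long long)((start - reset_tag(area_addr)) >> 12));
        return 0;
    }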

However, in normal vmalloc paths, both start and area->addr carry matching
tags (or no tags at all). Resetting only area->addr therefore creates a
mismatch between the two operands: subtracting the untagged area->addr from
the still-tagged start leaves the tag bits in the difference and yields an
incorrect index.

Fix this by resetting tags on both addresses in the index calculation.
This ensures correct results regardless of the tag state of either address.
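
A companion userspace sketch, under the same assumptions as the one above
(stand-in reset_tag(), 4K pages), shows how the one-sided reset goes wrong
for plain vmalloc and how resetting both sides gives the expected index:

    #include <stdint.h>
    #include <stdio.h>

    /* Fake tag in the top byte; reset_tag() stands in for kasan_reset_tag(). */
    #define TAG(a)        ((0x2aULL << 56) | (a))
    #define reset_tag(a)  ((a) & ~(0xffULL << 56))

    int main(void)
    {
        /* Plain vmalloc: start and area->addr carry the same tag. */
        uint64_t area_addr = TAG(0x8000000ULL);
        uint64_t start = area_addr + 0x3000;

        /* Old formula: the tag of start leaks into the index. */
        uint64_t old_idx = (start - reset_tag(area_addr)) >> 12;
        /* Fixed formula: both operands untagged, index is 0x3. */
        uint64_t new_idx = (reset_tag(start) - reset_tag(area_addr)) >> 12;

        printf("old idx = %#llx\nnew idx = %#llx\n",
               (unsigned long long)old_idx, (unsigned long long)new_idx);
        return 0;
    }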

Tested with KASAN KUnit tests under CONFIG_KASAN_GENERIC,
CONFIG_KASAN_SW_TAGS, and CONFIG_KASAN_HW_TAGS - all pass. Also verified
the original BPF KASAN warning from [1] is still fixed.

[1] https://lore.kernel.org/all/20251118164115.GA3977565@ax162/

Fixes: a06494adb7ef ("arm64: mm: use untagged address to calculate page index")
Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index f0e784b963e6909e678c5e347c5084f1178c6e50..7176ff39cb8796dff135686205263d1e28146286 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -171,7 +171,8 @@ static int change_memory_common(unsigned long addr, int numpages,
         */
        area = find_vm_area((void *)addr);
        if (!area ||
-           end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
+           ((unsigned long)kasan_reset_tag((void *)end) >
+            (unsigned long)kasan_reset_tag(area->addr) + area->size) ||
            ((area->flags & (VM_ALLOC | VM_ALLOW_HUGE_VMAP)) != VM_ALLOC))
                return -EINVAL;
 
@@ -184,7 +185,8 @@ static int change_memory_common(unsigned long addr, int numpages,
         */
        if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
                            pgprot_val(clear_mask) == PTE_RDONLY)) {
-               unsigned long idx = (start - (unsigned long)kasan_reset_tag(area->addr))
+               unsigned long idx = ((unsigned long)kasan_reset_tag((void *)start) -
+                                    (unsigned long)kasan_reset_tag(area->addr))
                                    >> PAGE_SHIFT;
                for (; numpages; idx++, numpages--) {
                        ret = __change_memory_common((u64)page_address(area->pages[idx]),