git.ipfire.org Git - thirdparty/linux.git/commitdiff
arm64/mm: use lm_alias() with addresses passed to memblock_free()
author: Joey Gouly <joey.gouly@arm.com>
Thu, 5 Sep 2024 15:29:35 +0000 (16:29 +0100)
committer: Will Deacon <will@kernel.org>
Fri, 6 Sep 2024 11:29:30 +0000 (12:29 +0100)
The pointer argument to memblock_free() needs to be a linear map address, but
in mem_init() we pass __init_begin/__init_end, which is a kernel image address.

This results in warnings when building with CONFIG_DEBUG_VIRTUAL=y:

    virt_to_phys used for non-linear address: ffff800081270000 (set_reset_devices+0x0/0x10)
    WARNING: CPU: 0 PID: 1 at arch/arm64/mm/physaddr.c:12 __virt_to_phys+0x54/0x70
    Modules linked in:
    CPU: 0 UID: 0 PID: 1 Comm: swapper/0 Not tainted 6.11.0-rc6-next-20240905 #5810 b1ebb0ad06653f35ce875413d5afad24668df3f3
    Hardware name: FVP Base RevC (DT)
    pstate: 61402005 (nZCv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--)
    pc : __virt_to_phys+0x54/0x70
    lr : __virt_to_phys+0x54/0x70
    sp : ffff80008169be20
    ...
    Call trace:
     __virt_to_phys+0x54/0x70
     memblock_free+0x18/0x30
     free_initmem+0x3c/0x9c
     kernel_init+0x30/0x1cc
     ret_from_fork+0x10/0x20

Fix this by having mem_init() convert the pointers via lm_alias().

Fixes: 1db9716d4487 ("arm64/mm: Delete __init region from memblock.reserved")
Signed-off-by: Joey Gouly <joey.gouly@arm.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Rong Qianfeng <rongqianfeng@vivo.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20240905152935.4156469-1-joey.gouly@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/mm/init.c

index aea834a9691a3918037adceca7bacd6024d00328..a0400b9aa814a93a523989fe3424f738328e5756 100644 (file)
@@ -414,14 +414,16 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-       unsigned long aligned_begin = ALIGN_DOWN((u64)__init_begin, PAGE_SIZE);
-       unsigned long aligned_end = ALIGN((u64)__init_end, PAGE_SIZE);
+       void *lm_init_begin = lm_alias(__init_begin);
+       void *lm_init_end = lm_alias(__init_end);
+
+       WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
+       WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
 
        /* Delete __init region from memblock.reserved. */
-       memblock_free((void *)aligned_begin, aligned_end - aligned_begin);
+       memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
 
-       free_reserved_area(lm_alias(__init_begin),
-                          lm_alias(__init_end),
+       free_reserved_area(lm_init_begin, lm_init_end,
                           POISON_FREE_INITMEM, "unused kernel");
        /*
         * Unmap the __init region but leave the VM area in place. This