From: Mike Rapoport (Microsoft)
Date: Mon, 23 Mar 2026 07:48:34 +0000 (+0200)
Subject: memblock: make free_reserved_area() update memblock if ARCH_KEEP_MEMBLOCK=y
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b2129a39511b71b5ed0ae923d6eebd9398c6184e;p=thirdparty%2Flinux.git

memblock: make free_reserved_area() update memblock if ARCH_KEEP_MEMBLOCK=y

On architectures that keep memblock after boot, freeing of reserved
memory with free_reserved_area() is paired with an update of the
memblock arrays, usually by a call to memblock_free().

Make free_reserved_area() directly update memblock.reserved when
ARCH_KEEP_MEMBLOCK is enabled.

Remove the now-redundant explicit memblock_free() call from
arm64::free_initmem() and the #ifdef CONFIG_ARCH_KEEP_MEMBLOCK block
from the generic free_initrd_mem().

Link: https://patch.msgid.link/20260323074836.3653702-8-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft)
---

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 96711b8578fd0..07b17c7087029 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -385,9 +385,6 @@ void free_initmem(void)
 	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
 	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
 
-	/* Delete __init region from memblock.reserved. */
-	memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
-
 	free_reserved_area(lm_init_begin, lm_init_end,
 			   POISON_FREE_INITMEM, "unused kernel");
 
 	/*
diff --git a/init/initramfs.c b/init/initramfs.c
index 139baed06589a..bca0922b28508 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -652,13 +652,6 @@ disable:
 
 void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
-	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
-	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
-
-	memblock_free((void *)aligned_start, aligned_end - aligned_start);
-#endif
-
 	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
 			   "initrd");
 }
diff --git a/mm/memblock.c b/mm/memblock.c
index 68a72bd4c8bd6..dee18c40d9289 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -943,6 +943,12 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
 		end_pa = __pa(end - 1) + 1;
 	}
 
+	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+		if (start_pa < end_pa)
+			memblock_remove_range(&memblock.reserved,
+					      start_pa, end_pa - start_pa);
+	}
+
 	pages = __free_reserved_area(start_pa, end_pa, poison);
 	if (pages && s)
 		pr_info("Freeing %s memory: %ldK\n", s, K(pages));