On architectures that keep memblock after boot, freeing reserved memory
with free_reserved_area() is usually paired with an update of the
memblock.reserved array via a call to memblock_free().
Make free_reserved_area() directly update memblock.reserved when
ARCH_KEEP_MEMBLOCK is enabled.
Remove the now-redundant explicit memblock_free() call from the arm64
version of free_initmem() and the #ifdef CONFIG_ARCH_KEEP_MEMBLOCK block
from the generic free_initrd_mem().
Link: https://patch.msgid.link/20260323074836.3653702-8-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
- /* Delete __init region from memblock.reserved. */
- memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
-
free_reserved_area(lm_init_begin, lm_init_end,
POISON_FREE_INITMEM, "unused kernel");
/*
void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
{
-#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
- unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
- unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
-
- memblock_free((void *)aligned_start, aligned_end - aligned_start);
-#endif
-
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
end_pa = __pa(end - 1) + 1;
}
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+ if (start_pa < end_pa)
+ memblock_remove_range(&memblock.reserved,
+ start_pa, end_pa - start_pa);
+ }
+
pages = __free_reserved_area(start_pa, end_pa, poison);
if (pages && s)
pr_info("Freeing %s memory: %ldK\n", s, K(pages));