From 934afdf7f4cc243c5b00352a0f8a54d2de283fe9 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (Microsoft)" Date: Sun, 11 Jan 2026 10:20:41 +0200 Subject: [PATCH] hexagon: introduce arch_zone_limits_init() Move calculations of zone limits to a dedicated arch_zone_limits_init() function. Later MM core will use this function as an architecture specific callback during nodes and zones initialization and thus there won't be a need to call free_area_init() from every architecture. Link: https://lkml.kernel.org/r/20260111082105.290734-8-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) Cc: Alexander Gordeev Cc: Alex Shi Cc: Andreas Larsson Cc: "Borislav Petkov (AMD)" Cc: Catalin Marinas Cc: David Hildenbrand Cc: David S. Miller Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Helge Deller Cc: Huacai Chen Cc: Ingo Molnar Cc: Johannes Berg Cc: John Paul Adrian Glaubitz Cc: Jonathan Corbet Cc: Klara Modin Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Magnus Lindholm Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Hocko Cc: Michal Simek Cc: Muchun Song Cc: Oscar Salvador Cc: Palmer Dabbelt Cc: Pratyush Yadav Cc: Richard Weinberger Cc: "Ritesh Harjani (IBM)" Cc: Russell King Cc: Stafford Horne Cc: Suren Baghdasaryan Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/hexagon/mm/init.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 34eb9d424b96b..e2c9487d8d347 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -54,6 +54,18 @@ void sync_icache_dcache(pte_t pte) __vmcache_idsync(addr, PAGE_SIZE); } +void __init arch_zone_limits_init(unsigned long *max_zone_pfns) +{ + /* + * This is not particularly well documented anywhere, but + * give ZONE_NORMAL all the memory, including the big holes + * left by the kernel+bootmem_map 
which are already left as reserved + * in the bootmem_map; free_area_init should see those bits and + * adjust accordingly. + */ + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; +} + /* * In order to set up page allocator "nodes", * somebody has to call free_area_init() for UMA. @@ -65,16 +77,7 @@ static void __init paging_init(void) { unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, }; - /* - * This is not particularly well documented anywhere, but - * give ZONE_NORMAL all the memory, including the big holes - * left by the kernel+bootmem_map which are already left as reserved - * in the bootmem_map; free_area_init should see those bits and - * adjust accordingly. - */ - - max_zone_pfn[ZONE_NORMAL] = max_low_pfn; - + arch_zone_limits_init(max_zone_pfn); free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */ /* -- 2.47.3