From: Mike Rapoport (Microsoft)
Date: Wed, 11 Feb 2026 10:31:38 +0000 (+0200)
Subject: mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn()
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=652d12bc74a075f345f228f8945e05517a38874d;p=thirdparty%2Fkernel%2Flinux.git

mm: don't special case !MMU for is_zero_pfn() and my_zero_pfn()

Patch series "arch, mm: consolidate empty_zero_page", v3.

These patches cleanup handling of ZERO_PAGE() and zero_pfn.

This patch (of 4):

nommu architectures have empty_zero_page and define ZERO_PAGE() and
although they don't really use it to populate page tables, there is no
reason to hardwire !MMU implementation of is_zero_pfn() and my_zero_pfn()
to 0.

Drop #ifdef CONFIG_MMU around implementations of is_zero_pfn() and
my_zero_pfn() and remove !MMU version.

While on it, make zero_pfn __ro_after_init.

Link: https://lkml.kernel.org/r/20260211103141.3215197-1-rppt@kernel.org
Link: https://lkml.kernel.org/r/20260211103141.3215197-2-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft)
Acked-by: David Hildenbrand (Arm)
Acked-by: Liam R. Howlett
Cc: Andreas Larsson
Cc: "Borislav Petkov (AMD)"
Cc: Catalin Marinas
Cc: David S. Miller
Cc: Dinh Nguyen
Cc: Geert Uytterhoeven
Cc: Guo Ren
Cc: Helge Deller
Cc: Huacai Chen
Cc: Ingo Molnar
Cc: Johannes Berg
Cc: John Paul Adrian Glaubitz
Cc: Lorenzo Stoakes
Cc: Madhavan Srinivasan
Cc: Magnus Lindholm
Cc: Matt Turner
Cc: Max Filippov
Cc: Michael Ellerman
Cc: Michal Hocko
Cc: Michal Simek
Cc: Palmer Dabbelt
Cc: Richard Weinberger
Cc: Russell King
Cc: Stafford Horne
Cc: Suren Baghdasaryan
Cc: Vineet Gupta
Cc: Vlastimil Babka
Cc: Will Deacon
Cc: Christophe Leroy (CS GROUP)
Cc: Dave Hansen
Signed-off-by: Andrew Morton
---

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a50df42a893fb..5e772599d9a5c 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1917,7 +1917,6 @@ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
 	pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
 }
 
-#ifdef CONFIG_MMU
 #ifdef __HAVE_COLOR_ZERO_PAGE
 static inline int is_zero_pfn(unsigned long pfn)
 {
@@ -1940,18 +1939,7 @@ static inline unsigned long my_zero_pfn(unsigned long addr)
 	extern unsigned long zero_pfn;
 	return zero_pfn;
 }
-#endif
-#else
-static inline int is_zero_pfn(unsigned long pfn)
-{
-	return 0;
-}
-
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
-	return 0;
-}
-#endif /* CONFIG_MMU */
+#endif /* __HAVE_COLOR_ZERO_PAGE */
 
 #ifdef CONFIG_MMU
diff --git a/mm/memory.c b/mm/memory.c
index 7084c426f9338..6b504fc5e815f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -162,21 +162,8 @@ static int __init disable_randmaps(char *s)
 }
 __setup("norandmaps", disable_randmaps);
 
-unsigned long zero_pfn __read_mostly;
-EXPORT_SYMBOL(zero_pfn);
-
 unsigned long highest_memmap_pfn __read_mostly;
 
-/*
- * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
- */
-static int __init init_zero_pfn(void)
-{
-	zero_pfn = page_to_pfn(ZERO_PAGE(0));
-	return 0;
-}
-early_initcall(init_zero_pfn);
-
 void mm_trace_rss_stat(struct mm_struct *mm, int member)
 {
 	trace_rss_stat(mm, member);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index df34797691bda..f3755a66b9d0f 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(mem_map);
 void *high_memory;
 EXPORT_SYMBOL(high_memory);
 
+unsigned long zero_pfn __ro_after_init;
+EXPORT_SYMBOL(zero_pfn);
+
 #ifdef CONFIG_DEBUG_MEMORY_INIT
 int __meminitdata mminit_loglevel;
@@ -2672,6 +2675,13 @@ static void __init mem_init_print_info(void)
 	);
 }
 
+static int __init init_zero_pfn(void)
+{
+	zero_pfn = page_to_pfn(ZERO_PAGE(0));
+	return 0;
+}
+early_initcall(init_zero_pfn);
+
 void __init __weak arch_mm_preinit(void)
 {
 }