From: Ritesh Harjani (IBM)
Date: Thu, 17 Jul 2025 23:25:13 +0000 (-0700)
Subject: powerpc/mm/book3s64: Move kfence and debug_pagealloc related calls to __init section
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=645d1b666498ef0d2c44c434a609b5560e9dc401;p=thirdparty%2Flinux.git

powerpc/mm/book3s64: Move kfence and debug_pagealloc related calls to __init section

Move a few kfence and debug_pagealloc related functions in hash_utils.c
and radix_pgtable.c to the __init section, since these are only invoked
once by an __init function during system initialization, i.e.:

 - hash_debug_pagealloc_alloc_slots()
 - hash_kfence_alloc_pool()
 - hash_kfence_map_pool()

The above 3 functions only get called by __init htab_initialize().

 - alloc_kfence_pool()
 - map_kfence_pool()

The above 2 functions only get called by __init radix_init_pgtable().

This should also help fix warning messages like:

>> WARNING: modpost: vmlinux: section mismatch in reference: hash_debug_pagealloc_alloc_slots+0xb0 (section: .text) -> memblock_alloc_try_nid (section: .init.text)

Reported-by: kernel test robot
Closes: https://lore.kernel.org/oe-kbuild-all/202504190552.mnFGs5sj-lkp@intel.com/
Signed-off-by: Ritesh Harjani (IBM)
Link: https://lore.kernel.org/r/20250717232519.2984886-8-kees@kernel.org
Signed-off-by: Kees Cook
---

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 5158aefe48739..4693c464fc5af 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -343,7 +343,7 @@ static inline bool hash_supports_debug_pagealloc(void)
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
 static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
-static void hash_debug_pagealloc_alloc_slots(void)
+static __init void hash_debug_pagealloc_alloc_slots(void)
 {
 	if (!hash_supports_debug_pagealloc())
 		return;
@@ -409,7 +409,7 @@ static DEFINE_RAW_SPINLOCK(linear_map_kf_hash_lock);
 
 static phys_addr_t kfence_pool;
 
-static inline void hash_kfence_alloc_pool(void)
+static __init void hash_kfence_alloc_pool(void)
 {
 	if (!kfence_early_init_enabled())
 		goto err;
@@ -445,7 +445,7 @@ err:
 	disable_kfence();
 }
 
-static inline void hash_kfence_map_pool(void)
+static __init void hash_kfence_map_pool(void)
 {
 	unsigned long kfence_pool_start, kfence_pool_end;
 	unsigned long prot = pgprot_val(PAGE_KERNEL);
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 9f764bc42b8cc..376dba5992f27 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -363,7 +363,7 @@ static int __meminit create_physical_mapping(unsigned long start,
 }
 
 #ifdef CONFIG_KFENCE
-static inline phys_addr_t alloc_kfence_pool(void)
+static __init phys_addr_t alloc_kfence_pool(void)
 {
 	phys_addr_t kfence_pool;
 
@@ -393,7 +393,7 @@ no_kfence:
 	return 0;
 }
 
-static inline void map_kfence_pool(phys_addr_t kfence_pool)
+static __init void map_kfence_pool(phys_addr_t kfence_pool)
 {
 	if (!kfence_pool)
 		return;
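
For illustration only (not part of the patch, and not code from the kernel tree): a
minimal sketch of the pattern the commit applies. The foo_* names below are
hypothetical; the point is that a helper reachable only from an __init path, and which
itself calls a boot-time allocator such as memblock_alloc(), should be annotated __init
so that caller and callee both land in .init.text and modpost sees no .text ->
.init.text reference to warn about.

/*
 * Minimal sketch with hypothetical foo_* names. Assumes memblock is
 * discarded after boot (as in the reported config, where
 * memblock_alloc_try_nid() sits in .init.text).
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>

static u8 *foo_slots;

/*
 * Marked __init: its only caller is __init, and the memblock_alloc()
 * call would otherwise be a .text -> .init.text section mismatch.
 */
static __init void foo_alloc_slots(unsigned long count)
{
	foo_slots = memblock_alloc(count, PAGE_SIZE);
	if (!foo_slots)
		panic("%s: failed to allocate %lu bytes\n", __func__, count);
}

/* The sole caller, invoked once from early init (hook-up not shown). */
void __init foo_initialize(void)
{
	foo_alloc_slots(1024);
}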