From: Hao Ge
Date: Wed, 18 Jun 2025 01:58:09 +0000 (+0800)
Subject: mm/percpu: conditionally define _shared_alloc_tag via CONFIG_ARCH_MODULE_NEEDS_WEAK_P...
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=59b5ed409d03bc8b7bb153d78afcd7cea9d7bbfa;p=thirdparty%2Fkernel%2Flinux.git

mm/percpu: conditionally define _shared_alloc_tag via CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU

Recently discovered this entry while checking kallsyms on ARM64:

ffff800083e509c0 D _shared_alloc_tag

If ARCH_NEEDS_WEAK_PER_CPU is not defined (it is only defined for the
s390 and alpha architectures), there is no need to statically define the
percpu variable _shared_alloc_tag, so its definition should be compiled
out everywhere else.

When building the core kernel code for s390 or alpha, ARCH_NEEDS_WEAK_PER_CPU
remains undefined, because it is gated by #if defined(MODULE); it is only
defined when building modules for these architectures.  Therefore, remove
all instances of ARCH_NEEDS_WEAK_PER_CPU from the code and introduce
CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU to replace the relevant logic.

We can now conditionally define the percpu variable _shared_alloc_tag
based on CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU.  This allows
architectures (such as s390/alpha) that require weak definitions for
percpu variables in modules to include the definition, while others omit
it via compile-time exclusion.

Link: https://lkml.kernel.org/r/20250618015809.1235761-1-hao.ge@linux.dev
Signed-off-by: Hao Ge
Suggested-by: Suren Baghdasaryan
Acked-by: Alexander Gordeev [s390]
Acked-by: Mike Rapoport (Microsoft)
Cc: Christoph Lameter
Cc: Christian Borntraeger
Cc: David Hildenbrand
Cc: Dennis Zhou
Cc: Heiko Carstens
Cc: Kent Overstreet
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Matt Turner
Cc: Richard Henderson
Cc: Sven Schnelle
Cc: Tejun Heo
Cc: Vasily Gorbik
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---

diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 109a4cddcd138..80367f2cf821c 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -7,6 +7,7 @@ config ALPHA
 	select ARCH_HAS_DMA_OPS if PCI
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_MODULE_NEEDS_WEAK_PER_CPU if SMP
 	select ARCH_NO_PREEMPT
 	select ARCH_NO_SG_CHAIN
 	select ARCH_USE_CMPXCHG_LOCKREF
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 6923249f2d49c..4383d66341dca 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -9,10 +9,9 @@
  * way above 4G.
  *
  * Always use weak definitions for percpu variables in modules.
+ * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
+ * in the Kconfig.
  */
-#if defined(MODULE) && defined(CONFIG_SMP)
-#define ARCH_NEEDS_WEAK_PER_CPU
-#endif
 
 #include <asm-generic/percpu.h>
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0c16dc443e2f6..b652cb952f318 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -132,6 +132,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
+	select ARCH_MODULE_NEEDS_WEAK_PER_CPU
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 84f6b8357b453..96af7d9640142 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -16,10 +16,9 @@
  * For 64 bit module code, the module may be more than 4G above the
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
+ * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
+ * in the Kconfig.
  */
-#if defined(MODULE)
-#define ARCH_NEEDS_WEAK_PER_CPU
-#endif
 
 /*
  * We use a compare-and-swap loop since that uses less cpu cycles than
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8f7931eb7d164..9ef2633e2c08c 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -88,7 +88,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
 	return container_of(ct, struct alloc_tag, ct);
 }
 
-#ifdef ARCH_NEEDS_WEAK_PER_CPU
+#if defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)
 /*
  * When percpu variables are required to be defined as weak, static percpu
  * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
@@ -102,7 +102,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
 		.ct = CODE_TAG_INIT,			\
 		.counters = &_shared_alloc_tag };
 
-#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#else /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
 
 #ifdef MODULE
 
@@ -123,7 +123,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
 
 #endif /* MODULE */
 
-#endif /* ARCH_NEEDS_WEAK_PER_CPU */
+#endif /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
 
 DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, mem_alloc_profiling_key);
 
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index c16cdeaa505e6..12d90360f6db9 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -63,14 +63,15 @@
  * 1. The symbol must be globally unique, even the static ones.
  * 2. Static percpu variables cannot be defined inside a function.
  *
- * Archs which need weak percpu definitions should define
- * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ * Archs which need weak percpu definitions should set
+ * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary.
 *
  * To ensure that the generic code observes the above two
  * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
  * definition is used for all cases.
  */
-#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \
+	defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
 /*
  * __pcpu_scope_* dummy variable is used to enforce scope.  It
  * receives the static modifier when it's used in front of
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 36f07dc950699..41ccfb035b7b4 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -25,8 +25,10 @@ static bool mem_profiling_support;
 
 static struct codetag_type *alloc_tag_cttype;
 
+#ifdef CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU
 DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
 EXPORT_SYMBOL(_shared_alloc_tag);
+#endif
 
 DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, mem_alloc_profiling_key);
 
diff --git a/mm/Kconfig b/mm/Kconfig
index 3b2060a61c05d..065b1f19dd991 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -933,6 +933,13 @@ config ARCH_SUPPORTS_PUD_PFNMAP
 	def_bool y
 	depends on ARCH_SUPPORTS_HUGE_PFNMAP && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 
+#
+# Architectures that always use weak definitions for percpu
+# variables in modules should set this.
+#
+config ARCH_MODULE_NEEDS_WEAK_PER_CPU
+	bool
+
 #
 # UP and nommu archs use km based percpu allocator
 #
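
A stand-alone user-space sketch of the constraint behind this patch may help: once a symbol has to be weak it needs external linkage and can no longer be a function-local static, so every call site must fall back to one shared counter, which is the role _shared_alloc_tag plays for modules on s390/alpha.  The sketch below is illustrative only; NEEDS_WEAK_PER_CPU, shared_tag, ALLOC_TAG_COUNTER() and tracked_alloc() are made-up stand-ins (NEEDS_WEAK_PER_CPU standing in for "CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE"), not the kernel's real per-cpu or alloc_tag macros.

/*
 * Illustrative user-space sketch only -- not the kernel's real macros.
 * Build normally for the per-call-site case, or with
 * -DNEEDS_WEAK_PER_CPU for the shared-counter case.
 */
#include <stdio.h>
#include <stdlib.h>

struct alloc_tag_counters {
	unsigned long bytes;
	unsigned long calls;
};

#ifdef NEEDS_WEAK_PER_CPU
/*
 * A weak definition must have external linkage, so it cannot be a
 * function-local static.  All call sites therefore share this one
 * counter, analogous to _shared_alloc_tag.
 */
struct alloc_tag_counters shared_tag __attribute__((weak));
#define ALLOC_TAG_COUNTER()	(&shared_tag)
#else
/* Each call site gets its own static counter (GNU statement expression). */
#define ALLOC_TAG_COUNTER()					\
({								\
	static struct alloc_tag_counters __ctr;			\
	&__ctr;							\
})
#endif

/* Count bytes/calls at the call site, then allocate. */
#define tracked_alloc(size)					\
({								\
	struct alloc_tag_counters *__c = ALLOC_TAG_COUNTER();	\
	__c->bytes += (size);					\
	__c->calls++;						\
	malloc(size);						\
})

int main(void)
{
	free(tracked_alloc(64));	/* call site 1 */
	free(tracked_alloc(128));	/* call site 2 */
	printf("two tracked allocations\n");
	return 0;
}

Compiled as-is, nm on the resulting binary should show two local counters (one per call site); compiled with -DNEEDS_WEAK_PER_CPU it should show only the single weak shared_tag, mirroring the shared definition that this patch makes conditional in lib/alloc_tag.c.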