mm, slab: clean up slab->obj_exts always
author    Zhenhua Huang <quic_zhenhuah@quicinc.com>
          Mon, 21 Apr 2025 07:52:32 +0000 (15:52 +0800)
committer Vlastimil Babka <vbabka@suse.cz>
          Thu, 24 Apr 2025 17:19:40 +0000 (19:19 +0200)
When memory allocation profiling is disabled at runtime or due to an
error, shutdown_mem_profiling() is called: a slab->obj_exts vector that
was allocated earlier remains.
It won't be cleared by unaccount_slab() because
mem_alloc_profiling_enabled() is no longer true. This is incorrect:
slab->obj_exts should always be cleaned up in unaccount_slab() to avoid
the following error:

[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup

[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index dc9e729e1d269b5d362cb5bc44f824640ffd00f3..be8b09e09d3043fb6986120addd5d02eb050c69b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2028,8 +2028,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
        return 0;
 }
 
-/* Should be called only if mem_alloc_profiling_enabled() */
-static noinline void free_slab_obj_exts(struct slab *slab)
+static inline void free_slab_obj_exts(struct slab *slab)
 {
        struct slabobj_ext *obj_exts;
 
@@ -2049,18 +2048,6 @@ static noinline void free_slab_obj_exts(struct slab *slab)
        slab->obj_exts = 0;
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-       if (mem_alloc_profiling_enabled())
-               return true;
-
-       /*
-        * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
-        * inside memcg_slab_post_alloc_hook. No other users for now.
-        */
-       return false;
-}
-
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static inline void init_slab_obj_exts(struct slab *slab)
@@ -2077,11 +2064,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
 {
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-       return false;
-}
-
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -2129,7 +2111,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 static inline void
 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-       if (need_slab_obj_ext())
+       if (mem_alloc_profiling_enabled())
                __alloc_tagging_slab_alloc_hook(s, object, flags);
 }
 
@@ -2601,8 +2583,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
 static __always_inline void unaccount_slab(struct slab *slab, int order,
                                           struct kmem_cache *s)
 {
-       if (memcg_kmem_online() || need_slab_obj_ext())
-               free_slab_obj_exts(slab);
+       /*
+        * The slab object extensions should now be freed regardless of
+        * whether mem_alloc_profiling_enabled() or not because profiling
+        * might have been disabled after slab->obj_exts got allocated.
+        */
+       free_slab_obj_exts(slab);
 
        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
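
Calling free_slab_obj_exts() unconditionally is safe for slabs that
never received an extension vector: the unchanged part of its body
(elided in the first hunk above) returns early when no vector exists,
roughly like this sketch:

    static inline void free_slab_obj_exts(struct slab *slab)
    {
            struct slabobj_ext *obj_exts = slab_obj_exts(slab);

            if (!obj_exts)      /* nothing was allocated for this slab */
                    return;
            ...                 /* otherwise free the vector and clear slab->obj_exts */
    }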