From: Harry Yoo
Date: Tue, 13 Jan 2026 06:18:40 +0000 (+0900)
Subject: mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=52f1ca8a459a73cf423a0b71b59f0b950e522cab;p=thirdparty%2Fkernel%2Flinux.git

mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper

Currently, the slab allocator assumes that slab->obj_exts is a pointer
to an array of struct slabobj_ext objects. However, to support storage
methods where struct slabobj_ext is embedded within objects, the slab
allocator should not make this assumption.

Instead of directly dereferencing the slabobj_exts array, abstract
access to struct slabobj_ext via helper functions. Introduce a new API
for slabobj_ext metadata access:

  slab_obj_ext(slab, obj_exts, index) - returns a pointer to the
  struct slabobj_ext element at the given index.

Directly dereferencing the return value of slab_obj_exts() is no longer
allowed. Instead, slab_obj_ext() must always be used to access
individual struct slabobj_ext objects.
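
For illustration, the conversion pattern applied throughout this patch
is sketched below (adapted from the mem_cgroup_from_obj_slab() hunk;
"off" is the object index obtained from obj_to_index()):

	/* Before: the slabobj_ext array was indexed directly. */
	struct slabobj_ext *obj_exts = slab_obj_exts(slab);

	if (obj_exts[off].objcg)
		return obj_cgroup_memcg(obj_exts[off].objcg);

	/* After: the element is looked up via slab_obj_ext(). */
	unsigned long obj_exts = slab_obj_exts(slab);
	struct slabobj_ext *obj_ext = slab_obj_ext(slab, obj_exts, off);

	if (obj_ext->objcg)
		return obj_cgroup_memcg(obj_ext->objcg);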

Convert all users to use these APIs. No functional changes intended.

Signed-off-by: Harry Yoo
Link: https://patch.msgid.link/20260113061845.159790-5-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka
---
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 86f43b7e5f71..276e87090a75 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2596,7 +2596,8 @@ struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 	 * Memcg membership data for each individual object is saved in
 	 * slab->obj_exts.
 	 */
-	struct slabobj_ext *obj_exts;
+	unsigned long obj_exts;
+	struct slabobj_ext *obj_ext;
 	unsigned int off;
 
 	obj_exts = slab_obj_exts(slab);
@@ -2604,8 +2605,9 @@ struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 		return NULL;
 
 	off = obj_to_index(slab->slab_cache, slab, p);
-	if (obj_exts[off].objcg)
-		return obj_cgroup_memcg(obj_exts[off].objcg);
+	obj_ext = slab_obj_ext(slab, obj_exts, off);
+	if (obj_ext->objcg)
+		return obj_cgroup_memcg(obj_ext->objcg);
 
 	return NULL;
 }
@@ -3191,6 +3193,9 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 	}
 
 	for (i = 0; i < size; i++) {
+		unsigned long obj_exts;
+		struct slabobj_ext *obj_ext;
+
 		slab = virt_to_slab(p[i]);
 
 		if (!slab_obj_exts(slab) &&
@@ -3213,29 +3218,33 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 					 slab_pgdat(slab), cache_vmstat_idx(s)))
 			return false;
 
+		obj_exts = slab_obj_exts(slab);
 		off = obj_to_index(s, slab, p[i]);
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
 		obj_cgroup_get(objcg);
-		slab_obj_exts(slab)[off].objcg = objcg;
+		obj_ext->objcg = objcg;
 	}
 
 	return true;
 }
 
 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-			    void **p, int objects, struct slabobj_ext *obj_exts)
+			    void **p, int objects, unsigned long obj_exts)
 {
 	size_t obj_size = obj_full_size(s);
 
 	for (int i = 0; i < objects; i++) {
 		struct obj_cgroup *objcg;
+		struct slabobj_ext *obj_ext;
 		unsigned int off;
 
 		off = obj_to_index(s, slab, p[i]);
-		objcg = obj_exts[off].objcg;
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
+		objcg = obj_ext->objcg;
 		if (!objcg)
 			continue;
 
-		obj_exts[off].objcg = NULL;
+		obj_ext->objcg = NULL;
 		refill_obj_stock(objcg, obj_size, true, -obj_size,
 				 slab_pgdat(slab), cache_vmstat_idx(s));
 		obj_cgroup_put(objcg);
diff --git a/mm/slab.h b/mm/slab.h
index 0993800fcced..4935602b3fce 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -507,10 +507,12 @@ static inline bool slab_in_kunit_test(void) { return false; }
  * associated with a slab.
  * @slab: a pointer to the slab struct
  *
- * Returns a pointer to the object extension vector associated with the slab,
- * or NULL if no such vector has been associated yet.
+ * Returns the address of the object extension vector associated with the slab,
+ * or zero if no such vector has been associated yet.
+ * Do not dereference the return value directly; use slab_obj_ext() to access
+ * its elements.
  */
-static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
+static inline unsigned long slab_obj_exts(struct slab *slab)
 {
 	unsigned long obj_exts = READ_ONCE(slab->obj_exts);
 
@@ -523,7 +525,29 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 					obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
 	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
 #endif
-	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
+
+	return obj_exts & ~OBJEXTS_FLAGS_MASK;
+}
+
+/*
+ * slab_obj_ext - get the pointer to the slab object extension metadata
+ * associated with an object in a slab.
+ * @slab: a pointer to the slab struct
+ * @obj_exts: a pointer to the object extension vector
+ * @index: an index of the object
+ *
+ * Returns a pointer to the object extension associated with the object.
+ */
+static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
+					       unsigned long obj_exts,
+					       unsigned int index)
+{
+	struct slabobj_ext *obj_ext;
+
+	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
+
+	obj_ext = (struct slabobj_ext *)obj_exts;
+	return &obj_ext[index];
 }
 
 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -531,7 +555,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 
 #else /* CONFIG_SLAB_OBJ_EXT */
 
-static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
+static inline unsigned long slab_obj_exts(struct slab *slab)
+{
+	return 0;
+}
+
+static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
+					       unsigned long obj_exts,
+					       unsigned int index)
 {
 	return NULL;
 }
@@ -548,7 +579,7 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 				  gfp_t flags, size_t size, void **p);
 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-			    void **p, int objects, struct slabobj_ext *obj_exts);
+			    void **p, int objects, unsigned long obj_exts);
 #endif
 
 void kvfree_rcu_cb(struct rcu_head *head);
diff --git a/mm/slub.c b/mm/slub.c
index 1b7ed91a2f15..09af619c2c69 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2055,7 +2055,7 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 {
-	struct slabobj_ext *slab_exts;
+	unsigned long slab_exts;
 	struct slab *obj_exts_slab;
 
 	obj_exts_slab = virt_to_slab(obj_exts);
@@ -2063,13 +2063,15 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 	if (slab_exts) {
 		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
 						 obj_exts_slab, obj_exts);
+		struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+						       slab_exts, offs);
 
-		if (unlikely(is_codetag_empty(&slab_exts[offs].ref)))
+		if (unlikely(is_codetag_empty(&ext->ref)))
 			return;
 
 		/* codetag should be NULL here */
-		WARN_ON(slab_exts[offs].ref.ct);
-		set_codetag_empty(&slab_exts[offs].ref);
+		WARN_ON(ext->ref.ct);
+		set_codetag_empty(&ext->ref);
 	}
 }
 
@@ -2237,7 +2239,7 @@ static inline void free_slab_obj_exts(struct slab *slab)
 {
 	struct slabobj_ext *obj_exts;
 
-	obj_exts = slab_obj_exts(slab);
+	obj_exts = (struct slabobj_ext *)slab_obj_exts(slab);
 	if (!obj_exts) {
 		/*
 		 * If obj_exts allocation failed, slab->obj_exts is set to
@@ -2284,26 +2286,29 @@ static inline void free_slab_obj_exts(struct slab *slab)
 #ifdef CONFIG_MEM_ALLOC_PROFILING
 
 static inline struct slabobj_ext *
-prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+prepare_slab_obj_ext_hook(struct kmem_cache *s, gfp_t flags, void *p)
 {
 	struct slab *slab;
+	unsigned long obj_exts;
 
 	slab = virt_to_slab(p);
-	if (!slab_obj_exts(slab) &&
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts &&
 	    alloc_slab_obj_exts(slab, s, flags, false)) {
 		pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
 			     __func__, s->name);
 		return NULL;
 	}
 
-	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
+	obj_exts = slab_obj_exts(slab);
+	return slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, p));
 }
 
 /* Should be called only if mem_alloc_profiling_enabled() */
 static noinline void
 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	struct slabobj_ext *obj_exts;
+	struct slabobj_ext *obj_ext;
 
 	if (!object)
 		return;
@@ -2314,14 +2319,14 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 	if (flags & __GFP_NO_OBJ_EXT)
 		return;
 
-	obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+	obj_ext = prepare_slab_obj_ext_hook(s, flags, object);
 	/*
 	 * Currently obj_exts is used only for allocation profiling.
 	 * If other users appear then mem_alloc_profiling_enabled()
 	 * check should be added before alloc_tag_add().
 	 */
-	if (likely(obj_exts))
-		alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+	if (likely(obj_ext))
+		alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
 	else
 		alloc_tag_set_inaccurate(current->alloc_tag);
 }
@@ -2338,8 +2343,8 @@ static noinline void
 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, int objects)
 {
-	struct slabobj_ext *obj_exts;
 	int i;
+	unsigned long obj_exts;
 
 	/* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
 		return;
@@ -2352,7 +2357,7 @@ __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p
 	for (i = 0; i < objects; i++) {
 		unsigned int off = obj_to_index(s, slab, p[i]);
 
-		alloc_tag_sub(&obj_exts[off].ref, s->size);
+		alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
 	}
 }
 
@@ -2411,7 +2416,7 @@ static __fastpath_inline
 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			  int objects)
 {
-	struct slabobj_ext *obj_exts;
+	unsigned long obj_exts;
 
 	if (!memcg_kmem_online())
 		return;
@@ -2426,7 +2431,8 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 static __fastpath_inline
 bool memcg_slab_post_charge(void *p, gfp_t flags)
 {
-	struct slabobj_ext *slab_exts;
+	unsigned long obj_exts;
+	struct slabobj_ext *obj_ext;
 	struct kmem_cache *s;
 	struct page *page;
 	struct slab *slab;
@@ -2467,10 +2473,11 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
 		return true;
 
 	/* Ignore already charged objects. */
-	slab_exts = slab_obj_exts(slab);
-	if (slab_exts) {
+	obj_exts = slab_obj_exts(slab);
+	if (obj_exts) {
 		off = obj_to_index(s, slab, p);
-		if (unlikely(slab_exts[off].objcg))
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
+		if (unlikely(obj_ext->objcg))
 			return true;
 	}