git.ipfire.org Git - thirdparty/linux.git/commitdiff
memcg: Convert mem_cgroup_from_obj_folio() to mem_cgroup_from_obj_slab()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 13 Nov 2025 00:09:28 +0000 (00:09 +0000)
committer: Vlastimil Babka <vbabka@suse.cz>
Thu, 13 Nov 2025 19:23:09 +0000 (20:23 +0100)
In preparation for splitting struct slab from struct page and struct
folio, convert the pointer to a slab rather than a folio.  This means
we can end up passing a NULL slab pointer to mem_cgroup_from_obj_slab()
if the pointer is not to a slab-allocated page, and we handle that
appropriately by returning NULL.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: cgroups@vger.kernel.org
Link: https://patch.msgid.link/20251113000932.1589073-15-willy@infradead.org
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/memcontrol.c

index 4deda33625f41aa81db0276454f56b826a033146..b46356da6c0e5522471f4f9cf0d0c9657be95877 100644 (file)
@@ -2557,38 +2557,25 @@ static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
 }
 
 static __always_inline
-struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
+struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 {
        /*
         * Slab objects are accounted individually, not per-page.
         * Memcg membership data for each individual object is saved in
         * slab->obj_exts.
         */
-       if (folio_test_slab(folio)) {
-               struct slabobj_ext *obj_exts;
-               struct slab *slab;
-               unsigned int off;
-
-               slab = folio_slab(folio);
-               obj_exts = slab_obj_exts(slab);
-               if (!obj_exts)
-                       return NULL;
-
-               off = obj_to_index(slab->slab_cache, slab, p);
-               if (obj_exts[off].objcg)
-                       return obj_cgroup_memcg(obj_exts[off].objcg);
+       struct slabobj_ext *obj_exts;
+       unsigned int off;
 
+       obj_exts = slab_obj_exts(slab);
+       if (!obj_exts)
                return NULL;
-       }
 
-       /*
-        * folio_memcg_check() is used here, because in theory we can encounter
-        * a folio where the slab flag has been cleared already, but
-        * slab->obj_exts has not been freed yet
-        * folio_memcg_check() will guarantee that a proper memory
-        * cgroup pointer or NULL will be returned.
-        */
-       return folio_memcg_check(folio);
+       off = obj_to_index(slab->slab_cache, slab, p);
+       if (obj_exts[off].objcg)
+               return obj_cgroup_memcg(obj_exts[off].objcg);
+
+       return NULL;
 }
 
 /*
@@ -2602,10 +2589,15 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
  */
 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
 {
+       struct slab *slab;
+
        if (mem_cgroup_disabled())
                return NULL;
 
-       return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
+       slab = virt_to_slab(p);
+       if (slab)
+               return mem_cgroup_from_obj_slab(slab, p);
+       return folio_memcg_check(virt_to_folio(p));
 }
 
 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)