 }
 
 static __always_inline
-struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
+struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 {
 	/*
 	 * Slab objects are accounted individually, not per-page.
 	 * Memcg membership data for each individual object is saved in
 	 * slab->obj_exts.
 	 */
-	if (folio_test_slab(folio)) {
-		struct slabobj_ext *obj_exts;
-		struct slab *slab;
-		unsigned int off;
-
-		slab = folio_slab(folio);
-		obj_exts = slab_obj_exts(slab);
-		if (!obj_exts)
-			return NULL;
-
-		off = obj_to_index(slab->slab_cache, slab, p);
-		if (obj_exts[off].objcg)
-			return obj_cgroup_memcg(obj_exts[off].objcg);
+	struct slabobj_ext *obj_exts;
+	unsigned int off;
+
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts)
 		return NULL;
-	}
 
-	/*
-	 * folio_memcg_check() is used here, because in theory we can encounter
-	 * a folio where the slab flag has been cleared already, but
-	 * slab->obj_exts has not been freed yet
-	 * folio_memcg_check() will guarantee that a proper memory
-	 * cgroup pointer or NULL will be returned.
-	 */
-	return folio_memcg_check(folio);
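+	/* The object's index within the slab selects its obj_exts entry. */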
+	off = obj_to_index(slab->slab_cache, slab, p);
+	if (obj_exts[off].objcg)
+		return obj_cgroup_memcg(obj_exts[off].objcg);
+
+	return NULL;
 }
 
 /*
  */
 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
 {
+	struct slab *slab;
+
 	if (mem_cgroup_disabled())
 		return NULL;
 
-	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
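+	/*
+	 * virt_to_slab() returns NULL when the object does not live on a
+	 * slab page; fall back to the folio lookup below in that case.
+	 */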
+	slab = virt_to_slab(p);
+	if (slab)
+		return mem_cgroup_from_obj_slab(slab, p);
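+
+	/*
+	 * folio_memcg_check() is used here because we can encounter a folio
+	 * where the slab flag has been cleared already, but slab->obj_exts
+	 * has not been freed yet; it will return a proper memory cgroup
+	 * pointer or NULL.
+	 */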
+	return folio_memcg_check(virt_to_folio(p));
 }
 
 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)