slab: Remove folio references in memcg_slab_post_charge()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 13 Nov 2025 00:09:17 +0000 (00:09 +0000)
committer Vlastimil Babka <vbabka@suse.cz>
Thu, 13 Nov 2025 10:01:08 +0000 (11:01 +0100)
This allows us to skip the compound_head() call for large kmalloc
objects, as virt_to_page() always returns the head page in that case.
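
A minimal sketch (not part of this patch; large_kmalloc_page() is a
hypothetical helper name) of the invariant the change relies on:

	/*
	 * A large kmalloc allocation is a compound page, and kmalloc()
	 * returns page_address() of its head page, so translating the
	 * object pointer back yields the head page directly.
	 */
	static inline struct page *large_kmalloc_page(const void *p)
	{
		struct page *page = virt_to_page(p);

		/* Already the head page for a large kmalloc object: */
		VM_WARN_ON_ONCE(page != compound_head(page));
		return page;
	}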

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251113000932.1589073-4-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index d4367f25b20db9f9917e743334b06b8965f9fab6..a7c0662f89c6dd4ba2633dc94e7968d1f2bbf15f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2369,33 +2369,34 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
 {
        struct slabobj_ext *slab_exts;
        struct kmem_cache *s;
-       struct folio *folio;
+       struct page *page;
        struct slab *slab;
        unsigned long off;
 
-       folio = virt_to_folio(p);
-       if (!folio_test_slab(folio)) {
+       page = virt_to_page(p);
+       if (PageLargeKmalloc(page)) {
+               unsigned int order;
                int size;
 
-               if (folio_memcg_kmem(folio))
+               if (PageMemcgKmem(page))
                        return true;
 
-               if (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
-                                            folio_order(folio)))
+               order = large_kmalloc_order(page);
+               if (__memcg_kmem_charge_page(page, flags, order))
                        return false;
 
                /*
-                * This folio has already been accounted in the global stats but
+                * This page has already been accounted in the global stats but
                 * not in the memcg stats. So, subtract from the global and use
                 * the interface which adds to both global and memcg stats.
                 */
-               size = folio_size(folio);
-               node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size);
-               lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size);
+               size = PAGE_SIZE << order;
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
                return true;
        }
 
-       slab = folio_slab(folio);
+       slab = page_slab(page);
        s = slab->slab_cache;
 
        /*
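
As a worked example of the accounting above (a sketch, not from the
patch): for an order-2 large kmalloc page with 4 KiB base pages,

	size = PAGE_SIZE << 2;	/* 16384 bytes */

so 16 KiB is first subtracted from the node-wide NR_SLAB_UNRECLAIMABLE_B
count (charged when the page was allocated), then added back via
mod_lruvec_page_state(), which updates both the node-wide and the memcg
counters, leaving the global count unchanged while the memcg picks up
the 16 KiB.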