git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: memcontrol: switch to native NR_VMALLOC vmstat counter
author Johannes Weiner <hannes@cmpxchg.org>
Mon, 23 Feb 2026 16:01:07 +0000 (11:01 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:04 +0000 (13:53 -0700)
Eliminates the custom memcg counter and results in a single, consolidated
accounting call in vmalloc code.

Link: https://lkml.kernel.org/r/20260223160147.3792777-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/vmalloc.c

index 5695776f32c83cb524081ff7e5db1e8e092239ce..5173a9f1672128a2ee40f63b56cb01da3293d5fc 100644 (file)
@@ -35,7 +35,6 @@ enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
-       MEMCG_VMALLOC,
        MEMCG_KMEM,
        MEMCG_ZSWAP_B,
        MEMCG_ZSWAPPED,
index 75df24ffdf253833d2c616c338cfd26dcd5c0eaa..eb54cdf99624fa46c2f182e3975b65e0c5826714 100644 (file)
@@ -317,6 +317,7 @@ static const unsigned int memcg_node_stat_items[] = {
        NR_SHMEM_THPS,
        NR_FILE_THPS,
        NR_ANON_THPS,
+       NR_VMALLOC,
        NR_KERNEL_STACK_KB,
        NR_PAGETABLE,
        NR_SECONDARY_PAGETABLE,
@@ -352,7 +353,6 @@ static const unsigned int memcg_stat_items[] = {
        MEMCG_SWAP,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
-       MEMCG_VMALLOC,
        MEMCG_KMEM,
        MEMCG_ZSWAP_B,
        MEMCG_ZSWAPPED,
@@ -1364,7 +1364,7 @@ static const struct memory_stat memory_stats[] = {
        { "sec_pagetables",             NR_SECONDARY_PAGETABLE          },
        { "percpu",                     MEMCG_PERCPU_B                  },
        { "sock",                       MEMCG_SOCK                      },
-       { "vmalloc",                    MEMCG_VMALLOC                   },
+       { "vmalloc",                    NR_VMALLOC                      },
        { "shmem",                      NR_SHMEM                        },
 #ifdef CONFIG_ZSWAP
        { "zswap",                      MEMCG_ZSWAP_B                   },
index e9d7c2a8c753894de0defc894ebaccf4fe97ef8e..6dda97c3799e888c7801c32e6994707969e10d68 100644 (file)
@@ -3459,9 +3459,6 @@ void vfree(const void *addr)
 
        if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
                vm_reset_perms(vm);
-       /* All pages of vm should be charged to same memcg, so use first one. */
-       if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES))
-               mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages);
        for (i = 0; i < vm->nr_pages; i++) {
                struct page *page = vm->pages[i];
 
@@ -3471,7 +3468,7 @@ void vfree(const void *addr)
                 * can be freed as an array of order-0 allocations
                 */
                if (!(vm->flags & VM_MAP_PUT_PAGES))
-                       dec_node_page_state(page, NR_VMALLOC);
+                       mod_lruvec_page_state(page, NR_VMALLOC, -1);
                __free_page(page);
                cond_resched();
        }
@@ -3662,7 +3659,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        continue;
                }
 
-               mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1 << large_order);
+               mod_lruvec_page_state(page, NR_VMALLOC, 1 << large_order);
 
                split_page(page, large_order);
                for (i = 0; i < (1U << large_order); i++)
@@ -3709,7 +3706,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                                                        pages + nr_allocated);
 
                        for (i = nr_allocated; i < nr_allocated + nr; i++)
-                               inc_node_page_state(pages[i], NR_VMALLOC);
+                               mod_lruvec_page_state(pages[i], NR_VMALLOC, 1);
 
                        nr_allocated += nr;
 
@@ -3735,7 +3732,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                if (unlikely(!page))
                        break;
 
-               mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1 << order);
+               mod_lruvec_page_state(page, NR_VMALLOC, 1 << order);
 
                /*
                 * High-order allocations must be able to be treated as
@@ -3879,11 +3876,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                        vmalloc_gfp_adjust(gfp_mask, page_order), node,
                        page_order, nr_small_pages, area->pages);
 
-       /* All pages of vm should be charged to same memcg, so use first one. */
-       if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
-               mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
-                                    area->nr_pages);
-
        /*
         * If not enough pages were obtained to accomplish an
         * allocation request, free them via vfree() if any.