NR_SHMEM_THPS,
NR_FILE_THPS,
NR_ANON_THPS,
+ NR_VMALLOC,
NR_KERNEL_STACK_KB,
NR_PAGETABLE,
NR_SECONDARY_PAGETABLE,
MEMCG_SWAP,
MEMCG_SOCK,
MEMCG_PERCPU_B,
- MEMCG_VMALLOC,
MEMCG_KMEM,
MEMCG_ZSWAP_B,
MEMCG_ZSWAPPED,
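With NR_VMALLOC promoted to a node_stat_item (and the memcg-only MEMCG_VMALLOC dropped), a single lruvec helper call can maintain the per-node and per-memcg counters together, which is what the per-page conversions in the hunks below rely on. A simplified sketch of that behaviour, assuming the usual CONFIG_MEMCG wiring (the mainline helper is folio-based and differs in detail):

	/* sketch only: what the lruvec helper buys over a plain memcg stat */
	static void sketch_account_vmalloc_page(struct page *page, int val)
	{
		pg_data_t *pgdat = page_pgdat(page);
		struct mem_cgroup *memcg = folio_memcg(page_folio(page));

		if (!memcg)	/* uncharged page: node counter only */
			mod_node_page_state(pgdat, NR_VMALLOC, val);
		else		/* charged page: node + memcg in one call */
			mod_lruvec_state(mem_cgroup_lruvec(memcg, pgdat),
					 NR_VMALLOC, val);
	}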
{ "sec_pagetables", NR_SECONDARY_PAGETABLE },
{ "percpu", MEMCG_PERCPU_B },
{ "sock", MEMCG_SOCK },
- { "vmalloc", MEMCG_VMALLOC },
+ { "vmalloc", NR_VMALLOC },
{ "shmem", NR_SHMEM },
#ifdef CONFIG_ZSWAP
{ "zswap", MEMCG_ZSWAP_B },
if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
vm_reset_perms(vm);
- /* All pages of vm should be charged to same memcg, so use first one. */
- if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES))
- mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages);
for (i = 0; i < vm->nr_pages; i++) {
struct page *page = vm->pages[i];
/*
 * High-order allocs for huge vmallocs are split, so
 * can be freed as an array of order-0 allocations
 */
if (!(vm->flags & VM_MAP_PUT_PAGES))
- dec_node_page_state(page, NR_VMALLOC);
+ mod_lruvec_page_state(page, NR_VMALLOC, -1);
__free_page(page);
cond_resched();
}
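The VM_MAP_PUT_PAGES test mirrors the allocation side: pages handed over through vmap() with VM_MAP_PUT_PAGES were supplied by the caller and never accounted to NR_VMALLOC, so the free path must not decrement for them. A minimal, hypothetical caller of that interface for illustration:

	/* hypothetical driver-style usage: caller-supplied pages are freed
	 * by vfree() later, but are never accounted as NR_VMALLOC */
	void *map_donated_pages(struct page **pages, unsigned int count)
	{
		return vmap(pages, count, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	}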
continue;
}
- mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1 << large_order);
+ mod_lruvec_page_state(page, NR_VMALLOC, 1 << large_order);
split_page(page, large_order);
for (i = 0; i < (1U << large_order); i++)
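Charging the still-compound page for 1 << large_order pages before split_page() is equivalent to charging each base page afterwards: split_page() propagates the head's memcg to the tail pages (via split_page_memcg()), and all of them sit on the same node. The single call above therefore stands in for the per-page form below (sketch only); the same reasoning applies to the non-bulk order path further down:

	/* equivalent per-page form, for illustration */
	split_page(page, large_order);
	for (i = 0; i < (1U << large_order); i++)
		mod_lruvec_page_state(page + i, NR_VMALLOC, 1);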
pages + nr_allocated);
for (i = nr_allocated; i < nr_allocated + nr; i++)
- inc_node_page_state(pages[i], NR_VMALLOC);
+ mod_lruvec_page_state(pages[i], NR_VMALLOC, 1);
nr_allocated += nr;
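Because every allocation path now feeds the node counter directly, the new stat is readable with the standard vmstat accessors at any scope. A hypothetical debugging helper, for instance:

	/* hypothetical: dump global and per-node vmalloc page counts */
	static void dump_vmalloc_pages(void)
	{
		int nid;

		pr_info("vmalloc: %lu pages\n",
			global_node_page_state(NR_VMALLOC));
		for_each_online_node(nid)
			pr_info("  node %d: %lu pages\n", nid,
				node_page_state(NODE_DATA(nid), NR_VMALLOC));
	}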
if (unlikely(!page))
break;
- mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1 << order);
+ mod_lruvec_page_state(page, NR_VMALLOC, 1 << order);
/*
* High-order allocations must be able to be treated as
vmalloc_gfp_adjust(gfp_mask, page_order), node,
page_order, nr_small_pages, area->pages);
- /* All pages of vm should be charged to same memcg, so use first one. */
- if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
- mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
- area->nr_pages);
-
/*
* If not enough pages were obtained to accomplish an
* allocation request, free them via vfree() if any.
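Dropping the bulk __GFP_ACCOUNT charge is safe on this error path too: each page that was successfully allocated has already been accounted individually, so handing a partially populated area to vfree() undoes exactly those per-page increments. Roughly, assuming the usual fail-label structure of __vmalloc_area_node() (sketch, not the literal code):

	if (area->nr_pages != nr_small_pages) {
		/* every page in area->pages[] was accounted one by one,
		 * so vfree() rebalances NR_VMALLOC page by page */
		vfree(area->addr);
		return NULL;
	}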