show_val_kb(m, "Committed_AS: ", committed);
seq_printf(m, "VmallocTotal: %8lu kB\n",
(unsigned long)VMALLOC_TOTAL >> 10);
- show_val_kb(m, "VmallocUsed: ", vmalloc_nr_pages());
+ show_val_kb(m, "VmallocUsed: ",
+ global_node_page_state(NR_VMALLOC));
show_val_kb(m, "VmallocChunk: ", 0ul);
show_val_kb(m, "Percpu: ", pcpu_nr_pages());
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
+ NR_VMALLOC,
NR_KERNEL_STACK_KB, /* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
NR_KERNEL_SCS_KB, /* measured in KiB */
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
-unsigned long vmalloc_nr_pages(void);
-
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
#else /* !CONFIG_MMU */
#define VMALLOC_TOTAL 0UL
-static inline unsigned long vmalloc_nr_pages(void) { return 0; }
static inline void set_vm_flush_reset_perms(void *addr) {}
#endif /* CONFIG_MMU */
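The exported vmalloc_nr_pages() getter and its !CONFIG_MMU stub go away with this hunk; a caller that still needs the figure would read the node stat directly. A one-line sketch of the equivalent (the helper name is illustrative, not part of the patch):

#include <linux/vmstat.h>

/* Illustrative stand-in for the removed vmalloc_nr_pages(). */
static inline unsigned long example_vmalloc_nr_pages(void)
{
	return global_node_page_state(NR_VMALLOC);
}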
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
-static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages;
static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr;
-unsigned long vmalloc_nr_pages(void)
-{
- return atomic_long_read(&nr_vmalloc_pages);
-}
-
static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
struct rb_node *n = root->rb_node;
* High-order allocs for huge vmallocs are split, so
* can be freed as an array of order-0 allocations
*/
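+ /* caller-supplied pages (VM_MAP_PUT_PAGES) were never counted in NR_VMALLOC */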
+ if (!(vm->flags & VM_MAP_PUT_PAGES))
+ dec_node_page_state(page, NR_VMALLOC);
__free_page(page);
cond_resched();
}
- if (!(vm->flags & VM_MAP_PUT_PAGES))
- atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
kvfree(vm->pages);
kfree(vm);
}
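The single atomic_long_sub() of vm->nr_pages is replaced by a per-page decrement because the pages backing one area may sit on different NUMA nodes, and each page has to be uncharged from the stat of its own node while the page still identifies that node, i.e. before __free_page(). The pattern in isolation, as a sketch (the helper name is made up for illustration):

#include <linux/vmalloc.h>
#include <linux/vmstat.h>

/* Illustrative only: return an area's pages to the stats of their owning nodes. */
static void example_unaccount_vm_pages(struct vm_struct *vm)
{
	unsigned int i;

	/* caller-provided pages were never accounted in the first place */
	if (vm->flags & VM_MAP_PUT_PAGES)
		return;

	for (i = 0; i < vm->nr_pages; i++)
		dec_node_page_state(vm->pages[i], NR_VMALLOC);
}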
continue;
}
+ mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1U << large_order);
+
split_page(page, large_order);
for (i = 0; i < (1U << large_order); i++)
pages[nr_allocated + i] = page + i;
if (!order) {
while (nr_allocated < nr_pages) {
unsigned int nr, nr_pages_request;
+ int i;
/*
* A maximum allowed request is hard-coded and is 100
nr_pages_request,
pages + nr_allocated);
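+ /* charge each bulk-allocated page to the node it came from */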
+ for (i = nr_allocated; i < nr_allocated + nr; i++)
+ inc_node_page_state(pages[i], NR_VMALLOC);
+
nr_allocated += nr;
/*
if (unlikely(!page))
break;
+ mod_node_page_state(page_pgdat(page), NR_VMALLOC, 1U << order);
+
/*
* High-order allocations must be able to be treated as
* independent small pages by callers (as they can with
vmalloc_gfp_adjust(gfp_mask, page_order), node,
page_order, nr_small_pages, area->pages);
- atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
/* All pages of vm should be charged to same memcg, so use first one. */
if (gfp_mask & __GFP_ACCOUNT && area->nr_pages)
mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC,
[I(NR_KERNEL_MISC_RECLAIMABLE)] = "nr_kernel_misc_reclaimable",
[I(NR_FOLL_PIN_ACQUIRED)] = "nr_foll_pin_acquired",
[I(NR_FOLL_PIN_RELEASED)] = "nr_foll_pin_released",
+ [I(NR_VMALLOC)] = "nr_vmalloc",
[I(NR_KERNEL_STACK_KB)] = "nr_kernel_stack",
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
[I(NR_KERNEL_SCS_KB)] = "nr_shadow_call_stack",