/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT  0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate;
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
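
/*
 * Illustrative usage sketch, not part of the original header: the plain
 * count_vm_event()/count_vm_events() are safe from any context, while the
 * __-prefixed variants assume the caller already keeps the task on one
 * cpu. PGFAULT and PGACTIVATE are real vm_event_item values; the
 * enclosing function is hypothetical.
 *
 *	static void example_account_events(void)
 *	{
 *		count_vm_event(PGFAULT);
 *		count_vm_events(PGACTIVATE, 4);
 *	}
 */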

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
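
/*
 * Illustrative expansion, a sketch only: the per-zone event counters are
 * laid out consecutively, one slot per zone, so
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1)
 *
 * expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1)
 *
 * which selects the PGALLOC slot for the DMA zone. PGALLOC is a real
 * FOR_ALL_ZONES() event group; the argument values here are made up.
 */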

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */
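
/*
 * Illustrative read-side sketch, assumed caller: NUMA_HIT is a real
 * numa_stat_item. The snapshot variant folds in the pending per-cpu
 * deltas and is therefore closer to the truth than a bare atomic read,
 * at the cost of a loop over the online cpus.
 *
 *	unsigned long hits = zone_numa_state_snapshot(zone, NUMA_HIT);
 */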

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	/* Pending per-cpu deltas can leave the sum transiently negative. */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
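
/*
 * Illustrative read-side sketch, assumed caller: NR_FILE_PAGES is a real
 * node_stat_item. Pending per-cpu deltas are not included, so treat the
 * result as an approximation suitable for heuristics, not invariants.
 *
 *	unsigned long file_pages = global_node_page_state(NR_FILE_PAGES);
 */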

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
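
/*
 * Illustrative comparison, a sketch with an assumed caller: NR_FREE_PAGES
 * is a real zone_stat_item. The plain read is cheap but may lag by the
 * per-cpu deltas; the snapshot pays a for_each_online_cpu() loop for a
 * tighter estimate, which matters for watermark-style checks.
 *
 *	unsigned long fast  = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long tight = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */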

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
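
/*
 * Illustrative sketch, assumed caller: a reclaim path can temporarily
 * tighten a node's per-cpu stat thresholds so counter updates fold into
 * the global view sooner, then restore the normal thresholds afterwards.
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	(reclaim while watermarks are being checked)
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */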
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
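
/*
 * Illustrative sketch, assumed caller: when a buddy-allocator path frees
 * 1 << order pages, NR_FREE_PAGES always moves, and NR_FREE_CMA_PAGES
 * moves as well when the pages come from a MIGRATE_CMA pageblock.
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 */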

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */