/*
 * include/linux/vmstat.h
 *
 * Snapshot at commit "mm: vmstat: replace __count_zone_vm_events with a
 * zone id equivalent".
 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
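
/*
 * Illustrative usage (editor's sketch, not part of the original header).
 * The __count_* forms use raw_cpu ops: an update may be lost if the
 * caller is preempted mid-update, which these racy counters tolerate.
 * The count_* forms use this_cpu ops and are preempt-safe. PGFAULT and
 * PGFREE are existing vm_event_item values; nr_freed is a hypothetical
 * local:
 *
 *	__count_vm_event(PGFAULT);
 *	count_vm_event(PGFAULT);
 *	count_vm_events(PGFREE, nr_freed);
 */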

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
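
/*
 * Illustrative expansion (editor's sketch, not part of the original
 * header). Per-zone event items are laid out contiguously in zone order,
 * so offsetting the item's _NORMAL variant by (zid - ZONE_NORMAL) selects
 * the variant for any zone id; order is a hypothetical local:
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1 << order);
 *	expands to
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1 << order);
 *	which is __count_vm_events(PGALLOC_DMA, 1 << order).
 */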

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
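
/*
 * Editor's note (illustrative): on SMP the global atomics lag the per-cpu
 * differentials, so a read can race with updates and transiently observe
 * a negative sum; the clamps above keep that transient from being
 * returned as a huge unsigned value:
 *
 *	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *	(may be slightly stale, but never wraps to ULONG_MAX)
 */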

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
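
/*
 * Illustrative contrast (editor's sketch, not part of the original
 * header): the snapshot variant folds in the not-yet-folded per-cpu
 * deltas at the cost of a walk over all online cpus, so it suits slow
 * paths that need a tighter estimate:
 *
 *	unsigned long fast  = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long tight = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * The difference is bounded by the sum of the per-cpu stat thresholds.
 */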

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}


#ifdef CONFIG_NUMA
extern unsigned long sum_zone_node_page_state(int node,
						enum zone_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
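
/*
 * Illustrative expansion (editor's sketch): add/sub are thin wrappers
 * over the signed mod_* primitives, e.g.
 *
 *	sub_zone_page_state(zone, NR_MLOCK, nr);
 *	is mod_zone_page_state(zone, NR_MLOCK, -(nr));
 */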

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}


static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}


/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
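
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * freeing pages adjusts NR_FREE_PAGES, and pages from CMA pageblocks are
 * additionally tracked in NR_FREE_CMA_PAGES so the allocator knows how
 * much of the free memory is CMA-reserved; mt and order are hypothetical
 * locals:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, mt);
 */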

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */