/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. The counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	long stat[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	/* Legacy local VM stats */
	struct lruvec_stat __percpu *lruvec_stat_local;

	/* Subtree VM stats (batched updates) */
	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct memcg_shrinker_map __rcu	*shrinker_map;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * if it has to kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	/* Legacy local VM stats and events */
	struct memcg_vmstats_percpu __percpu *vmstats_local;

	/* Subtree VM stats and events (batched updates) */
	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t		vmstats[MEMCG_NR_STAT];
	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list; /* list of inherited objcgs */
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: larger batches may be necessary on very large systems.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}

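/*
 * Example (illustrative sketch): a reclaimer is expected to scale its
 * scan target by the unprotected share of a memcg, loosely modeled on
 * get_scan_count() in mm/vmscan.c. "lruvec_size" and "sc" are stand-ins
 * for the caller's reclaim context, not declarations from this header.
 *
 *	unsigned long protection, cgroup_size, scan = lruvec_size;
 *
 *	protection = mem_cgroup_protection(sc->target_mem_cgroup, memcg,
 *					   sc->memcg_low_reclaim);
 *	if (protection) {
 *		cgroup_size = mem_cgroup_size(memcg);
 *		scan -= scan * protection / (cgroup_size + 1);
 *	}
 */
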
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
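
/*
 * Example (illustrative sketch): per-memcg reclaim consults these
 * predicates after mem_cgroup_calculate_protection(), hard-skipping
 * min-protected groups and soft-skipping low-protected ones, in the
 * spirit of shrink_node_memcgs() in mm/vmscan.c ("sc" stands in for
 * the caller's scan control):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(memcg)) {
 *		continue;			(hard protection)
 *	} else if (mem_cgroup_below_low(memcg)) {
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;
 *			continue;		(soft protection)
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *	}
 */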

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data of the wanted node
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
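
/*
 * Example (illustrative sketch): callers typically resolve a memcg plus
 * a node into a lruvec before touching per-lruvec state; "nid" and
 * "nr_pages" stand in for the caller's context.
 *
 *	struct lruvec *lruvec;
 *
 *	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *	mod_lruvec_state(lruvec, NR_FILE_PAGES, nr_pages);
 */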

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

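/*
 * Example (illustrative sketch): dereferencing the objcg->memcg link
 * under RCU, loosely modeled on how the objcg uncharge path pins the
 * memcg; "objcg" and "nr_pages" stand in for the caller's context.
 *
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	__memcg_kmem_uncharge(memcg, nr_pages);
 *	rcu_read_unlock();
 */
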
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

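/*
 * Example (illustrative sketch): the canonical hierarchy walk pairs
 * mem_cgroup_iter() with mem_cgroup_iter_break() on early exit so that
 * the iterator's css reference is dropped; done() stands in for the
 * caller's termination check.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (done(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
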
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

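/*
 * Example (illustrative sketch): the double-underscore statistics
 * helpers assume the caller already runs with interrupts disabled; the
 * plain variants disable interrupts themselves. From process context:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, 1);
 *
 * From a context that already has IRQs off, the cheaper form is safe:
 *
 *	__mod_memcg_state(memcg, MEMCG_SOCK, 1);
 */
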
/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);

void mod_memcg_obj_state(void *p, int idx, int val);

static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_slab_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	struct page *head = compound_head(page); /* rmap on tail pages */
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!head->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events_local[event]);
	cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	return 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_memcg_obj_state(void *p, int idx, int val)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_slab_state(p, idx, 1);
}

static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_slab_state(p, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

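/*
 * Example (illustrative sketch): page-cache style accounting pairs an
 * increment on insertion with a decrement on removal; the surrounding
 * locking (e.g. the mapping's i_pages lock) is assumed to be held.
 *
 *	__inc_lruvec_page_state(page, NR_FILE_PAGES);	(page inserted)
 *	...
 *	__dec_lruvec_page_state(page, NR_FILE_PAGES);	(page removed)
 */
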
static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
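
/*
 * Example (illustrative sketch): networking code is expected to check
 * socket pressure on the socket's memcg before growing buffers, along
 * the lines of the sk_under_memory_pressure() helpers in net/core:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return false;	(do not expand the buffer)
 */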

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages);
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

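/*
 * Example (illustrative sketch): byte-granular charging as the slab
 * allocator uses it; the object is charged against the current task's
 * objcg, whose reference then pins it until uncharge. "flags" and
 * "size" stand in for the caller's context.
 *
 *	struct obj_cgroup *objcg;
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg && obj_cgroup_charge(objcg, flags, size)) {
 *		obj_cgroup_put(objcg);		(charge failed)
 *		objcg = NULL;
 *	}
 */
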
extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline bool memcg_kmem_bypass(void)
{
	if (in_interrupt())
		return true;

	/* Allow remote memcg charging in kthread contexts. */
	if ((!current->mm || (current->flags & PF_KTHREAD)) &&
	     !current->active_memcg)
		return true;
	return false;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

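/*
 * Example (illustrative sketch): the page allocator charges
 * __GFP_ACCOUNT allocations and uncharges them at free time, roughly as
 * the hooks in mm/page_alloc.c do:
 *
 *	if ((gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);	(charge failed)
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);	(when the page is freed)
 */
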
static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
				    unsigned int nr_pages)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge(memcg, gfp, nr_pages);
	return 0;
}

static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
				       unsigned int nr_pages)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge(memcg, nr_pages);
}

/*
 * Helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */