/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index pointing to the threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

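/*
 * Illustrative sketch (not kernel code): how a byte-sized allocation is
 * typically charged through an obj_cgroup under CONFIG_MEMCG_KMEM, using
 * the objcg helpers declared later in this header. Error handling and the
 * bookkeeping that remembers the objcg alongside the object are elided.
 *
 *	struct obj_cgroup *objcg;
 *
 *	objcg = get_obj_cgroup_from_current();	// returns with a reference
 *	if (objcg) {
 *		if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *			obj_cgroup_put(objcg);	// charge failed
 *		// else: stash objcg with the object for later uncharge
 *	}
 *	...
 *	obj_cgroup_uncharge(objcg, size);	// on free
 *	obj_cgroup_put(objcg);
 */
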
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it has to kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH	32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

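/*
 * Illustrative sketch (not kernel code): reading the memcg of a non-kmem
 * folio while holding the folio lock, one of the bindings listed above.
 *
 *	struct mem_cgroup *memcg;
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);	// stable while the folio is locked
 *	if (memcg)
 *		...			// e.g. inspect memcg->css
 *	folio_unlock(folio);
 *
 * For a kmem folio, bracket the access with rcu_read_lock() and
 * rcu_read_unlock() instead, as the comment above requires.
 */
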
/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. Unlike page_memcg(), this function can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 is possible because children_low_usage > A.elow,
	 * and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

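/*
 * Illustrative sketch (not kernel code): how a reclaimer consumes the
 * effective protection values; compare with the real usage in mm/vmscan.c.
 * The skip policy shown here is simplified.
 *
 *	unsigned long min, low;
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	mem_cgroup_protection(target_memcg, memcg, &min, &low);
 *	if (mem_cgroup_below_min(memcg))
 *		;	// always skip: usage is within memory.min
 *	else if (mem_cgroup_below_low(memcg))
 *		;	// skip unless low protection must be overridden
 */
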
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

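/*
 * Illustrative sketch (not kernel code): charging a freshly allocated
 * pagecache folio before it is published; the charge is dropped via the
 * normal release path. Error handling is reduced to the essentials.
 *
 *	folio = filemap_alloc_folio(gfp, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	// when the last reference is dropped, mem_cgroup_uncharge() runs
 */
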
int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

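/*
 * Illustrative sketch (not kernel code): the canonical way to walk a memcg
 * subtree with mem_cgroup_iter(). Breaking out of the loop early must go
 * through mem_cgroup_iter_break() so the iterator's css reference is
 * dropped; should_stop() below is a made-up predicate.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
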
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

962
7490a2d2
SB
963static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
964{
96e51ccf 965 return READ_ONCE(memcg->vmstats.state[idx]);
7490a2d2
SB
966}
967
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

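/*
 * Illustrative sketch (not kernel code): raising an event from a charge
 * path when the hard limit is hit; "over_limit" is a made-up condition.
 *
 *	if (over_limit)
 *		memcg_memory_event(memcg, MEMCG_MAX);
 *
 * The count always lands in the memcg's own memory.events.local; the loop
 * above additionally propagates it to each ancestor's memory.events unless
 * CGRP_ROOT_MEMORY_LOCAL_EVENTS is set on the default hierarchy.
 */
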
static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
		struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}

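/*
 * Illustrative sketch (not kernel code): the batching pattern these relock
 * helpers enable, keeping one lruvec locked across consecutive folios that
 * belong to the same lruvec (compare the loops in mm/vmscan.c and
 * mm/swap.c).
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		// operate on folio under lruvec->lru_lock
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */
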
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

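/*
 * Illustrative sketch (not kernel code): explicitly charging a kernel page
 * to the current memcg; explicit calls like this are only needed off the
 * normal allocator paths, where accounted (__GFP_ACCOUNT) allocations are
 * charged for you.
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);	// before freeing the page
 */
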
/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */