/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it is incremented by the number of pages. This counter is used to trigger
 * some periodic events. This is straightforward and better than using
 * jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	long stat[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[0];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	/* Legacy local VM stats */
	struct lruvec_stat __percpu *lruvec_stat_local;

	/* Subtree VM stats (batched updates) */
	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct memcg_shrinker_map __rcu *shrinker_map;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* Index of the threshold just below or equal to current usage */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
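
/*
 * Illustrative sketch, not part of the original header: the spare array is
 * what lets event unregistration avoid allocating, and thus avoid failing.
 * Removal conceptually copies the surviving entries from primary into spare
 * and swaps the two pointers under thresholds_lock, roughly:
 *
 *	new = thresholds->spare;
 *	for (i = 0, j = 0; i < thresholds->primary->size; i++)
 *		if (thresholds->primary->entries[i].eventfd != eventfd)
 *			new->entries[j++] = thresholds->primary->entries[i];
 *	thresholds->spare = thresholds->primary;
 *	rcu_assign_pointer(thresholds->primary, new);
 */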

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it kills one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	/* Legacy local VM stats and events */
	struct memcg_vmstats_percpu __percpu *vmstats_local;

	/* Subtree VM stats and events (batched updates) */
	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t vmstats[MEMCG_NR_STAT];
	atomic_long_t vmevents[NR_VM_EVENT_ITEMS];

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}
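
/*
 * Illustrative sketch, not part of the original header: reclaim code can use
 * the value returned above to scale its scan target so that protected memory
 * is spared. "scan" and "usage" are hypothetical variables in a caller such
 * as a reclaim loop:
 *
 *	unsigned long protection, usage, scan;
 *
 *	protection = mem_cgroup_protection(memcg, in_low_reclaim);
 *	usage = mem_cgroup_size(memcg);
 *	if (protection && usage > protection)
 *		scan = scan * (usage - protection) / usage;
 */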

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, struct mem_cgroup **memcgp,
				bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

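/*
 * Illustrative sketch, not part of the original header: the declarations
 * above form a two-phase charge protocol. A caller first reserves the
 * charge, then either commits it once the page is safely installed or
 * cancels it on failure. "install_page()" is a hypothetical step:
 *
 *	struct mem_cgroup *memcg;
 *	int error;
 *
 *	error = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false);
 *	if (error)
 *		return error;
 *	error = install_page(page);
 *	if (error) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return error;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */
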
static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data of the wanted node
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}
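
/*
 * Illustrative sketch, not part of the original header: a reclaim path would
 * typically resolve the lruvec for a memcg on a given node and then read LRU
 * statistics from it. "memcg" is assumed to be a valid, referenced memory
 * cgroup and "nid" an online node id:
 *
 *	struct lruvec *lruvec;
 *	unsigned long nr_inactive;
 *
 *	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *	nr_inactive = lruvec_page_state(lruvec, NR_INACTIVE_FILE);
 */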

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

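/*
 * Illustrative sketch, not part of the original header: mem_cgroup_iter()
 * is the usual way to walk a memcg subtree; passing NULL as the previous
 * memcg starts the walk, and the iterator handles css reference counting.
 * "root" (which may be NULL for the whole hierarchy) and "visit()" are
 * hypothetical:
 *
 *	struct mem_cgroup *memcg;
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL);
 *	     memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL))
 *		visit(memcg);
 *
 * Leaving the loop early requires mem_cgroup_iter_break(root, memcg).
 */
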
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
void mod_memcg_obj_state(void *p, int idx, int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events_local[event]);
	cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		cgroup_file_notify(&memcg->events_file);

		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
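
/*
 * Illustrative sketch, not part of the original header: a charge path that
 * runs into the hard limit would record the event as below. The local
 * counter is bumped once, while the hierarchical counters propagate up to
 * the root (unless memory_localevents is set on the default hierarchy):
 *
 *	struct page_counter *counter;
 *
 *	if (!page_counter_try_charge(&memcg->memory, nr_pages, &counter))
 *		memcg_memory_event(memcg, MEMCG_MAX);
 */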

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	return 0;
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_memcg_obj_state(void *p, int idx, int val)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_slab_state(p, idx, 1);
}

static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_slab_state(p, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
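
/*
 * Illustrative sketch, not part of the original header: the network stack
 * can consult the predicate above before growing a socket's buffers, so
 * that buffers stop expanding while the owning memcg is under pressure.
 * "sk" is a hypothetical socket whose sk_memcg was set up by
 * mem_cgroup_sk_alloc():
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return false;
 */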

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id)
{
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp, int order);
void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
				 unsigned int nr_pages);

extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

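/*
 * Illustrative sketch, not part of the original header: walking the
 * per-memcg clones of a root cache under slab_mutex. "root_cache" and
 * "shrink_one()" are hypothetical; cache_from_memcg_idx() is a
 * slab-internal lookup helper:
 *
 *	int idx;
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(idx) {
 *		c = cache_from_memcg_idx(root_cache, idx);
 *		if (c)
 *			shrink_one(c);
 *	}
 *	mutex_unlock(&slab_mutex);
 */
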
static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge(page, order);
}
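
/*
 * Illustrative sketch, not part of the original header: an allocator that
 * wants its pages accounted to the current cgroup can pair the two
 * static-key gated wrappers above around a page's lifetime. "gfp" and
 * "order" describe the allocation; on charge failure the page is backed out:
 *
 *	struct page *page = alloc_pages(gfp, order);
 *
 *	if (page && memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge(page, order);
 *	__free_pages(page, order);
 */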
1408
10eaec2f
RG
1409static inline int memcg_kmem_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp,
1410 int order)
60cd4bcd
SB
1411{
1412 if (memcg_kmem_enabled())
10eaec2f 1413 return __memcg_kmem_charge_memcg(memcg, gfp, order);
60cd4bcd
SB
1414 return 0;
1415}
49a18eae 1416
50591183
RG
1417static inline void memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
1418 int order)
49a18eae
RG
1419{
1420 if (memcg_kmem_enabled())
1421 __memcg_kmem_uncharge_memcg(memcg, 1 << order);
1422}
1423
33398cf2 1424/*
9f706d68 1425 * helper for accessing a memcg's index. It will be used as an index in the
33398cf2
MH
1426 * child cache array in kmem_cache, and also to derive its name. This function
1427 * will return -1 when this is not a kmem-limited memcg.
1428 */
1429static inline int memcg_cache_id(struct mem_cgroup *memcg)
1430{
1431 return memcg ? memcg->kmemcg_id : -1;
1432}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */