// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "swap.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task; /* a task moving charges */
	wait_queue_head_t waitq; /* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

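/*
 * Illustrative note (not taken from the original sources): cgroup1 control
 * files pack a resource type and an attribute into cft->private with the
 * macros above, e.g. a hypothetical MEMFILE_PRIVATE(_MEMSWAP, attr) keeps the
 * type in the high 16 bits and the attribute in the low 16 bits, and
 * MEMFILE_TYPE()/MEMFILE_ATTR() recover them; the attribute constants are
 * defined elsewhere in this file.
 */
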
/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

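/*
 * Usage sketch (illustrative only, not used in this file): a walk that bails
 * out early must drop the iterator's reference with mem_cgroup_iter_break():
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {	// should_stop() is hypothetical
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
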
static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

#ifdef CONFIG_MEMCG_KMEM
static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *    PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *    the stock is flushed,
	 *    objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we do release this object,
	 *    92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *    92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
 * conditional to this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);
#endif

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned.  The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	if (lru_gen_enabled()) {
		if (soft_limit_excess(memcg))
			lru_gen_soft_reclaim(memcg, nid);
		return;
	}

	mctz = soft_limit_tree.rb_tree_per_node[nid];
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = memcg->nodeinfo[nid];
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = memcg->nodeinfo[nid];
		mctz = soft_limit_tree.rb_tree_per_node[nid];
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/* Subset of node_stat_item for memcg stats */
static const unsigned int memcg_node_stat_items[] = {
	NR_INACTIVE_ANON,
	NR_ACTIVE_ANON,
	NR_INACTIVE_FILE,
	NR_ACTIVE_FILE,
	NR_UNEVICTABLE,
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	WORKINGSET_REFAULT_ANON,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_ANON,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_ANON,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,
	NR_FILE_MAPPED,
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SHMEM,
	NR_SHMEM_THPS,
	NR_FILE_THPS,
	NR_ANON_THPS,
	NR_KERNEL_STACK_KB,
	NR_PAGETABLE,
	NR_SECONDARY_PAGETABLE,
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
};

static const unsigned int memcg_stat_items[] = {
	MEMCG_SWAP,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
};

#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
			   ARRAY_SIZE(memcg_stat_items))
static int8_t mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;

static void init_memcg_stats(void)
{
	int8_t i, j = 0;

	BUILD_BUG_ON(MEMCG_NR_STAT >= S8_MAX);

	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i)
		mem_cgroup_stats_index[memcg_node_stat_items[i]] = ++j;

	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i)
		mem_cgroup_stats_index[memcg_stat_items[i]] = ++j;
}

static inline int memcg_stats_index(int idx)
{
	return mem_cgroup_stats_index[idx] - 1;
}

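/*
 * Worked example (assuming the arrays above are unchanged): NR_INACTIVE_ANON
 * is the first tracked node stat, so init_memcg_stats() records 1 for it and
 * memcg_stats_index(NR_INACTIVE_ANON) returns 0. An item that is not listed
 * keeps the default 0 in mem_cgroup_stats_index[], so memcg_stats_index()
 * returns -1 and callers bail out via their WARN_ONCE() checks.
 */
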
struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_MEMCG_NODE_STAT_ITEMS];

	/* Non-hierarchical (CPU aggregated) state */
	long state_local[NR_MEMCG_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
};

unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;
	int i;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	i = memcg_stats_index(idx);
	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
	PGPGIN,
	PGPGOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static int8_t mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	int8_t i;

	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= S8_MAX);

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
}

static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx] - 1;
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu *parent;
	struct memcg_vmstats *vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_VMSTAT_SIZE];
	unsigned long events_prev[NR_MEMCG_EVENTS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_VMSTAT_SIZE];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long state_local[MEMCG_VMSTAT_SIZE];
	unsigned long events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_VMSTAT_SIZE];
	unsigned long events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing, the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 *    the rstat update tree grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This can let the stats
 *    be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but only for
 *    2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
 * not rely on this as part of an acquired spinlock_t lock. These functions are
 * never used in hardirq context on PREEMPT_RT and therefore disabling
 * preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
}

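/*
 * Like memcg_stats_lock(), but without the IRQs-enabled assertion;
 * __mod_memcg_lruvec_state() below performs its own context checks.
 */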
static void __memcg_stats_lock(void)
{
	preempt_disable_nested();
}

static void memcg_stats_unlock(void)
{
	preempt_enable_nested();
}

static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic64_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

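/*
 * Rough numbers, assuming MEMCG_CHARGE_BATCH is 64 (its value at the time of
 * writing): with 8 online CPUs a subtree is considered worth flushing once
 * roughly 64 * 8 = 512 stat updates have accumulated since the last flush.
 */
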
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
	struct memcg_vmstats_percpu *statc;
	int cpu = smp_processor_id();
	unsigned int stats_updates;

	if (!val)
		return;

	cgroup_rstat_updated(memcg->css.cgroup, cpu);
	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent) {
		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
		WRITE_ONCE(statc->stats_updates, stats_updates);
		if (stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		/*
		 * If @memcg is already flush-able, increasing stats_updates is
		 * redundant. Avoid the overhead of the atomic update.
		 */
		if (!memcg_vmstats_needs_flush(statc->vmstats))
			atomic64_add(stats_updates,
				     &statc->vmstats->stats_updates);
		WRITE_ONCE(statc->stats_updates, 0);
	}
}

static void do_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	cgroup_rstat_flush(memcg->css.cgroup);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	if (memcg_vmstats_needs_flush(memcg->vmstats))
		do_flush_stats(memcg);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	do_flush_stats(root_mem_cgroup);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static int memcg_page_state_unit(int item);

/*
 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 */
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}

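/*
 * Worked example: NR_SLAB_RECLAIMABLE_B is tracked in bytes (unit 1), so a
 * 512-byte update is normalized to max(512 / PAGE_SIZE, 1) = 1 page, while a
 * zero update stays zero and is dropped early by memcg_rstat_updated().
 */
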
/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		       int val)
{
	int i = memcg_stats_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return 0;

	x = READ_ONCE(memcg->vmstats->state_local[i]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
				     enum node_stat_item idx,
				     int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	int i = memcg_stats_index(idx);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * Callers from rmap rely on disabled preemption because they never
	 * update their counter from in-interrupt context. For those counters
	 * we check that the update is never performed from an interrupt
	 * context, while other callers need to have interrupts disabled.
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[i], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);

	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
	memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	int i = memcg_events_index(idx);

	if (mem_cgroup_disabled())
		return;

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx))
		return;

	memcg_stats_lock();
	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
	memcg_rstat_updated(memcg, count);
	memcg_stats_unlock();
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events[i]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int i = memcg_events_index(event);

	if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event))
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[i]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

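/*
 * Example: charging a 512-page THP bumps PGPGIN by one event but adds 512 to
 * nr_page_events; uncharging it bumps PGPGOUT by one and likewise adds 512,
 * since nr_pages is negated before being accounted.
 */
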
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, int nid)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return;

	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, nid);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = root->nodeinfo[reclaim->pgdat->node_id];
		iter = &mz->iter;

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = iter->generation;
		else if (reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	} else if (prev) {
		pos = prev;
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css)) {
			memcg = mem_cgroup_from_css(css);
			break;
		}
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from the cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

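/*
 * Worked example: with memory.max = 1000 pages and usage = 800, the margin is
 * 200; if legacy memsw accounting is active with memsw.max = 900 and memsw
 * usage = 850, the margin is further clamped to min(200, 50) = 50 pages.
 */
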
/*
 * A routine for checking whether "memcg" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of the
 * moving cgroups. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon", NR_ANON_MAPPED },
	{ "file", NR_FILE_PAGES },
	{ "kernel", MEMCG_KMEM },
	{ "kernel_stack", NR_KERNEL_STACK_KB },
	{ "pagetables", NR_PAGETABLE },
	{ "sec_pagetables", NR_SECONDARY_PAGETABLE },
	{ "percpu", MEMCG_PERCPU_B },
	{ "sock", MEMCG_SOCK },
	{ "vmalloc", MEMCG_VMALLOC },
	{ "shmem", NR_SHMEM },
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	{ "zswap", MEMCG_ZSWAP_B },
	{ "zswapped", MEMCG_ZSWAPPED },
#endif
	{ "file_mapped", NR_FILE_MAPPED },
	{ "file_dirty", NR_FILE_DIRTY },
	{ "file_writeback", NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached", NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp", NR_ANON_THPS },
	{ "file_thp", NR_FILE_THPS },
	{ "shmem_thp", NR_SHMEM_THPS },
#endif
	{ "inactive_anon", NR_INACTIVE_ANON },
	{ "active_anon", NR_ACTIVE_ANON },
	{ "inactive_file", NR_INACTIVE_FILE },
	{ "active_file", NR_ACTIVE_FILE },
	{ "unevictable", NR_UNEVICTABLE },
	{ "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
};

ff841a06 1749/* The actual unit of the state item, not the same as the output unit */
fff66b79
MS
1750static int memcg_page_state_unit(int item)
1751{
1752 switch (item) {
1753 case MEMCG_PERCPU_B:
f4840ccf 1754 case MEMCG_ZSWAP_B:
fff66b79
MS
1755 case NR_SLAB_RECLAIMABLE_B:
1756 case NR_SLAB_UNRECLAIMABLE_B:
ff841a06
YA
1757 return 1;
1758 case NR_KERNEL_STACK_KB:
1759 return SZ_1K;
1760 default:
1761 return PAGE_SIZE;
1762 }
1763}
1764
1765/* Translate stat items to the correct unit for memory.stat output */
1766static int memcg_page_state_output_unit(int item)
1767{
1768 /*
1769 * Workingset state is actually in pages, but we export it to userspace
1770 * as a scalar count of events, so special case it here.
1771 */
1772 switch (item) {
fff66b79
MS
1773 case WORKINGSET_REFAULT_ANON:
1774 case WORKINGSET_REFAULT_FILE:
1775 case WORKINGSET_ACTIVATE_ANON:
1776 case WORKINGSET_ACTIVATE_FILE:
1777 case WORKINGSET_RESTORE_ANON:
1778 case WORKINGSET_RESTORE_FILE:
1779 case WORKINGSET_NODERECLAIM:
1780 return 1;
fff66b79 1781 default:
ff841a06 1782 return memcg_page_state_unit(item);
fff66b79
MS
1783 }
1784}
1785
1786static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1787 int item)
1788{
ff841a06
YA
1789 return memcg_page_state(memcg, item) *
1790 memcg_page_state_output_unit(item);
1791}
1792
1793static inline unsigned long memcg_page_state_local_output(
1794 struct mem_cgroup *memcg, int item)
1795{
1796 return memcg_page_state_local(memcg, item) *
1797 memcg_page_state_output_unit(item);
fff66b79
MS
1798}
1799
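/*
 * Example of the unit translation above as it appears in memory.stat:
 * NR_KERNEL_STACK_KB is tracked in KiB and multiplied by SZ_1K to
 * report bytes; the slab, percpu and zswap counters are already in
 * bytes (unit 1); most other items are tracked in pages and multiplied
 * by PAGE_SIZE; the workingset_* items are exported as raw event
 * counts.
 */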
dddb44ff 1800static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
c8713d0b 1801{
c8713d0b 1802 int i;
71cd3113 1803
c8713d0b
JW
1804 /*
1805 * Provide statistics on the state of the memory subsystem as
1806 * well as cumulative event counters that show past behavior.
1807 *
1808 * This list is ordered following a combination of these gradients:
1809 * 1) generic big picture -> specifics and details
1810 * 2) reflecting userspace activity -> reflecting kernel heuristics
1811 *
1812 * Current memory state:
1813 */
7d7ef0a4 1814 mem_cgroup_flush_stats(memcg);
c8713d0b 1815
5f9a4f4a
MS
1816 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1817 u64 size;
c8713d0b 1818
fff66b79 1819 size = memcg_page_state_output(memcg, memory_stats[i].idx);
5b42360c 1820 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
c8713d0b 1821
5f9a4f4a 1822 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
fff66b79
MS
1823 size += memcg_page_state_output(memcg,
1824 NR_SLAB_RECLAIMABLE_B);
5b42360c 1825 seq_buf_printf(s, "slab %llu\n", size);
5f9a4f4a
MS
1826 }
1827 }
c8713d0b
JW
1828
1829 /* Accumulated memory events */
5b42360c 1830 seq_buf_printf(s, "pgscan %lu\n",
c8713d0b 1831 memcg_events(memcg, PGSCAN_KSWAPD) +
57e9cc50
JW
1832 memcg_events(memcg, PGSCAN_DIRECT) +
1833 memcg_events(memcg, PGSCAN_KHUGEPAGED));
5b42360c 1834 seq_buf_printf(s, "pgsteal %lu\n",
c8713d0b 1835 memcg_events(memcg, PGSTEAL_KSWAPD) +
57e9cc50
JW
1836 memcg_events(memcg, PGSTEAL_DIRECT) +
1837 memcg_events(memcg, PGSTEAL_KHUGEPAGED));
c8713d0b 1838
8278f1c7
SB
1839 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1840 if (memcg_vm_event_stat[i] == PGPGIN ||
1841 memcg_vm_event_stat[i] == PGPGOUT)
1842 continue;
1843
5b42360c 1844 seq_buf_printf(s, "%s %lu\n",
673520f8
QZ
1845 vm_event_name(memcg_vm_event_stat[i]),
1846 memcg_events(memcg, memcg_vm_event_stat[i]));
8278f1c7 1847 }
c8713d0b
JW
1848
1849 /* The above should easily fit into one page */
5b42360c 1850 WARN_ON_ONCE(seq_buf_has_overflowed(s));
c8713d0b 1851}
71cd3113 1852
dddb44ff
YA
1853static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1854
1855static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1856{
1857 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1858 memcg_stat_format(memcg, s);
1859 else
1860 memcg1_stat_format(memcg, s);
1861 WARN_ON_ONCE(seq_buf_has_overflowed(s));
1862}
1863
e222432b 1864/**
f0c867d9 1865 * mem_cgroup_print_oom_context: Print OOM information relevant to
1866 * memory controller.
e222432b
BS
1867 * @memcg: The memory cgroup that went over limit
1868 * @p: Task that is going to be killed
1869 *
1870 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1871 * enabled
1872 */
f0c867d9 1873void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
e222432b 1874{
e222432b
BS
1875 rcu_read_lock();
1876
f0c867d9 1877 if (memcg) {
1878 pr_cont(",oom_memcg=");
1879 pr_cont_cgroup_path(memcg->css.cgroup);
1880 } else
1881 pr_cont(",global_oom");
2415b9f5 1882 if (p) {
f0c867d9 1883 pr_cont(",task_memcg=");
2415b9f5 1884 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
2415b9f5 1885 }
e222432b 1886 rcu_read_unlock();
f0c867d9 1887}
1888
1889/**
1890 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1891 * memory controller.
1892 * @memcg: The memory cgroup that went over limit
1893 */
1894void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1895{
68aaee14
TH
 1896 /* Use a static buffer, since the caller is holding oom_lock. */
1897 static char buf[PAGE_SIZE];
5b42360c 1898 struct seq_buf s;
68aaee14
TH
1899
1900 lockdep_assert_held(&oom_lock);
e222432b 1901
3e32cb2e
JW
1902 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1903 K((u64)page_counter_read(&memcg->memory)),
15b42562 1904 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
c8713d0b
JW
1905 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1906 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1907 K((u64)page_counter_read(&memcg->swap)),
32d087cd 1908 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
c8713d0b
JW
1909 else {
1910 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1911 K((u64)page_counter_read(&memcg->memsw)),
1912 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1913 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1914 K((u64)page_counter_read(&memcg->kmem)),
1915 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
58cf188e 1916 }
c8713d0b
JW
1917
1918 pr_info("Memory cgroup stats for ");
1919 pr_cont_cgroup_path(memcg->css.cgroup);
1920 pr_cont(":");
5b42360c
YA
1921 seq_buf_init(&s, buf, sizeof(buf));
1922 memory_stat_format(memcg, &s);
1923 seq_buf_do_printk(&s, KERN_INFO);
e222432b
BS
1924}
1925
a63d83f4
DR
1926/*
1927 * Return the memory (and swap, if configured) limit for a memcg.
1928 */
bbec2e15 1929unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
a63d83f4 1930{
8d387a5f
WL
1931 unsigned long max = READ_ONCE(memcg->memory.max);
1932
b94c4e94 1933 if (do_memsw_account()) {
8d387a5f
WL
1934 if (mem_cgroup_swappiness(memcg)) {
1935 /* Calculate swap excess capacity from memsw limit */
1936 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1937
1938 max += min(swap, (unsigned long)total_swap_pages);
1939 }
b94c4e94
JW
1940 } else {
1941 if (mem_cgroup_swappiness(memcg))
1942 max += min(READ_ONCE(memcg->swap.max),
1943 (unsigned long)total_swap_pages);
9a5a8f19 1944 }
bbec2e15 1945 return max;
a63d83f4
DR
1946}
1947
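/*
 * A worked example for mem_cgroup_get_max(), assuming a non-zero
 * swappiness and total_swap_pages = 500: with memsw accounting
 * (cgroup1) and memory.max = 800, memsw.max = 1000, the swap excess is
 * 1000 - 800 = 200, so the maximum is 800 + min(200, 500) = 1000 pages.
 * Without memsw accounting and swap.max = 300, it is
 * 800 + min(300, 500) = 1100 pages.
 */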
9783aa99
CD
1948unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1949{
1950 return page_counter_read(&memcg->memory);
1951}
1952
b6e6edcf 1953static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
19965460 1954 int order)
9cbb78bb 1955{
6e0fc46d
DR
1956 struct oom_control oc = {
1957 .zonelist = NULL,
1958 .nodemask = NULL,
2a966b77 1959 .memcg = memcg,
6e0fc46d
DR
1960 .gfp_mask = gfp_mask,
1961 .order = order,
6e0fc46d 1962 };
1378b37d 1963 bool ret = true;
9cbb78bb 1964
7775face
TH
1965 if (mutex_lock_killable(&oom_lock))
1966 return true;
1378b37d
YS
1967
1968 if (mem_cgroup_margin(memcg) >= (1 << order))
1969 goto unlock;
1970
7775face
TH
1971 /*
1972 * A few threads which were not waiting at mutex_lock_killable() can
1973 * fail to bail out. Therefore, check again after holding oom_lock.
1974 */
a4ebf1b6 1975 ret = task_is_dying() || out_of_memory(&oc);
1378b37d
YS
1976
1977unlock:
dc56401f 1978 mutex_unlock(&oom_lock);
7c5f64f8 1979 return ret;
9cbb78bb
DR
1980}
1981
0608f43d 1982static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
ef8f2327 1983 pg_data_t *pgdat,
0608f43d
AM
1984 gfp_t gfp_mask,
1985 unsigned long *total_scanned)
1986{
1987 struct mem_cgroup *victim = NULL;
1988 int total = 0;
1989 int loop = 0;
1990 unsigned long excess;
1991 unsigned long nr_scanned;
1992 struct mem_cgroup_reclaim_cookie reclaim = {
ef8f2327 1993 .pgdat = pgdat,
0608f43d
AM
1994 };
1995
3e32cb2e 1996 excess = soft_limit_excess(root_memcg);
0608f43d
AM
1997
1998 while (1) {
1999 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2000 if (!victim) {
2001 loop++;
2002 if (loop >= 2) {
2003 /*
2004 * If we have not been able to reclaim
 2005 * anything, it might be because there are
 2006 * no reclaimable pages under this hierarchy.
2007 */
2008 if (!total)
2009 break;
2010 /*
2011 * We want to do more targeted reclaim.
 2012 * excess >> 2 is not so excessive that we
 2013 * reclaim too much, nor so small that we keep
 2014 * coming back to reclaim from this cgroup.
2015 */
2016 if (total >= (excess >> 2) ||
2017 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2018 break;
2019 }
2020 continue;
2021 }
a9dd0a83 2022 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
ef8f2327 2023 pgdat, &nr_scanned);
0608f43d 2024 *total_scanned += nr_scanned;
3e32cb2e 2025 if (!soft_limit_excess(root_memcg))
0608f43d 2026 break;
6d61ef40 2027 }
0608f43d
AM
2028 mem_cgroup_iter_break(root_memcg, victim);
2029 return total;
6d61ef40
BS
2030}
2031
0056f4e6
JW
2032#ifdef CONFIG_LOCKDEP
2033static struct lockdep_map memcg_oom_lock_dep_map = {
2034 .name = "memcg_oom_lock",
2035};
2036#endif
2037
fb2a6fc5
JW
2038static DEFINE_SPINLOCK(memcg_oom_lock);
2039
867578cb
KH
2040/*
2041 * Check OOM-Killer is already running under our hierarchy.
2042 * If someone is running, return false.
2043 */
fb2a6fc5 2044static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
867578cb 2045{
79dfdacc 2046 struct mem_cgroup *iter, *failed = NULL;
a636b327 2047
fb2a6fc5
JW
2048 spin_lock(&memcg_oom_lock);
2049
9f3a0d09 2050 for_each_mem_cgroup_tree(iter, memcg) {
23751be0 2051 if (iter->oom_lock) {
79dfdacc
MH
2052 /*
 2053 * this subtree of our hierarchy is already locked,
 2054 * so we cannot take the lock.
2055 */
79dfdacc 2056 failed = iter;
9f3a0d09
JW
2057 mem_cgroup_iter_break(memcg, iter);
2058 break;
23751be0
JW
2059 } else
2060 iter->oom_lock = true;
7d74b06f 2061 }
867578cb 2062
fb2a6fc5
JW
2063 if (failed) {
2064 /*
 2065 * OK, we failed to lock the whole subtree, so we have
 2066 * to clean up what we set up, up to the failing subtree.
2067 */
2068 for_each_mem_cgroup_tree(iter, memcg) {
2069 if (iter == failed) {
2070 mem_cgroup_iter_break(memcg, iter);
2071 break;
2072 }
2073 iter->oom_lock = false;
79dfdacc 2074 }
0056f4e6
JW
2075 } else
2076 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
fb2a6fc5
JW
2077
2078 spin_unlock(&memcg_oom_lock);
2079
2080 return !failed;
a636b327 2081}
0b7f569e 2082
fb2a6fc5 2083static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
0b7f569e 2084{
7d74b06f
KH
2085 struct mem_cgroup *iter;
2086
fb2a6fc5 2087 spin_lock(&memcg_oom_lock);
5facae4f 2088 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
c0ff4b85 2089 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc 2090 iter->oom_lock = false;
fb2a6fc5 2091 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
2092}
2093
c0ff4b85 2094static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
2095{
2096 struct mem_cgroup *iter;
2097
c2b42d3c 2098 spin_lock(&memcg_oom_lock);
c0ff4b85 2099 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
2100 iter->under_oom++;
2101 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
2102}
2103
c0ff4b85 2104static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
2105{
2106 struct mem_cgroup *iter;
2107
867578cb 2108 /*
f0953a1b 2109 * Be careful about under_oom underflows because a child memcg
7a52d4d8 2110 * could have been added after mem_cgroup_mark_under_oom.
867578cb 2111 */
c2b42d3c 2112 spin_lock(&memcg_oom_lock);
c0ff4b85 2113 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
2114 if (iter->under_oom > 0)
2115 iter->under_oom--;
2116 spin_unlock(&memcg_oom_lock);
0b7f569e
KH
2117}
2118
867578cb
KH
2119static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2120
dc98df5a 2121struct oom_wait_info {
d79154bb 2122 struct mem_cgroup *memcg;
ac6424b9 2123 wait_queue_entry_t wait;
dc98df5a
KH
2124};
2125
ac6424b9 2126static int memcg_oom_wake_function(wait_queue_entry_t *wait,
dc98df5a
KH
2127 unsigned mode, int sync, void *arg)
2128{
d79154bb
HD
2129 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2130 struct mem_cgroup *oom_wait_memcg;
dc98df5a
KH
2131 struct oom_wait_info *oom_wait_info;
2132
2133 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
d79154bb 2134 oom_wait_memcg = oom_wait_info->memcg;
dc98df5a 2135
2314b42d
JW
2136 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
2137 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
dc98df5a 2138 return 0;
dc98df5a
KH
2139 return autoremove_wake_function(wait, mode, sync, arg);
2140}
2141
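/*
 * How the wake filter above plays out: memcg_oom_recover() passes the
 * OOMing memcg as @arg, and a waiter sleeping on behalf of another
 * memcg is only woken if the two are in an ancestor/descendant
 * relationship, i.e. if the OOM situation can actually affect the
 * waiter's charges; unrelated branches of the hierarchy keep sleeping.
 */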
c0ff4b85 2142static void memcg_oom_recover(struct mem_cgroup *memcg)
3c11ecf4 2143{
c2b42d3c
TH
2144 /*
2145 * For the following lockless ->under_oom test, the only required
2146 * guarantee is that it must see the state asserted by an OOM when
2147 * this function is called as a result of userland actions
2148 * triggered by the notification of the OOM. This is trivially
2149 * achieved by invoking mem_cgroup_mark_under_oom() before
2150 * triggering notification.
2151 */
2152 if (memcg && memcg->under_oom)
f4b90b70 2153 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
3c11ecf4
KH
2154}
2155
becdf89d
SB
2156/*
 2157 * Returns true if it successfully killed one or more processes, though in some
2158 * corner cases it can return true even without killing any process.
2159 */
2160static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
0b7f569e 2161{
becdf89d 2162 bool locked, ret;
7056d3a3 2163
29ef680a 2164 if (order > PAGE_ALLOC_COSTLY_ORDER)
becdf89d 2165 return false;
29ef680a 2166
7a1adfdd
RG
2167 memcg_memory_event(memcg, MEMCG_OOM);
2168
867578cb 2169 /*
49426420
JW
2170 * We are in the middle of the charge context here, so we
2171 * don't want to block when potentially sitting on a callstack
2172 * that holds all kinds of filesystem and mm locks.
2173 *
29ef680a
MH
2174 * cgroup1 allows disabling the OOM killer and waiting for outside
2175 * handling until the charge can succeed; remember the context and put
2176 * the task to sleep at the end of the page fault when all locks are
2177 * released.
49426420 2178 *
29ef680a
MH
2179 * On the other hand, in-kernel OOM killer allows for an async victim
2180 * memory reclaim (oom_reaper) and that means that we are not solely
 2181 * relying on the oom victim to make forward progress and we can
2182 * invoke the oom killer here.
2183 *
2184 * Please note that mem_cgroup_out_of_memory might fail to find a
2185 * victim and then we have to bail out from the charge path.
867578cb 2186 */
17c56de6 2187 if (READ_ONCE(memcg->oom_kill_disable)) {
becdf89d
SB
2188 if (current->in_user_fault) {
2189 css_get(&memcg->css);
2190 current->memcg_in_oom = memcg;
becdf89d
SB
2191 }
2192 return false;
29ef680a
MH
2193 }
2194
7056d3a3
MH
2195 mem_cgroup_mark_under_oom(memcg);
2196
2197 locked = mem_cgroup_oom_trylock(memcg);
2198
2199 if (locked)
2200 mem_cgroup_oom_notify(memcg);
2201
2202 mem_cgroup_unmark_under_oom(memcg);
becdf89d 2203 ret = mem_cgroup_out_of_memory(memcg, mask, order);
7056d3a3
MH
2204
2205 if (locked)
2206 mem_cgroup_oom_unlock(memcg);
29ef680a 2207
7056d3a3 2208 return ret;
3812c8c8
JW
2209}
2210
2211/**
2212 * mem_cgroup_oom_synchronize - complete memcg OOM handling
49426420 2213 * @handle: actually kill/wait or just clean up the OOM state
3812c8c8 2214 *
49426420
JW
2215 * This has to be called at the end of a page fault if the memcg OOM
2216 * handler was enabled.
3812c8c8 2217 *
49426420 2218 * Memcg supports userspace OOM handling where failed allocations must
3812c8c8
JW
2219 * sleep on a waitqueue until the userspace task resolves the
2220 * situation. Sleeping directly in the charge context with all kinds
2221 * of locks held is not a good idea, instead we remember an OOM state
2222 * in the task and mem_cgroup_oom_synchronize() has to be called at
49426420 2223 * the end of the page fault to complete the OOM handling.
3812c8c8
JW
2224 *
2225 * Returns %true if an ongoing memcg OOM situation was detected and
49426420 2226 * completed, %false otherwise.
3812c8c8 2227 */
49426420 2228bool mem_cgroup_oom_synchronize(bool handle)
3812c8c8 2229{
626ebc41 2230 struct mem_cgroup *memcg = current->memcg_in_oom;
3812c8c8 2231 struct oom_wait_info owait;
49426420 2232 bool locked;
3812c8c8
JW
2233
2234 /* OOM is global, do not handle */
3812c8c8 2235 if (!memcg)
49426420 2236 return false;
3812c8c8 2237
7c5f64f8 2238 if (!handle)
49426420 2239 goto cleanup;
3812c8c8
JW
2240
2241 owait.memcg = memcg;
2242 owait.wait.flags = 0;
2243 owait.wait.func = memcg_oom_wake_function;
2244 owait.wait.private = current;
2055da97 2245 INIT_LIST_HEAD(&owait.wait.entry);
867578cb 2246
3812c8c8 2247 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
49426420
JW
2248 mem_cgroup_mark_under_oom(memcg);
2249
2250 locked = mem_cgroup_oom_trylock(memcg);
2251
2252 if (locked)
2253 mem_cgroup_oom_notify(memcg);
2254
857f2139
HX
2255 schedule();
2256 mem_cgroup_unmark_under_oom(memcg);
2257 finish_wait(&memcg_oom_waitq, &owait.wait);
49426420 2258
18b1d18b 2259 if (locked)
fb2a6fc5 2260 mem_cgroup_oom_unlock(memcg);
49426420 2261cleanup:
626ebc41 2262 current->memcg_in_oom = NULL;
3812c8c8 2263 css_put(&memcg->css);
867578cb 2264 return true;
0b7f569e
KH
2265}
2266
3d8b38eb
RG
2267/**
2268 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2269 * @victim: task to be killed by the OOM killer
2270 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2271 *
2272 * Returns a pointer to a memory cgroup, which has to be cleaned up
 2273 * by killing all of its OOM-killable tasks.
2274 *
2275 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2276 */
2277struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2278 struct mem_cgroup *oom_domain)
2279{
2280 struct mem_cgroup *oom_group = NULL;
2281 struct mem_cgroup *memcg;
2282
2283 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2284 return NULL;
2285
2286 if (!oom_domain)
2287 oom_domain = root_mem_cgroup;
2288
2289 rcu_read_lock();
2290
2291 memcg = mem_cgroup_from_task(victim);
7848ed62 2292 if (mem_cgroup_is_root(memcg))
3d8b38eb
RG
2293 goto out;
2294
48fe267c
RG
2295 /*
2296 * If the victim task has been asynchronously moved to a different
2297 * memory cgroup, we might end up killing tasks outside oom_domain.
2298 * In this case it's better to ignore memory.group.oom.
2299 */
2300 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2301 goto out;
2302
3d8b38eb
RG
2303 /*
2304 * Traverse the memory cgroup hierarchy from the victim task's
2305 * cgroup up to the OOMing cgroup (or root) to find the
2306 * highest-level memory cgroup with oom.group set.
2307 */
2308 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
eaf7b66b 2309 if (READ_ONCE(memcg->oom_group))
3d8b38eb
RG
2310 oom_group = memcg;
2311
2312 if (memcg == oom_domain)
2313 break;
2314 }
2315
2316 if (oom_group)
2317 css_get(&oom_group->css);
2318out:
2319 rcu_read_unlock();
2320
2321 return oom_group;
2322}
2323
2324void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2325{
2326 pr_info("Tasks in ");
2327 pr_cont_cgroup_path(memcg->css.cgroup);
2328 pr_cont(" are going to be killed due to memory.oom.group set\n");
2329}
2330
d7365e78 2331/**
f70ad448
MWO
2332 * folio_memcg_lock - Bind a folio to its memcg.
2333 * @folio: The folio.
32047e2a 2334 *
f70ad448 2335 * This function prevents unlocked LRU folios from being moved to
739f79fc
JW
2336 * another cgroup.
2337 *
f70ad448
MWO
2338 * It ensures lifetime of the bound memcg. The caller is responsible
2339 * for the lifetime of the folio.
d69b042f 2340 */
f70ad448 2341void folio_memcg_lock(struct folio *folio)
89c06bd5
KH
2342{
2343 struct mem_cgroup *memcg;
6de22619 2344 unsigned long flags;
89c06bd5 2345
6de22619
JW
2346 /*
2347 * The RCU lock is held throughout the transaction. The fast
2348 * path can get away without acquiring the memcg->move_lock
2349 * because page moving starts with an RCU grace period.
739f79fc 2350 */
d7365e78
JW
2351 rcu_read_lock();
2352
2353 if (mem_cgroup_disabled())
1c824a68 2354 return;
89c06bd5 2355again:
f70ad448 2356 memcg = folio_memcg(folio);
29833315 2357 if (unlikely(!memcg))
1c824a68 2358 return;
d7365e78 2359
20ad50d6
AS
2360#ifdef CONFIG_PROVE_LOCKING
2361 local_irq_save(flags);
2362 might_lock(&memcg->move_lock);
2363 local_irq_restore(flags);
2364#endif
2365
bdcbb659 2366 if (atomic_read(&memcg->moving_account) <= 0)
1c824a68 2367 return;
89c06bd5 2368
6de22619 2369 spin_lock_irqsave(&memcg->move_lock, flags);
f70ad448 2370 if (memcg != folio_memcg(folio)) {
6de22619 2371 spin_unlock_irqrestore(&memcg->move_lock, flags);
89c06bd5
KH
2372 goto again;
2373 }
6de22619
JW
2374
2375 /*
1c824a68
JW
2376 * When charge migration first begins, we can have multiple
2377 * critical sections holding the fast-path RCU lock and one
 2378 * holding the slowpath move_lock. Track the task that holds the
6c77b607 2379 * move_lock for folio_memcg_unlock().
6de22619
JW
2380 */
2381 memcg->move_lock_task = current;
2382 memcg->move_lock_flags = flags;
89c06bd5 2383}
f70ad448 2384
f70ad448 2385static void __folio_memcg_unlock(struct mem_cgroup *memcg)
89c06bd5 2386{
6de22619
JW
2387 if (memcg && memcg->move_lock_task == current) {
2388 unsigned long flags = memcg->move_lock_flags;
2389
2390 memcg->move_lock_task = NULL;
2391 memcg->move_lock_flags = 0;
2392
2393 spin_unlock_irqrestore(&memcg->move_lock, flags);
2394 }
89c06bd5 2395
d7365e78 2396 rcu_read_unlock();
89c06bd5 2397}
739f79fc
JW
2398
2399/**
f70ad448
MWO
2400 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2401 * @folio: The folio.
2402 *
2403 * This releases the binding created by folio_memcg_lock(). This does
2404 * not change the accounting of this folio to its memcg, but it does
2405 * permit others to change it.
739f79fc 2406 */
f70ad448 2407void folio_memcg_unlock(struct folio *folio)
739f79fc 2408{
f70ad448
MWO
2409 __folio_memcg_unlock(folio_memcg(folio));
2410}
9da7b521 2411
fead2b86 2412struct memcg_stock_pcp {
56751146 2413 local_lock_t stock_lock;
fead2b86
MH
 2414 struct mem_cgroup *cached; /* this is never the root cgroup */
2415 unsigned int nr_pages;
2416
bf4f0599
RG
2417#ifdef CONFIG_MEMCG_KMEM
2418 struct obj_cgroup *cached_objcg;
68ac5b3c 2419 struct pglist_data *cached_pgdat;
bf4f0599 2420 unsigned int nr_bytes;
68ac5b3c
WL
2421 int nr_slab_reclaimable_b;
2422 int nr_slab_unreclaimable_b;
bf4f0599
RG
2423#endif
2424
cdec2e42 2425 struct work_struct work;
26fe6168 2426 unsigned long flags;
a0db00fc 2427#define FLUSHING_CACHED_CHARGE 0
cdec2e42 2428};
56751146
SAS
2429static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2430 .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2431};
9f50fad6 2432static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42 2433
bf4f0599 2434#ifdef CONFIG_MEMCG_KMEM
56751146 2435static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
bf4f0599
RG
2436static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2437 struct mem_cgroup *root_memcg);
a8c49af3 2438static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
bf4f0599
RG
2439
2440#else
56751146 2441static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
bf4f0599 2442{
56751146 2443 return NULL;
bf4f0599
RG
2444}
2445static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2446 struct mem_cgroup *root_memcg)
2447{
2448 return false;
2449}
a8c49af3
YA
2450static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2451{
2452}
bf4f0599
RG
2453#endif
2454
a0956d54
SS
2455/**
2456 * consume_stock: Try to consume stocked charge on this cpu.
2457 * @memcg: memcg to consume from.
2458 * @nr_pages: how many pages to charge.
2459 *
2460 * The charges will only happen if @memcg matches the current cpu's memcg
2461 * stock, and at least @nr_pages are available in that stock. Failure to
2462 * service an allocation will refill the stock.
2463 *
2464 * returns true if successful, false otherwise.
cdec2e42 2465 */
a0956d54 2466static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2467{
2468 struct memcg_stock_pcp *stock;
1872b3bc 2469 unsigned int stock_pages;
db2ba40c 2470 unsigned long flags;
3e32cb2e 2471 bool ret = false;
cdec2e42 2472
a983b5eb 2473 if (nr_pages > MEMCG_CHARGE_BATCH)
3e32cb2e 2474 return ret;
a0956d54 2475
56751146 2476 local_lock_irqsave(&memcg_stock.stock_lock, flags);
db2ba40c
JW
2477
2478 stock = this_cpu_ptr(&memcg_stock);
1872b3bc
BL
2479 stock_pages = READ_ONCE(stock->nr_pages);
2480 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
2481 WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
3e32cb2e
JW
2482 ret = true;
2483 }
db2ba40c 2484
56751146 2485 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
db2ba40c 2486
cdec2e42
KH
2487 return ret;
2488}
2489
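/*
 * Example of how the stock interacts with the charge path further down:
 * the first 1-page charge on a CPU misses the stock, charges a full
 * batch (MEMCG_CHARGE_BATCH, typically 64 pages) to the page counters
 * and parks the 63-page surplus here via refill_stock(); the next 63
 * single-page charges on that CPU are then satisfied by consume_stock()
 * without touching the page counters at all.
 */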
2490/*
3e32cb2e 2491 * Returns stocks cached in percpu and resets the cached information.
cdec2e42
KH
2492 */
2493static void drain_stock(struct memcg_stock_pcp *stock)
2494{
1872b3bc 2495 unsigned int stock_pages = READ_ONCE(stock->nr_pages);
f785a8f2 2496 struct mem_cgroup *old = READ_ONCE(stock->cached);
cdec2e42 2497
1a3e1f40
JW
2498 if (!old)
2499 return;
2500
1872b3bc
BL
2501 if (stock_pages) {
2502 page_counter_uncharge(&old->memory, stock_pages);
7941d214 2503 if (do_memsw_account())
1872b3bc
BL
2504 page_counter_uncharge(&old->memsw, stock_pages);
2505
2506 WRITE_ONCE(stock->nr_pages, 0);
cdec2e42 2507 }
1a3e1f40
JW
2508
2509 css_put(&old->css);
f785a8f2 2510 WRITE_ONCE(stock->cached, NULL);
cdec2e42
KH
2511}
2512
cdec2e42
KH
2513static void drain_local_stock(struct work_struct *dummy)
2514{
db2ba40c 2515 struct memcg_stock_pcp *stock;
56751146 2516 struct obj_cgroup *old = NULL;
db2ba40c
JW
2517 unsigned long flags;
2518
72f0184c 2519 /*
5c49cf9a
MH
2520 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2521 * drain_stock races is that we always operate on local CPU stock
 2522 * here with IRQs disabled.
72f0184c 2523 */
56751146 2524 local_lock_irqsave(&memcg_stock.stock_lock, flags);
db2ba40c
JW
2525
2526 stock = this_cpu_ptr(&memcg_stock);
56751146 2527 old = drain_obj_stock(stock);
cdec2e42 2528 drain_stock(stock);
26fe6168 2529 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
db2ba40c 2530
56751146 2531 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
91b71e78 2532 obj_cgroup_put(old);
cdec2e42
KH
2533}
2534
2535/*
3e32cb2e 2536 * Cache charges (nr_pages) in the local per-cpu area.
320cc51d 2537 * They will be consumed by the consume_stock() function later.
cdec2e42 2538 */
af9a3b69 2539static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42 2540{
db2ba40c 2541 struct memcg_stock_pcp *stock;
1872b3bc 2542 unsigned int stock_pages;
cdec2e42 2543
db2ba40c 2544 stock = this_cpu_ptr(&memcg_stock);
f785a8f2 2545 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
cdec2e42 2546 drain_stock(stock);
1a3e1f40 2547 css_get(&memcg->css);
f785a8f2 2548 WRITE_ONCE(stock->cached, memcg);
cdec2e42 2549 }
1872b3bc
BL
2550 stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
2551 WRITE_ONCE(stock->nr_pages, stock_pages);
db2ba40c 2552
1872b3bc 2553 if (stock_pages > MEMCG_CHARGE_BATCH)
475d0487 2554 drain_stock(stock);
af9a3b69
JW
2555}
2556
2557static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2558{
2559 unsigned long flags;
475d0487 2560
56751146 2561 local_lock_irqsave(&memcg_stock.stock_lock, flags);
af9a3b69 2562 __refill_stock(memcg, nr_pages);
56751146 2563 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
cdec2e42
KH
2564}
2565
2566/*
c0ff4b85 2567 * Drains all per-CPU charge caches for given root_memcg resp. subtree
6d3d6aa2 2568 * of the hierarchy under it.
cdec2e42 2569 */
6d3d6aa2 2570static void drain_all_stock(struct mem_cgroup *root_memcg)
cdec2e42 2571{
26fe6168 2572 int cpu, curcpu;
d38144b7 2573
6d3d6aa2
JW
 2574 /* If someone's already draining, avoid running more workers. */
2575 if (!mutex_trylock(&percpu_charge_mutex))
2576 return;
72f0184c
MH
2577 /*
 2578 * Notify other cpus that a system-wide "drain" is running.
2579 * We do not care about races with the cpu hotplug because cpu down
2580 * as well as workers from this path always operate on the local
2581 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2582 */
0790ed62
SAS
2583 migrate_disable();
2584 curcpu = smp_processor_id();
cdec2e42
KH
2585 for_each_online_cpu(cpu) {
2586 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
c0ff4b85 2587 struct mem_cgroup *memcg;
e1a366be 2588 bool flush = false;
26fe6168 2589
e1a366be 2590 rcu_read_lock();
f785a8f2 2591 memcg = READ_ONCE(stock->cached);
1872b3bc 2592 if (memcg && READ_ONCE(stock->nr_pages) &&
e1a366be
RG
2593 mem_cgroup_is_descendant(memcg, root_memcg))
2594 flush = true;
27fb0956 2595 else if (obj_stock_flush_required(stock, root_memcg))
bf4f0599 2596 flush = true;
e1a366be
RG
2597 rcu_read_unlock();
2598
2599 if (flush &&
2600 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
d1a05b69
MH
2601 if (cpu == curcpu)
2602 drain_local_stock(&stock->work);
6a792697 2603 else if (!cpu_is_isolated(cpu))
d1a05b69
MH
2604 schedule_work_on(cpu, &stock->work);
2605 }
cdec2e42 2606 }
0790ed62 2607 migrate_enable();
9f50fad6 2608 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2609}
2610
2cd21c89
JW
2611static int memcg_hotplug_cpu_dead(unsigned int cpu)
2612{
2613 struct memcg_stock_pcp *stock;
a3d4c05a 2614
2cd21c89
JW
2615 stock = &per_cpu(memcg_stock, cpu);
2616 drain_stock(stock);
a3d4c05a 2617
308167fc 2618 return 0;
cdec2e42
KH
2619}
2620
b3ff9291
CD
2621static unsigned long reclaim_high(struct mem_cgroup *memcg,
2622 unsigned int nr_pages,
2623 gfp_t gfp_mask)
f7e1cb6e 2624{
b3ff9291
CD
2625 unsigned long nr_reclaimed = 0;
2626
f7e1cb6e 2627 do {
e22c6ed9
JW
2628 unsigned long pflags;
2629
d1663a90
JK
2630 if (page_counter_read(&memcg->memory) <=
2631 READ_ONCE(memcg->memory.high))
f7e1cb6e 2632 continue;
e22c6ed9 2633
e27be240 2634 memcg_memory_event(memcg, MEMCG_HIGH);
e22c6ed9
JW
2635
2636 psi_memstall_enter(&pflags);
b3ff9291 2637 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
73b73bac 2638 gfp_mask,
55ab834a 2639 MEMCG_RECLAIM_MAY_SWAP);
e22c6ed9 2640 psi_memstall_leave(&pflags);
4bf17307
CD
2641 } while ((memcg = parent_mem_cgroup(memcg)) &&
2642 !mem_cgroup_is_root(memcg));
b3ff9291
CD
2643
2644 return nr_reclaimed;
f7e1cb6e
JW
2645}
2646
2647static void high_work_func(struct work_struct *work)
2648{
2649 struct mem_cgroup *memcg;
2650
2651 memcg = container_of(work, struct mem_cgroup, high_work);
a983b5eb 2652 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
f7e1cb6e
JW
2653}
2654
0e4b01df
CD
2655/*
2656 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2657 * enough to still cause a significant slowdown in most cases, while still
2658 * allowing diagnostics and tracing to proceed without becoming stuck.
2659 */
2660#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2661
2662/*
 2663 * When calculating the delay, we use these on either side of the exponentiation to
 2664 * maintain precision and scale to a reasonable number of jiffies (see the table
 2665 * below).
2666 *
2667 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2668 * overage ratio to a delay.
ac5ddd0f 2669 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
0e4b01df
CD
2670 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2671 * to produce a reasonable delay curve.
2672 *
2673 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2674 * reasonable delay curve compared to precision-adjusted overage, not
2675 * penalising heavily at first, but still making sure that growth beyond the
2676 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2677 * example, with a high of 100 megabytes:
2678 *
2679 * +-------+------------------------+
2680 * | usage | time to allocate in ms |
2681 * +-------+------------------------+
2682 * | 100M | 0 |
2683 * | 101M | 6 |
2684 * | 102M | 25 |
2685 * | 103M | 57 |
2686 * | 104M | 102 |
2687 * | 105M | 159 |
2688 * | 106M | 230 |
2689 * | 107M | 313 |
2690 * | 108M | 409 |
2691 * | 109M | 518 |
2692 * | 110M | 639 |
2693 * | 111M | 774 |
2694 * | 112M | 921 |
2695 * | 113M | 1081 |
2696 * | 114M | 1254 |
2697 * | 115M | 1439 |
2698 * | 116M | 1638 |
2699 * | 117M | 1849 |
2700 * | 118M | 2000 |
2701 * | 119M | 2000 |
2702 * | 120M | 2000 |
2703 * +-------+------------------------+
2704 */
2705 #define MEMCG_DELAY_PRECISION_SHIFT 20
2706 #define MEMCG_DELAY_SCALING_SHIFT 14
2707
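/*
 * A worked example of how the 101M row in the table above falls out of
 * the helpers below, assuming CONFIG_HZ=1000 and 4K pages:
 * high = 25600 pages, usage = 25856 pages, so calculate_overage()
 * yields ((25856 - 25600) << 20) / 25600 = 10485, and
 * calculate_high_delay() computes 10485 * 10485 * 1000 >> (20 + 14)
 * ~= 6 jiffies, i.e. roughly 6ms for a MEMCG_CHARGE_BATCH-sized
 * allocation, matching the table entry.
 */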
8a5dbc65 2708static u64 calculate_overage(unsigned long usage, unsigned long high)
b23afb93 2709{
8a5dbc65 2710 u64 overage;
b23afb93 2711
8a5dbc65
JK
2712 if (usage <= high)
2713 return 0;
e26733e0 2714
8a5dbc65
JK
2715 /*
2716 * Prevent division by 0 in overage calculation by acting as if
2717 * it was a threshold of 1 page
2718 */
2719 high = max(high, 1UL);
9b8b1754 2720
8a5dbc65
JK
2721 overage = usage - high;
2722 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2723 return div64_u64(overage, high);
2724}
e26733e0 2725
8a5dbc65
JK
2726static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2727{
2728 u64 overage, max_overage = 0;
e26733e0 2729
8a5dbc65
JK
2730 do {
2731 overage = calculate_overage(page_counter_read(&memcg->memory),
d1663a90 2732 READ_ONCE(memcg->memory.high));
8a5dbc65 2733 max_overage = max(overage, max_overage);
e26733e0
CD
2734 } while ((memcg = parent_mem_cgroup(memcg)) &&
2735 !mem_cgroup_is_root(memcg));
2736
8a5dbc65
JK
2737 return max_overage;
2738}
2739
4b82ab4f
JK
2740static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2741{
2742 u64 overage, max_overage = 0;
2743
2744 do {
2745 overage = calculate_overage(page_counter_read(&memcg->swap),
2746 READ_ONCE(memcg->swap.high));
2747 if (overage)
2748 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2749 max_overage = max(overage, max_overage);
2750 } while ((memcg = parent_mem_cgroup(memcg)) &&
2751 !mem_cgroup_is_root(memcg));
2752
2753 return max_overage;
2754}
2755
8a5dbc65
JK
2756/*
2757 * Get the number of jiffies that we should penalise a mischievous cgroup which
2758 * is exceeding its memory.high by checking both it and its ancestors.
2759 */
2760static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2761 unsigned int nr_pages,
2762 u64 max_overage)
2763{
2764 unsigned long penalty_jiffies;
2765
e26733e0
CD
2766 if (!max_overage)
2767 return 0;
0e4b01df
CD
2768
2769 /*
0e4b01df
CD
2770 * We use overage compared to memory.high to calculate the number of
2771 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2772 * fairly lenient on small overages, and increasingly harsh when the
2773 * memcg in question makes it clear that it has no intention of stopping
2774 * its crazy behaviour, so we exponentially increase the delay based on
2775 * overage amount.
2776 */
e26733e0
CD
2777 penalty_jiffies = max_overage * max_overage * HZ;
2778 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2779 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
0e4b01df
CD
2780
2781 /*
2782 * Factor in the task's own contribution to the overage, such that four
2783 * N-sized allocations are throttled approximately the same as one
2784 * 4N-sized allocation.
2785 *
2786 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
 2787 * larger the current charge batch is than that.
2788 */
ff144e69 2789 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
e26733e0
CD
2790}
2791
2792/*
63fd3270
JW
2793 * Reclaims memory over the high limit. Called directly from
2794 * try_charge() (context permitting), as well as from the userland
2795 * return path where reclaim is always able to block.
e26733e0 2796 */
9ea9cb00 2797void mem_cgroup_handle_over_high(gfp_t gfp_mask)
e26733e0
CD
2798{
2799 unsigned long penalty_jiffies;
2800 unsigned long pflags;
b3ff9291 2801 unsigned long nr_reclaimed;
e26733e0 2802 unsigned int nr_pages = current->memcg_nr_pages_over_high;
d977aa93 2803 int nr_retries = MAX_RECLAIM_RETRIES;
e26733e0 2804 struct mem_cgroup *memcg;
b3ff9291 2805 bool in_retry = false;
e26733e0
CD
2806
2807 if (likely(!nr_pages))
2808 return;
2809
2810 memcg = get_mem_cgroup_from_mm(current->mm);
e26733e0
CD
2811 current->memcg_nr_pages_over_high = 0;
2812
b3ff9291 2813retry_reclaim:
63fd3270
JW
2814 /*
2815 * Bail if the task is already exiting. Unlike memory.max,
2816 * memory.high enforcement isn't as strict, and there is no
2817 * OOM killer involved, which means the excess could already
2818 * be much bigger (and still growing) than it could for
2819 * memory.max; the dying task could get stuck in fruitless
2820 * reclaim for a long time, which isn't desirable.
2821 */
2822 if (task_is_dying())
2823 goto out;
2824
b3ff9291
CD
2825 /*
2826 * The allocating task should reclaim at least the batch size, but for
2827 * subsequent retries we only want to do what's necessary to prevent oom
2828 * or breaching resource isolation.
2829 *
2830 * This is distinct from memory.max or page allocator behaviour because
2831 * memory.high is currently batched, whereas memory.max and the page
2832 * allocator run every time an allocation is made.
2833 */
2834 nr_reclaimed = reclaim_high(memcg,
2835 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
9ea9cb00 2836 gfp_mask);
b3ff9291 2837
e26733e0
CD
2838 /*
2839 * memory.high is breached and reclaim is unable to keep up. Throttle
2840 * allocators proactively to slow down excessive growth.
2841 */
8a5dbc65
JK
2842 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2843 mem_find_max_overage(memcg));
0e4b01df 2844
4b82ab4f
JK
2845 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2846 swap_find_max_overage(memcg));
2847
ff144e69
JK
2848 /*
2849 * Clamp the max delay per usermode return so as to still keep the
2850 * application moving forwards and also permit diagnostics, albeit
2851 * extremely slowly.
2852 */
2853 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2854
0e4b01df
CD
2855 /*
2856 * Don't sleep if the amount of jiffies this memcg owes us is so low
2857 * that it's not even worth doing, in an attempt to be nice to those who
2858 * go only a small amount over their memory.high value and maybe haven't
2859 * been aggressively reclaimed enough yet.
2860 */
2861 if (penalty_jiffies <= HZ / 100)
2862 goto out;
2863
b3ff9291
CD
2864 /*
2865 * If reclaim is making forward progress but we're still over
2866 * memory.high, we want to encourage that rather than doing allocator
2867 * throttling.
2868 */
2869 if (nr_reclaimed || nr_retries--) {
2870 in_retry = true;
2871 goto retry_reclaim;
2872 }
2873
0e4b01df 2874 /*
63fd3270
JW
2875 * Reclaim didn't manage to push usage below the limit, slow
2876 * this allocating task down.
2877 *
0e4b01df
CD
2878 * If we exit early, we're guaranteed to die (since
2879 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2880 * need to account for any ill-begotten jiffies to pay them off later.
2881 */
2882 psi_memstall_enter(&pflags);
2883 schedule_timeout_killable(penalty_jiffies);
2884 psi_memstall_leave(&pflags);
2885
2886out:
2887 css_put(&memcg->css);
b23afb93
TH
2888}
2889
c5c8b16b
MS
2890static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2891 unsigned int nr_pages)
8a9f3ccd 2892{
a983b5eb 2893 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
d977aa93 2894 int nr_retries = MAX_RECLAIM_RETRIES;
6539cc05 2895 struct mem_cgroup *mem_over_limit;
3e32cb2e 2896 struct page_counter *counter;
6539cc05 2897 unsigned long nr_reclaimed;
a4ebf1b6 2898 bool passed_oom = false;
73b73bac 2899 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
b70a2a21 2900 bool drained = false;
d6e103a7 2901 bool raised_max_event = false;
e22c6ed9 2902 unsigned long pflags;
a636b327 2903
6539cc05 2904retry:
b6b6cc72 2905 if (consume_stock(memcg, nr_pages))
10d53c74 2906 return 0;
8a9f3ccd 2907
7941d214 2908 if (!do_memsw_account() ||
6071ca52
JW
2909 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2910 if (page_counter_try_charge(&memcg->memory, batch, &counter))
6539cc05 2911 goto done_restock;
7941d214 2912 if (do_memsw_account())
3e32cb2e
JW
2913 page_counter_uncharge(&memcg->memsw, batch);
2914 mem_over_limit = mem_cgroup_from_counter(counter, memory);
3fbe7244 2915 } else {
3e32cb2e 2916 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
73b73bac 2917 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
3fbe7244 2918 }
7a81b88c 2919
6539cc05
JW
2920 if (batch > nr_pages) {
2921 batch = nr_pages;
2922 goto retry;
2923 }
6d61ef40 2924
89a28483
JW
2925 /*
2926 * Prevent unbounded recursion when reclaim operations need to
2927 * allocate memory. This might exceed the limits temporarily,
2928 * but we prefer facilitating memory reclaim and getting back
2929 * under the limit over triggering OOM kills in these cases.
2930 */
2931 if (unlikely(current->flags & PF_MEMALLOC))
2932 goto force;
2933
06b078fc
JW
2934 if (unlikely(task_in_memcg_oom(current)))
2935 goto nomem;
2936
d0164adc 2937 if (!gfpflags_allow_blocking(gfp_mask))
6539cc05 2938 goto nomem;
4b534334 2939
e27be240 2940 memcg_memory_event(mem_over_limit, MEMCG_MAX);
d6e103a7 2941 raised_max_event = true;
241994ed 2942
e22c6ed9 2943 psi_memstall_enter(&pflags);
b70a2a21 2944 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
55ab834a 2945 gfp_mask, reclaim_options);
e22c6ed9 2946 psi_memstall_leave(&pflags);
6539cc05 2947
61e02c74 2948 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
6539cc05 2949 goto retry;
28c34c29 2950
b70a2a21 2951 if (!drained) {
6d3d6aa2 2952 drain_all_stock(mem_over_limit);
b70a2a21
JW
2953 drained = true;
2954 goto retry;
2955 }
2956
28c34c29
JW
2957 if (gfp_mask & __GFP_NORETRY)
2958 goto nomem;
6539cc05
JW
2959 /*
2960 * Even though the limit is exceeded at this point, reclaim
2961 * may have been able to free some pages. Retry the charge
2962 * before killing the task.
2963 *
2964 * Only for regular pages, though: huge pages are rather
2965 * unlikely to succeed so close to the limit, and we fall back
2966 * to regular pages anyway in case of failure.
2967 */
61e02c74 2968 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
6539cc05
JW
2969 goto retry;
2970 /*
 2971 * During a task move, charge accounts can be counted twice. So it's
 2972 * better to wait until the end of the task move if one is in progress.
2973 */
2974 if (mem_cgroup_wait_acct_move(mem_over_limit))
2975 goto retry;
2976
9b130619
JW
2977 if (nr_retries--)
2978 goto retry;
2979
38d38493 2980 if (gfp_mask & __GFP_RETRY_MAYFAIL)
29ef680a
MH
2981 goto nomem;
2982
a4ebf1b6
VA
2983 /* Avoid endless loop for tasks bypassed by the oom killer */
2984 if (passed_oom && task_is_dying())
2985 goto nomem;
6539cc05 2986
29ef680a
MH
2987 /*
 2988 * Keep retrying as long as the memcg oom killer is able to make
 2989 * forward progress, or bypass the charge if the oom killer
2990 * couldn't make any progress.
2991 */
becdf89d
SB
2992 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2993 get_order(nr_pages * PAGE_SIZE))) {
a4ebf1b6 2994 passed_oom = true;
d977aa93 2995 nr_retries = MAX_RECLAIM_RETRIES;
29ef680a 2996 goto retry;
29ef680a 2997 }
7a81b88c 2998nomem:
1461e8c2
SB
2999 /*
3000 * Memcg doesn't have a dedicated reserve for atomic
3001 * allocations. But like the global atomic pool, we need to
3002 * put the burden of reclaim on regular allocation requests
3003 * and let these go through as privileged allocations.
3004 */
3005 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
3168ecbe 3006 return -ENOMEM;
10d53c74 3007force:
d6e103a7
RG
3008 /*
3009 * If the allocation has to be enforced, don't forget to raise
3010 * a MEMCG_MAX event.
3011 */
3012 if (!raised_max_event)
3013 memcg_memory_event(mem_over_limit, MEMCG_MAX);
3014
10d53c74
TH
3015 /*
3016 * The allocation either can't fail or will lead to more memory
3017 * being freed very soon. Allow memory usage go over the limit
3018 * temporarily by force charging it.
3019 */
3020 page_counter_charge(&memcg->memory, nr_pages);
7941d214 3021 if (do_memsw_account())
10d53c74 3022 page_counter_charge(&memcg->memsw, nr_pages);
10d53c74
TH
3023
3024 return 0;
6539cc05
JW
3025
3026done_restock:
3027 if (batch > nr_pages)
3028 refill_stock(memcg, batch - nr_pages);
b23afb93 3029
241994ed 3030 /*
b23afb93
TH
3031 * If the hierarchy is above the normal consumption range, schedule
3032 * reclaim on returning to userland. We can perform reclaim here
71baba4b 3033 * if __GFP_RECLAIM but let's always punt for simplicity and so that
b23afb93
TH
3034 * GFP_KERNEL can consistently be used during reclaim. @memcg is
3035 * not recorded as it most likely matches current's and won't
3036 * change in the meantime. As high limit is checked again before
3037 * reclaim, the cost of mismatch is negligible.
241994ed
JW
3038 */
3039 do {
4b82ab4f
JK
3040 bool mem_high, swap_high;
3041
3042 mem_high = page_counter_read(&memcg->memory) >
3043 READ_ONCE(memcg->memory.high);
3044 swap_high = page_counter_read(&memcg->swap) >
3045 READ_ONCE(memcg->swap.high);
3046
3047 /* Don't bother a random interrupted task */
086f694a 3048 if (!in_task()) {
4b82ab4f 3049 if (mem_high) {
f7e1cb6e
JW
3050 schedule_work(&memcg->high_work);
3051 break;
3052 }
4b82ab4f
JK
3053 continue;
3054 }
3055
3056 if (mem_high || swap_high) {
3057 /*
3058 * The allocating tasks in this cgroup will need to do
3059 * reclaim or be throttled to prevent further growth
3060 * of the memory or swap footprints.
3061 *
3062 * Target some best-effort fairness between the tasks,
3063 * and distribute reclaim work and delay penalties
3064 * based on how much each task is actually allocating.
3065 */
9516a18a 3066 current->memcg_nr_pages_over_high += batch;
b23afb93
TH
3067 set_notify_resume(current);
3068 break;
3069 }
241994ed 3070 } while ((memcg = parent_mem_cgroup(memcg)));
10d53c74 3071
63fd3270
JW
3072 /*
3073 * Reclaim is set up above to be called from the userland
3074 * return path. But also attempt synchronous reclaim to avoid
3075 * excessive overrun while the task is still inside the
3076 * kernel. If this is successful, the return path will see it
3077 * when it rechecks the overage and simply bail out.
3078 */
c9afe31e
SB
3079 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
3080 !(current->flags & PF_MEMALLOC) &&
63fd3270 3081 gfpflags_allow_blocking(gfp_mask))
9ea9cb00 3082 mem_cgroup_handle_over_high(gfp_mask);
10d53c74 3083 return 0;
7a81b88c 3084}
8a9f3ccd 3085
c5c8b16b
MS
3086static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
3087 unsigned int nr_pages)
3088{
3089 if (mem_cgroup_is_root(memcg))
3090 return 0;
3091
3092 return try_charge_memcg(memcg, gfp_mask, nr_pages);
3093}
3094
4b569387
NP
3095/**
3096 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
3097 * @memcg: memcg previously charged.
3098 * @nr_pages: number of pages previously charged.
3099 */
3100void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
a3032a2c 3101{
ce00a967
JW
3102 if (mem_cgroup_is_root(memcg))
3103 return;
3104
3e32cb2e 3105 page_counter_uncharge(&memcg->memory, nr_pages);
7941d214 3106 if (do_memsw_account())
3e32cb2e 3107 page_counter_uncharge(&memcg->memsw, nr_pages);
d01dd17f
KH
3108}
3109
118f2875 3110static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
0a31bc97 3111{
118f2875 3112 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
0a31bc97 3113 /*
a5eb011a 3114 * Any of the following ensures page's memcg stability:
0a31bc97 3115 *
a0b5b414
JW
3116 * - the page lock
3117 * - LRU isolation
6c77b607 3118 * - folio_memcg_lock()
a0b5b414 3119 * - exclusive reference
018ee47f 3120 * - mem_cgroup_trylock_pages()
0a31bc97 3121 */
118f2875 3122 folio->memcg_data = (unsigned long)memcg;
7a81b88c 3123}
66e1707b 3124
4b569387
NP
3125/**
3126 * mem_cgroup_commit_charge - commit a previously successful try_charge().
3127 * @folio: folio to commit the charge to.
3128 * @memcg: memcg previously charged.
3129 */
3130void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
3131{
3132 css_get(&memcg->css);
3133 commit_charge(folio, memcg);
3134
3135 local_irq_disable();
3136 mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
3137 memcg_check_events(memcg, folio_nid(folio));
3138 local_irq_enable();
3139}
3140
84c07d11 3141#ifdef CONFIG_MEMCG_KMEM
41eb5df1 3142
91882c16
SB
3143static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
3144 struct pglist_data *pgdat,
3145 enum node_stat_item idx, int nr)
a7ebf564
WL
3146{
3147 struct mem_cgroup *memcg;
3148 struct lruvec *lruvec;
3149
91882c16
SB
3150 lockdep_assert_irqs_disabled();
3151
a7ebf564
WL
3152 rcu_read_lock();
3153 memcg = obj_cgroup_memcg(objcg);
3154 lruvec = mem_cgroup_lruvec(memcg, pgdat);
91882c16 3155 __mod_memcg_lruvec_state(lruvec, idx, nr);
a7ebf564
WL
3156 rcu_read_unlock();
3157}
3158
fc4db90f
RG
3159static __always_inline
3160struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
8380ce47 3161{
8380ce47 3162 /*
9855609b
RG
3163 * Slab objects are accounted individually, not per-page.
3164 * Memcg membership data for each individual object is saved in
21c690a3 3165 * slab->obj_exts.
8380ce47 3166 */
4b5f8d9a 3167 if (folio_test_slab(folio)) {
21c690a3 3168 struct slabobj_ext *obj_exts;
4b5f8d9a 3169 struct slab *slab;
9855609b
RG
3170 unsigned int off;
3171
4b5f8d9a 3172 slab = folio_slab(folio);
21c690a3
SB
3173 obj_exts = slab_obj_exts(slab);
3174 if (!obj_exts)
4b5f8d9a
VB
3175 return NULL;
3176
3177 off = obj_to_index(slab->slab_cache, slab, p);
21c690a3
SB
3178 if (obj_exts[off].objcg)
3179 return obj_cgroup_memcg(obj_exts[off].objcg);
10befea9
RG
3180
3181 return NULL;
9855609b 3182 }
8380ce47 3183
bcfe06bf 3184 /*
becacb04 3185 * folio_memcg_check() is used here, because in theory we can encounter
4b5f8d9a 3186 * a folio where the slab flag has been cleared already, but
21c690a3 3187 * slab->obj_exts has not been freed yet.
becacb04 3188 * folio_memcg_check() will guarantee that a proper memory
bcfe06bf
RG
3189 * cgroup pointer or NULL will be returned.
3190 */
becacb04 3191 return folio_memcg_check(folio);
8380ce47
RG
3192}
3193
fc4db90f
RG
3194/*
3195 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3196 *
3197 * A passed kernel object can be a slab object, vmalloc object or a generic
3198 * kernel page, so different mechanisms for getting the memory cgroup pointer
3199 * should be used.
3200 *
3201 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
 3202 * cannot know for sure how the kernel object is implemented.
3203 * mem_cgroup_from_obj() can be safely used in such cases.
3204 *
3205 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3206 * cgroup_mutex, etc.
3207 */
3208struct mem_cgroup *mem_cgroup_from_obj(void *p)
3209{
3210 struct folio *folio;
3211
3212 if (mem_cgroup_disabled())
3213 return NULL;
3214
3215 if (unlikely(is_vmalloc_addr(p)))
3216 folio = page_folio(vmalloc_to_page(p));
3217 else
3218 folio = virt_to_folio(p);
3219
3220 return mem_cgroup_from_obj_folio(folio, p);
3221}
3222
3223/*
3224 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3225 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects,
3226 * allocated using vmalloc().
3227 *
3228 * A passed kernel object must be a slab object or a generic kernel page.
3229 *
3230 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3231 * cgroup_mutex, etc.
3232 */
3233struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3234{
3235 if (mem_cgroup_disabled())
3236 return NULL;
3237
3238 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3239}
3240
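/*
 * An illustrative usage sketch (not a caller from this file;
 * do_something() is a placeholder). Here p is a kmalloc()'ed object and
 * the RCU lock pins the returned memcg:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_slab_obj(p);
 *	if (memcg)
 *		do_something(memcg);
 *	rcu_read_unlock();
 *
 * For an object that may have been vmalloc()'ed, mem_cgroup_from_obj()
 * must be used instead, since it goes through vmalloc_to_page() first.
 */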
f4840ccf
JW
3241static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3242{
3243 struct obj_cgroup *objcg = NULL;
3244
7848ed62 3245 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
f4840ccf 3246 objcg = rcu_dereference(memcg->objcg);
7d0715d0 3247 if (likely(objcg && obj_cgroup_tryget(objcg)))
f4840ccf
JW
3248 break;
3249 objcg = NULL;
3250 }
3251 return objcg;
3252}
3253
1aacbd35
RG
3254static struct obj_cgroup *current_objcg_update(void)
3255{
3256 struct mem_cgroup *memcg;
3257 struct obj_cgroup *old, *objcg = NULL;
3258
3259 do {
3260 /* Atomically drop the update bit. */
3261 old = xchg(&current->objcg, NULL);
3262 if (old) {
3263 old = (struct obj_cgroup *)
3264 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
91b71e78 3265 obj_cgroup_put(old);
1aacbd35
RG
3266
3267 old = NULL;
3268 }
3269
3270 /* If new objcg is NULL, no reason for the second atomic update. */
3271 if (!current->mm || (current->flags & PF_KTHREAD))
3272 return NULL;
3273
3274 /*
3275 * Release the objcg pointer from the previous iteration,
 3276 * if try_cmpxchg() below fails.
3277 */
3278 if (unlikely(objcg)) {
3279 obj_cgroup_put(objcg);
3280 objcg = NULL;
3281 }
3282
3283 /*
3284 * Obtain the new objcg pointer. The current task can be
3285 * asynchronously moved to another memcg and the previous
3286 * memcg can be offlined. So let's get the memcg pointer
3287 * and try get a reference to objcg under a rcu read lock.
3288 */
3289
3290 rcu_read_lock();
3291 memcg = mem_cgroup_from_task(current);
3292 objcg = __get_obj_cgroup_from_memcg(memcg);
3293 rcu_read_unlock();
3294
3295 /*
3296 * Try set up a new objcg pointer atomically. If it
3297 * fails, it means the update flag was set concurrently, so
3298 * the whole procedure should be repeated.
3299 */
3300 } while (!try_cmpxchg(&current->objcg, &old, objcg));
3301
3302 return objcg;
3303}
3304
e86828e5
RG
3305__always_inline struct obj_cgroup *current_obj_cgroup(void)
3306{
3307 struct mem_cgroup *memcg;
3308 struct obj_cgroup *objcg;
3309
3310 if (in_task()) {
3311 memcg = current->active_memcg;
3312 if (unlikely(memcg))
3313 goto from_memcg;
3314
3315 objcg = READ_ONCE(current->objcg);
3316 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3317 objcg = current_objcg_update();
3318 /*
3319 * The objcg reference is kept by the task, so it's safe
3320 * for the current task to use the objcg.
3321 */
3322 return objcg;
3323 }
3324
3325 memcg = this_cpu_read(int_active_memcg);
3326 if (unlikely(memcg))
3327 goto from_memcg;
3328
3329 return NULL;
3330
3331from_memcg:
5f79489a 3332 objcg = NULL;
e86828e5
RG
3333 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3334 /*
3335 * Memcg pointer is protected by scope (see set_active_memcg())
3336 * and is pinning the corresponding objcg, so objcg can't go
3337 * away and can be used within the scope without any additional
3338 * protection.
3339 */
3340 objcg = rcu_dereference_check(memcg->objcg, 1);
3341 if (likely(objcg))
3342 break;
e86828e5
RG
3343 }
3344
3345 return objcg;
3346}
3347
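/*
 * Illustrative sketch (not part of the original file): how a caller might use
 * current_obj_cgroup(). The returned pointer is only stable within the current
 * task / set_active_memcg() scope, so a longer-lived user takes its own
 * reference. pin_current_objcg() is a hypothetical helper name.
 */
static inline struct obj_cgroup *pin_current_objcg(void)
{
	struct obj_cgroup *objcg = current_obj_cgroup();

	if (objcg)
		obj_cgroup_get(objcg);	/* keep it alive beyond the current scope */
	return objcg;
}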
074e3e26 3348struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
f4840ccf
JW
3349{
3350 struct obj_cgroup *objcg;
3351
f7a449f7 3352 if (!memcg_kmem_online())
f4840ccf
JW
3353 return NULL;
3354
074e3e26
MWO
3355 if (folio_memcg_kmem(folio)) {
3356 objcg = __folio_objcg(folio);
f4840ccf
JW
3357 obj_cgroup_get(objcg);
3358 } else {
3359 struct mem_cgroup *memcg;
bf4f0599 3360
f4840ccf 3361 rcu_read_lock();
074e3e26 3362 memcg = __folio_memcg(folio);
f4840ccf
JW
3363 if (memcg)
3364 objcg = __get_obj_cgroup_from_memcg(memcg);
3365 else
3366 objcg = NULL;
3367 rcu_read_unlock();
3368 }
bf4f0599
RG
3369 return objcg;
3370}
3371
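/*
 * Illustrative sketch (not part of the original file): get_obj_cgroup_from_folio()
 * returns a referenced objcg (or NULL), so every successful lookup is paired
 * with obj_cgroup_put(). folio_objcg_example() is a hypothetical caller.
 */
static inline void folio_objcg_example(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);

	if (!objcg)
		return;
	/* ... use objcg, e.g. charge additional bytes against it ... */
	obj_cgroup_put(objcg);
}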
a8c49af3
YA
3372static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3373{
3374 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3375 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3376 if (nr_pages > 0)
3377 page_counter_charge(&memcg->kmem, nr_pages);
3378 else
3379 page_counter_uncharge(&memcg->kmem, -nr_pages);
3380 }
3381}
3382
3383
f1286fae
MS
3384/*
3385 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3386 * @objcg: object cgroup to uncharge
3387 * @nr_pages: number of pages to uncharge
3388 */
e74d2259
MS
3389static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3390 unsigned int nr_pages)
3391{
3392 struct mem_cgroup *memcg;
3393
3394 memcg = get_mem_cgroup_from_objcg(objcg);
e74d2259 3395
a8c49af3 3396 memcg_account_kmem(memcg, -nr_pages);
f1286fae 3397 refill_stock(memcg, nr_pages);
e74d2259 3398
e74d2259 3399 css_put(&memcg->css);
e74d2259
MS
3400}
3401
f1286fae
MS
3402/*
3403 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3404 * @objcg: object cgroup to charge
45264778 3405 * @gfp: reclaim mode
92d0510c 3406 * @nr_pages: number of pages to charge
45264778
VD
3407 *
3408 * Returns 0 on success, an error code on failure.
3409 */
f1286fae
MS
3410static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3411 unsigned int nr_pages)
7ae1e1d0 3412{
f1286fae 3413 struct mem_cgroup *memcg;
7ae1e1d0
GC
3414 int ret;
3415
f1286fae
MS
3416 memcg = get_mem_cgroup_from_objcg(objcg);
3417
c5c8b16b 3418 ret = try_charge_memcg(memcg, gfp, nr_pages);
52c29b04 3419 if (ret)
f1286fae 3420 goto out;
52c29b04 3421
a8c49af3 3422 memcg_account_kmem(memcg, nr_pages);
f1286fae
MS
3423out:
3424 css_put(&memcg->css);
4b13f64d 3425
f1286fae 3426 return ret;
4b13f64d
RG
3427}
3428
45264778 3429/**
f4b00eab 3430 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
45264778
VD
3431 * @page: page to charge
3432 * @gfp: reclaim mode
3433 * @order: allocation order
3434 *
3435 * Returns 0 on success, an error code on failure.
3436 */
f4b00eab 3437int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
7ae1e1d0 3438{
b4e0b68f 3439 struct obj_cgroup *objcg;
fcff7d7e 3440 int ret = 0;
7ae1e1d0 3441
e86828e5 3442 objcg = current_obj_cgroup();
b4e0b68f
MS
3443 if (objcg) {
3444 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
4d96ba35 3445 if (!ret) {
e86828e5 3446 obj_cgroup_get(objcg);
b4e0b68f 3447 page->memcg_data = (unsigned long)objcg |
18b2db3b 3448 MEMCG_DATA_KMEM;
1a3e1f40 3449 return 0;
4d96ba35 3450 }
c4159a75 3451 }
d05e83a6 3452 return ret;
7ae1e1d0 3453}
49a18eae 3454
45264778 3455/**
f4b00eab 3456 * __memcg_kmem_uncharge_page: uncharge a kmem page
45264778
VD
3457 * @page: page to uncharge
3458 * @order: allocation order
3459 */
f4b00eab 3460void __memcg_kmem_uncharge_page(struct page *page, int order)
7ae1e1d0 3461{
1b7e4464 3462 struct folio *folio = page_folio(page);
b4e0b68f 3463 struct obj_cgroup *objcg;
f3ccb2c4 3464 unsigned int nr_pages = 1 << order;
7ae1e1d0 3465
1b7e4464 3466 if (!folio_memcg_kmem(folio))
7ae1e1d0
GC
3467 return;
3468
1b7e4464 3469 objcg = __folio_objcg(folio);
b4e0b68f 3470 obj_cgroup_uncharge_pages(objcg, nr_pages);
1b7e4464 3471 folio->memcg_data = 0;
b4e0b68f 3472 obj_cgroup_put(objcg);
60d3fd32 3473}
bf4f0599 3474
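/*
 * Illustrative sketch (not part of the original file): how the two helpers
 * above pair up around a raw page allocation, roughly what the page allocator
 * does for __GFP_ACCOUNT allocations. alloc_accounted_pages() and
 * free_accounted_pages() are hypothetical names.
 */
static inline struct page *alloc_accounted_pages(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (page && __memcg_kmem_charge_page(page, gfp, order)) {
		__free_pages(page, order);	/* charge failed, back out */
		return NULL;
	}
	return page;
}

static inline void free_accounted_pages(struct page *page, int order)
{
	__memcg_kmem_uncharge_page(page, order);	/* no-op if the page isn't kmem-charged */
	__free_pages(page, order);
}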
91882c16 3475static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
68ac5b3c
WL
3476 enum node_stat_item idx, int nr)
3477{
fead2b86 3478 struct memcg_stock_pcp *stock;
56751146 3479 struct obj_cgroup *old = NULL;
68ac5b3c
WL
3480 unsigned long flags;
3481 int *bytes;
3482
56751146 3483 local_lock_irqsave(&memcg_stock.stock_lock, flags);
fead2b86
MH
3484 stock = this_cpu_ptr(&memcg_stock);
3485
68ac5b3c
WL
3486 /*
3487 * Save vmstat data in stock and skip vmstat array update unless
3488 * accumulating over a page of vmstat data or when pgdat or idx
3489 * changes.
3490 */
3b8abb32 3491 if (READ_ONCE(stock->cached_objcg) != objcg) {
56751146 3492 old = drain_obj_stock(stock);
68ac5b3c
WL
3493 obj_cgroup_get(objcg);
3494 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3495 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3b8abb32 3496 WRITE_ONCE(stock->cached_objcg, objcg);
68ac5b3c
WL
3497 stock->cached_pgdat = pgdat;
3498 } else if (stock->cached_pgdat != pgdat) {
3499 /* Flush the existing cached vmstat data */
7fa0dacb
WL
3500 struct pglist_data *oldpg = stock->cached_pgdat;
3501
68ac5b3c 3502 if (stock->nr_slab_reclaimable_b) {
91882c16 3503 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
68ac5b3c
WL
3504 stock->nr_slab_reclaimable_b);
3505 stock->nr_slab_reclaimable_b = 0;
3506 }
3507 if (stock->nr_slab_unreclaimable_b) {
91882c16 3508 __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
68ac5b3c
WL
3509 stock->nr_slab_unreclaimable_b);
3510 stock->nr_slab_unreclaimable_b = 0;
3511 }
3512 stock->cached_pgdat = pgdat;
3513 }
3514
3515 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3516 : &stock->nr_slab_unreclaimable_b;
3517 /*
3518 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3519 * cached locally at least once before being pushed out.
3520 */
3521 if (!*bytes) {
3522 *bytes = nr;
3523 nr = 0;
3524 } else {
3525 *bytes += nr;
3526 if (abs(*bytes) > PAGE_SIZE) {
3527 nr = *bytes;
3528 *bytes = 0;
3529 } else {
3530 nr = 0;
3531 }
3532 }
3533 if (nr)
91882c16 3534 __mod_objcg_mlstate(objcg, pgdat, idx, nr);
68ac5b3c 3535
56751146 3536 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
91b71e78 3537 obj_cgroup_put(old);
68ac5b3c
WL
3538}
3539
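/*
 * Worked example (illustrative, not part of the original file): with
 * PAGE_SIZE == 4096, sixty-four +64 byte updates for NR_SLAB_RECLAIMABLE_B on
 * the same pgdat only accumulate in stock->nr_slab_reclaimable_b; the 65th
 * update pushes the batched 4160 bytes to the vmstat array in a single
 * __mod_objcg_mlstate() call. A change of the cached objcg or pgdat flushes
 * the partial batch immediately.
 */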
bf4f0599
RG
3540static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3541{
fead2b86 3542 struct memcg_stock_pcp *stock;
bf4f0599
RG
3543 unsigned long flags;
3544 bool ret = false;
3545
56751146 3546 local_lock_irqsave(&memcg_stock.stock_lock, flags);
fead2b86
MH
3547
3548 stock = this_cpu_ptr(&memcg_stock);
3b8abb32 3549 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
bf4f0599
RG
3550 stock->nr_bytes -= nr_bytes;
3551 ret = true;
3552 }
3553
56751146 3554 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
bf4f0599
RG
3555
3556 return ret;
3557}
3558
56751146 3559static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
bf4f0599 3560{
3b8abb32 3561 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
bf4f0599
RG
3562
3563 if (!old)
56751146 3564 return NULL;
bf4f0599
RG
3565
3566 if (stock->nr_bytes) {
3567 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3568 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3569
af9a3b69
JW
3570 if (nr_pages) {
3571 struct mem_cgroup *memcg;
3572
3573 memcg = get_mem_cgroup_from_objcg(old);
3574
3575 memcg_account_kmem(memcg, -nr_pages);
3576 __refill_stock(memcg, nr_pages);
3577
3578 css_put(&memcg->css);
3579 }
bf4f0599
RG
3580
3581 /*
3582 * The leftover is flushed to the centralized per-memcg value.
3583 * On the next attempt to refill obj stock it will be moved
3584 * to a per-cpu stock (probably on another CPU), see
3585 * refill_obj_stock().
3586 *
3587 * How often it's flushed is a trade-off between the memory
3588 * limit enforcement accuracy and potential CPU contention,
3589 * so it might be changed in the future.
3590 */
3591 atomic_add(nr_bytes, &old->nr_charged_bytes);
3592 stock->nr_bytes = 0;
3593 }
3594
68ac5b3c
WL
3595 /*
3596 * Flush the vmstat data in current stock
3597 */
3598 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3599 if (stock->nr_slab_reclaimable_b) {
91882c16 3600 __mod_objcg_mlstate(old, stock->cached_pgdat,
68ac5b3c
WL
3601 NR_SLAB_RECLAIMABLE_B,
3602 stock->nr_slab_reclaimable_b);
3603 stock->nr_slab_reclaimable_b = 0;
3604 }
3605 if (stock->nr_slab_unreclaimable_b) {
91882c16 3606 __mod_objcg_mlstate(old, stock->cached_pgdat,
68ac5b3c
WL
3607 NR_SLAB_UNRECLAIMABLE_B,
3608 stock->nr_slab_unreclaimable_b);
3609 stock->nr_slab_unreclaimable_b = 0;
3610 }
3611 stock->cached_pgdat = NULL;
3612 }
3613
3b8abb32 3614 WRITE_ONCE(stock->cached_objcg, NULL);
56751146
SAS
3615 /*
3616 * The `old' objcg needs to be released by the caller via
3617 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3618 */
3619 return old;
bf4f0599
RG
3620}
3621
3622static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3623 struct mem_cgroup *root_memcg)
3624{
3b8abb32 3625 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
bf4f0599
RG
3626 struct mem_cgroup *memcg;
3627
3b8abb32
RG
3628 if (objcg) {
3629 memcg = obj_cgroup_memcg(objcg);
bf4f0599
RG
3630 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3631 return true;
3632 }
3633
3634 return false;
3635}
3636
5387c904
WL
3637static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3638 bool allow_uncharge)
bf4f0599 3639{
fead2b86 3640 struct memcg_stock_pcp *stock;
56751146 3641 struct obj_cgroup *old = NULL;
bf4f0599 3642 unsigned long flags;
5387c904 3643 unsigned int nr_pages = 0;
bf4f0599 3644
56751146 3645 local_lock_irqsave(&memcg_stock.stock_lock, flags);
fead2b86
MH
3646
3647 stock = this_cpu_ptr(&memcg_stock);
3b8abb32 3648 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
56751146 3649 old = drain_obj_stock(stock);
bf4f0599 3650 obj_cgroup_get(objcg);
3b8abb32 3651 WRITE_ONCE(stock->cached_objcg, objcg);
5387c904
WL
3652 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3653 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3654 allow_uncharge = true; /* Allow uncharge when objcg changes */
bf4f0599
RG
3655 }
3656 stock->nr_bytes += nr_bytes;
3657
5387c904
WL
3658 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3659 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3660 stock->nr_bytes &= (PAGE_SIZE - 1);
3661 }
bf4f0599 3662
56751146 3663 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
91b71e78 3664 obj_cgroup_put(old);
5387c904
WL
3665
3666 if (nr_pages)
3667 obj_cgroup_uncharge_pages(objcg, nr_pages);
bf4f0599
RG
3668}
3669
3670int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3671{
bf4f0599
RG
3672 unsigned int nr_pages, nr_bytes;
3673 int ret;
3674
3675 if (consume_obj_stock(objcg, size))
3676 return 0;
3677
3678 /*
5387c904 3679 * In theory, objcg->nr_charged_bytes can have enough
bf4f0599 3680 * pre-charged bytes to satisfy the allocation. However,
5387c904
WL
3681 * flushing objcg->nr_charged_bytes requires two atomic
3682 * operations, and objcg->nr_charged_bytes can't be big.
3683 * The shared objcg->nr_charged_bytes can also become a
3684 * performance bottleneck if all tasks of the same memcg are
3685 * trying to update it. So it's better to ignore it and try to
3686 * grab some new pages. The stock's nr_bytes will be flushed to
3687 * objcg->nr_charged_bytes later on when objcg changes.
3688 *
3689 * The stock's nr_bytes may contain enough pre-charged bytes
3690 * to allow one less page to be charged, but we can't rely
3691 * on the pre-charged bytes not being changed outside of
3692 * consume_obj_stock() or refill_obj_stock(). So ignore those
3693 * pre-charged bytes as well when charging pages. To avoid a
3694 * page uncharge right after a page charge, we set the
3695 * allow_uncharge flag to false when calling refill_obj_stock()
3696 * to temporarily allow the pre-charged bytes to exceed the page
3697 * size limit. The maximum reachable value of the pre-charged
3698 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3699 * race.
bf4f0599 3700 */
bf4f0599
RG
3701 nr_pages = size >> PAGE_SHIFT;
3702 nr_bytes = size & (PAGE_SIZE - 1);
3703
3704 if (nr_bytes)
3705 nr_pages += 1;
3706
e74d2259 3707 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
bf4f0599 3708 if (!ret && nr_bytes)
5387c904 3709 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
bf4f0599 3710
bf4f0599
RG
3711 return ret;
3712}
3713
3714void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3715{
5387c904 3716 refill_obj_stock(objcg, size, true);
bf4f0599
RG
3717}
3718
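/*
 * Illustrative sketch (not part of the original file): byte-granular charging
 * as the slab allocator uses it. charged_kmalloc()/charged_kfree() are
 * hypothetical wrappers; the real slab hooks are the __memcg_slab_*_hook()
 * functions below.
 */
static inline void *charged_kmalloc(struct obj_cgroup *objcg, size_t bytes)
{
	void *ptr = kmalloc(bytes, GFP_KERNEL);

	if (ptr && obj_cgroup_charge(objcg, GFP_KERNEL, bytes)) {
		kfree(ptr);		/* charge failed, back out */
		return NULL;
	}
	return ptr;
}

static inline void charged_kfree(struct obj_cgroup *objcg, void *ptr, size_t bytes)
{
	if (ptr) {
		obj_cgroup_uncharge(objcg, bytes);
		kfree(ptr);
	}
}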
e6100a45
VB
3719static inline size_t obj_full_size(struct kmem_cache *s)
3720{
3721 /*
3722 * For each accounted object there is an extra space which is used
3723 * to store obj_cgroup membership. Charge it too.
3724 */
3725 return s->size + sizeof(struct obj_cgroup *);
3726}
3727
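/*
 * Worked example (illustrative, not part of the original file): for a cache
 * with s->size == 192 on a 64-bit kernel, obj_full_size() returns
 * 192 + sizeof(struct obj_cgroup *) == 200, so every accounted object is
 * charged 200 bytes, covering the objcg pointer stored alongside the object
 * in the slab's obj_exts vector.
 */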
3728bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
3729 gfp_t flags, size_t size, void **p)
3730{
3731 struct obj_cgroup *objcg;
3732 struct slab *slab;
3733 unsigned long off;
3734 size_t i;
3735
3736 /*
3737 * The obtained objcg pointer is safe to use within the current scope,
3738 * defined by the current task or a set_active_memcg() pair.
3739 * obj_cgroup_get() is used to get a permanent reference.
3740 */
3741 objcg = current_obj_cgroup();
3742 if (!objcg)
3743 return true;
3744
3745 /*
3746 * slab_alloc_node() avoids the NULL check, so we might be called with a
3747 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
3748 * the whole requested size.
3749 * Return success as there's nothing to free back.
3750 */
3751 if (unlikely(*p == NULL))
3752 return true;
3753
3754 flags &= gfp_allowed_mask;
3755
3756 if (lru) {
3757 int ret;
3758 struct mem_cgroup *memcg;
3759
3760 memcg = get_mem_cgroup_from_objcg(objcg);
3761 ret = memcg_list_lru_alloc(memcg, lru, flags);
3762 css_put(&memcg->css);
3763
3764 if (ret)
3765 return false;
3766 }
3767
3768 if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
3769 return false;
3770
3771 for (i = 0; i < size; i++) {
3772 slab = virt_to_slab(p[i]);
3773
3774 if (!slab_obj_exts(slab) &&
3775 alloc_slab_obj_exts(slab, s, flags, false)) {
3776 obj_cgroup_uncharge(objcg, obj_full_size(s));
3777 continue;
3778 }
3779
3780 off = obj_to_index(s, slab, p[i]);
3781 obj_cgroup_get(objcg);
3782 slab_obj_exts(slab)[off].objcg = objcg;
3783 mod_objcg_state(objcg, slab_pgdat(slab),
3784 cache_vmstat_idx(s), obj_full_size(s));
3785 }
3786
3787 return true;
3788}
3789
3790void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3791 void **p, int objects, struct slabobj_ext *obj_exts)
3792{
3793 for (int i = 0; i < objects; i++) {
3794 struct obj_cgroup *objcg;
3795 unsigned int off;
3796
3797 off = obj_to_index(s, slab, p[i]);
3798 objcg = obj_exts[off].objcg;
3799 if (!objcg)
3800 continue;
3801
3802 obj_exts[off].objcg = NULL;
3803 obj_cgroup_uncharge(objcg, obj_full_size(s));
3804 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3805 -obj_full_size(s));
3806 obj_cgroup_put(objcg);
3807 }
3808}
84c07d11 3809#endif /* CONFIG_MEMCG_KMEM */
7ae1e1d0 3810
ca3e0214 3811/*
be6c8982 3812 * Because page_memcg(head) is not set on tails, set it now.
ca3e0214 3813 */
b8791381 3814void split_page_memcg(struct page *head, int old_order, int new_order)
ca3e0214 3815{
1b7e4464
MWO
3816 struct folio *folio = page_folio(head);
3817 struct mem_cgroup *memcg = folio_memcg(folio);
e94c8a9c 3818 int i;
b8791381
ZY
3819 unsigned int old_nr = 1 << old_order;
3820 unsigned int new_nr = 1 << new_order;
ca3e0214 3821
be6c8982 3822 if (mem_cgroup_disabled() || !memcg)
3d37c4a9 3823 return;
b070e65c 3824
b8791381 3825 for (i = new_nr; i < old_nr; i += new_nr)
1b7e4464 3826 folio_page(folio, i)->memcg_data = folio->memcg_data;
b4e0b68f 3827
1b7e4464 3828 if (folio_memcg_kmem(folio))
b8791381 3829 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
b4e0b68f 3830 else
b8791381 3831 css_get_many(&memcg->css, old_nr / new_nr - 1);
ca3e0214 3832}
ca3e0214 3833
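/*
 * Worked example (illustrative, not part of the original file): splitting an
 * order-2 compound page into order-0 pages means old_nr = 4 and new_nr = 1,
 * so the loop above copies folio->memcg_data to tail pages 1, 2 and 3, and
 * old_nr / new_nr - 1 = 3 extra objcg or css references are taken so each
 * resulting page carries its own.
 */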
e55b9f96 3834#ifdef CONFIG_SWAP
02491447
DN
3835/**
3836 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3837 * @entry: swap entry to be moved
3838 * @from: mem_cgroup which the entry is moved from
3839 * @to: mem_cgroup which the entry is moved to
3840 *
3841 * It succeeds only when the swap_cgroup's record for this entry is the same
3842 * as the mem_cgroup's id of @from.
3843 *
3844 * Returns 0 on success, -EINVAL on failure.
3845 *
3e32cb2e 3846 * The caller must have charged to @to, IOW, called page_counter_charge() on
02491447
DN
3847 * both res and memsw, and called css_get().
3848 */
3849static int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3850 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3851{
3852 unsigned short old_id, new_id;
3853
34c00c31
LZ
3854 old_id = mem_cgroup_id(from);
3855 new_id = mem_cgroup_id(to);
02491447
DN
3856
3857 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
c9019e9b
JW
3858 mod_memcg_state(from, MEMCG_SWAP, -1);
3859 mod_memcg_state(to, MEMCG_SWAP, 1);
02491447
DN
3860 return 0;
3861 }
3862 return -EINVAL;
3863}
3864#else
3865static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3866 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3867{
3868 return -EINVAL;
3869}
8c7c6e34 3870#endif
d13d1443 3871
bbec2e15 3872static DEFINE_MUTEX(memcg_max_mutex);
f212ad7c 3873
bbec2e15
RG
3874static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3875 unsigned long max, bool memsw)
628f4235 3876{
3e32cb2e 3877 bool enlarge = false;
bb4a7ea2 3878 bool drained = false;
3e32cb2e 3879 int ret;
c054a78c
YZ
3880 bool limits_invariant;
3881 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
81d39c20 3882
3e32cb2e 3883 do {
628f4235
KH
3884 if (signal_pending(current)) {
3885 ret = -EINTR;
3886 break;
3887 }
3e32cb2e 3888
bbec2e15 3889 mutex_lock(&memcg_max_mutex);
c054a78c
YZ
3890 /*
3891 * Make sure that the new limit (memsw or memory limit) doesn't
bbec2e15 3892 * break our basic invariant rule memory.max <= memsw.max.
c054a78c 3893 */
15b42562 3894 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
bbec2e15 3895 max <= memcg->memsw.max;
c054a78c 3896 if (!limits_invariant) {
bbec2e15 3897 mutex_unlock(&memcg_max_mutex);
8c7c6e34 3898 ret = -EINVAL;
8c7c6e34
KH
3899 break;
3900 }
bbec2e15 3901 if (max > counter->max)
3e32cb2e 3902 enlarge = true;
bbec2e15
RG
3903 ret = page_counter_set_max(counter, max);
3904 mutex_unlock(&memcg_max_mutex);
8c7c6e34
KH
3905
3906 if (!ret)
3907 break;
3908
bb4a7ea2
SB
3909 if (!drained) {
3910 drain_all_stock(memcg);
3911 drained = true;
3912 continue;
3913 }
3914
73b73bac 3915 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
55ab834a 3916 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
1ab5c056
AR
3917 ret = -EBUSY;
3918 break;
3919 }
3920 } while (true);
3e32cb2e 3921
3c11ecf4
KH
3922 if (!ret && enlarge)
3923 memcg_oom_recover(memcg);
3e32cb2e 3924
628f4235
KH
3925 return ret;
3926}
3927
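/*
 * Worked example (illustrative, not part of the original file): with
 * memsw.max == 2G, writing 3G to the memory limit is rejected with -EINVAL
 * because it would break memory.max <= memsw.max; writing 1G is accepted,
 * and if usage is above 1G the loop drains the per-cpu stocks and retries
 * reclaim until the counter fits or reclaim stops making progress (-EBUSY).
 */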
ef8f2327 3928unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
0608f43d
AM
3929 gfp_t gfp_mask,
3930 unsigned long *total_scanned)
3931{
3932 unsigned long nr_reclaimed = 0;
ef8f2327 3933 struct mem_cgroup_per_node *mz, *next_mz = NULL;
0608f43d
AM
3934 unsigned long reclaimed;
3935 int loop = 0;
ef8f2327 3936 struct mem_cgroup_tree_per_node *mctz;
3e32cb2e 3937 unsigned long excess;
0608f43d 3938
e4dde56c
YZ
3939 if (lru_gen_enabled())
3940 return 0;
3941
0608f43d
AM
3942 if (order > 0)
3943 return 0;
3944
2ab082ba 3945 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
d6507ff5
MH
3946
3947 /*
3948 * Do not even bother to check the largest node if the root
3949 * is empty. Do it lockless to prevent lock bouncing. Races
3950 * are acceptable as soft limit is best effort anyway.
3951 */
bfc7228b 3952 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
d6507ff5
MH
3953 return 0;
3954
0608f43d
AM
3955 /*
3956 * This loop can run for a while, especially if mem_cgroups continuously
3957 * keep exceeding their soft limit and putting the system under
3958 * pressure.
3959 */
3960 do {
3961 if (next_mz)
3962 mz = next_mz;
3963 else
3964 mz = mem_cgroup_largest_soft_limit_node(mctz);
3965 if (!mz)
3966 break;
3967
ef8f2327 3968 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
d8f65338 3969 gfp_mask, total_scanned);
0608f43d 3970 nr_reclaimed += reclaimed;
0a31bc97 3971 spin_lock_irq(&mctz->lock);
0608f43d
AM
3972
3973 /*
3974 * If we failed to reclaim anything from this memory cgroup
3975 * it is time to move on to the next cgroup
3976 */
3977 next_mz = NULL;
bc2f2e7f
VD
3978 if (!reclaimed)
3979 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3980
3e32cb2e 3981 excess = soft_limit_excess(mz->memcg);
0608f43d
AM
3982 /*
3983 * One school of thought says that we should not add
3984 * back the node to the tree if reclaim returns 0.
3985 * But our reclaim could return 0 simply because, due
3986 * to priority, we are exposing a smaller subset of
3987 * memory to reclaim from. Consider this as a longer
3988 * term TODO.
3989 */
3990 /* If excess == 0, no tree ops */
cf2c8127 3991 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 3992 spin_unlock_irq(&mctz->lock);
0608f43d
AM
3993 css_put(&mz->memcg->css);
3994 loop++;
3995 /*
3996 * Could not reclaim anything and there are no more
3997 * mem cgroups to try or we seem to be looping without
3998 * reclaiming anything.
3999 */
4000 if (!nr_reclaimed &&
4001 (next_mz == NULL ||
4002 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4003 break;
4004 } while (!nr_reclaimed);
4005 if (next_mz)
4006 css_put(&next_mz->memcg->css);
4007 return nr_reclaimed;
4008}
4009
c26251f9 4010/*
51038171 4011 * Reclaims as many pages from the given memcg as possible.
c26251f9
MH
4012 *
4013 * Caller is responsible for holding css reference for memcg.
4014 */
4015static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4016{
d977aa93 4017 int nr_retries = MAX_RECLAIM_RETRIES;
c26251f9 4018
c1e862c1
KH
4019 /* we call try-to-free pages to make this cgroup empty */
4020 lru_add_drain_all();
d12c60f6
JS
4021
4022 drain_all_stock(memcg);
4023
f817ed48 4024 /* try to free all pages in this cgroup */
3e32cb2e 4025 while (nr_retries && page_counter_read(&memcg->memory)) {
c26251f9
MH
4026 if (signal_pending(current))
4027 return -EINTR;
4028
73b73bac 4029 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
55ab834a 4030 MEMCG_RECLAIM_MAY_SWAP))
f817ed48 4031 nr_retries--;
f817ed48 4032 }
ab5196c2
MH
4033
4034 return 0;
cc847582
KH
4035}
4036
6770c64e
TH
4037static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
4038 char *buf, size_t nbytes,
4039 loff_t off)
c1e862c1 4040{
6770c64e 4041 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
c26251f9 4042
d8423011
MH
4043 if (mem_cgroup_is_root(memcg))
4044 return -EINVAL;
6770c64e 4045 return mem_cgroup_force_empty(memcg) ?: nbytes;
c1e862c1
KH
4046}
4047
182446d0
TH
4048static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4049 struct cftype *cft)
18f59ea7 4050{
bef8620c 4051 return 1;
18f59ea7
BS
4052}
4053
182446d0
TH
4054static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4055 struct cftype *cft, u64 val)
18f59ea7 4056{
bef8620c 4057 if (val == 1)
0b8f73e1 4058 return 0;
567fb435 4059
bef8620c
RG
4060 pr_warn_once("Non-hierarchical mode is deprecated. "
4061 "Please report your usecase to linux-mm@kvack.org if you "
4062 "depend on this functionality.\n");
567fb435 4063
bef8620c 4064 return -EINVAL;
18f59ea7
BS
4065}
4066
6f646156 4067static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
ce00a967 4068{
42a30035 4069 unsigned long val;
ce00a967 4070
3e32cb2e 4071 if (mem_cgroup_is_root(memcg)) {
a2174e95 4072 /*
f82a7a86
YA
4073 * Approximate root's usage from global state. This isn't
4074 * perfect, but the root usage was always an approximation.
a2174e95 4075 */
f82a7a86
YA
4076 val = global_node_page_state(NR_FILE_PAGES) +
4077 global_node_page_state(NR_ANON_MAPPED);
42a30035 4078 if (swap)
f82a7a86 4079 val += total_swap_pages - get_nr_swap_pages();
3e32cb2e 4080 } else {
ce00a967 4081 if (!swap)
3e32cb2e 4082 val = page_counter_read(&memcg->memory);
ce00a967 4083 else
3e32cb2e 4084 val = page_counter_read(&memcg->memsw);
ce00a967 4085 }
c12176d3 4086 return val;
ce00a967
JW
4087}
4088
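/*
 * Illustrative note (not part of the original file): for the root cgroup the
 * usage reported above is approximately
 *
 *	usage = NR_FILE_PAGES + NR_ANON_MAPPED
 *	      + (swap ? total_swap_pages - get_nr_swap_pages() : 0)
 *
 * i.e. global page cache plus mapped anonymous memory, plus swap slots in use
 * when the swap-inclusive variant is requested; non-root cgroups simply read
 * their page_counter instead.
 */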
3e32cb2e
JW
4089enum {
4090 RES_USAGE,
4091 RES_LIMIT,
4092 RES_MAX_USAGE,
4093 RES_FAILCNT,
4094 RES_SOFT_LIMIT,
4095};
ce00a967 4096
791badbd 4097static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
05b84301 4098 struct cftype *cft)
8cdea7c0 4099{
182446d0 4100 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3e32cb2e 4101 struct page_counter *counter;
af36f906 4102
3e32cb2e 4103 switch (MEMFILE_TYPE(cft->private)) {
8c7c6e34 4104 case _MEM:
3e32cb2e
JW
4105 counter = &memcg->memory;
4106 break;
8c7c6e34 4107 case _MEMSWAP:
3e32cb2e
JW
4108 counter = &memcg->memsw;
4109 break;
510fc4e1 4110 case _KMEM:
3e32cb2e 4111 counter = &memcg->kmem;
510fc4e1 4112 break;
d55f90bf 4113 case _TCP:
0db15298 4114 counter = &memcg->tcpmem;
d55f90bf 4115 break;
8c7c6e34
KH
4116 default:
4117 BUG();
8c7c6e34 4118 }
3e32cb2e
JW
4119
4120 switch (MEMFILE_ATTR(cft->private)) {
4121 case RES_USAGE:
4122 if (counter == &memcg->memory)
c12176d3 4123 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3e32cb2e 4124 if (counter == &memcg->memsw)
c12176d3 4125 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3e32cb2e
JW
4126 return (u64)page_counter_read(counter) * PAGE_SIZE;
4127 case RES_LIMIT:
bbec2e15 4128 return (u64)counter->max * PAGE_SIZE;
3e32cb2e
JW
4129 case RES_MAX_USAGE:
4130 return (u64)counter->watermark * PAGE_SIZE;
4131 case RES_FAILCNT:
4132 return counter->failcnt;
4133 case RES_SOFT_LIMIT:
2178e20c 4134 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3e32cb2e
JW
4135 default:
4136 BUG();
4137 }
8cdea7c0 4138}
510fc4e1 4139
6b0ba2ab
FS
4140/*
4141 * This function doesn't do anything useful. Its only job is to provide a read
4142 * handler for a file so that cgroup_file_mode() will add read permissions.
4143 */
4144static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
4145 __always_unused void *v)
4146{
4147 return -EINVAL;
4148}
4149
84c07d11 4150#ifdef CONFIG_MEMCG_KMEM
567e9ab2 4151static int memcg_online_kmem(struct mem_cgroup *memcg)
d6441637 4152{
bf4f0599 4153 struct obj_cgroup *objcg;
d6441637 4154
9c94bef9 4155 if (mem_cgroup_kmem_disabled())
b313aeee
VD
4156 return 0;
4157
da0efe30
MS
4158 if (unlikely(mem_cgroup_is_root(memcg)))
4159 return 0;
d6441637 4160
bf4f0599 4161 objcg = obj_cgroup_alloc();
f9c69d63 4162 if (!objcg)
bf4f0599 4163 return -ENOMEM;
f9c69d63 4164
bf4f0599
RG
4165 objcg->memcg = memcg;
4166 rcu_assign_pointer(memcg->objcg, objcg);
675d6c9b
RG
4167 obj_cgroup_get(objcg);
4168 memcg->orig_objcg = objcg;
bf4f0599 4169
f7a449f7 4170 static_branch_enable(&memcg_kmem_online_key);
d648bcc7 4171
f9c69d63 4172 memcg->kmemcg_id = memcg->id.id;
0b8f73e1
JW
4173
4174 return 0;
d6441637
VD
4175}
4176
8e0a8912
JW
4177static void memcg_offline_kmem(struct mem_cgroup *memcg)
4178{
64268868 4179 struct mem_cgroup *parent;
8e0a8912 4180
9c94bef9 4181 if (mem_cgroup_kmem_disabled())
da0efe30
MS
4182 return;
4183
4184 if (unlikely(mem_cgroup_is_root(memcg)))
8e0a8912 4185 return;
9855609b 4186
8e0a8912
JW
4187 parent = parent_mem_cgroup(memcg);
4188 if (!parent)
4189 parent = root_mem_cgroup;
4190
bf4f0599 4191 memcg_reparent_objcgs(memcg, parent);
fb2f2b0a 4192
8e0a8912 4193 /*
64268868
MS
4194 * After we have finished memcg_reparent_objcgs(), all list_lrus
4195 * corresponding to this cgroup are guaranteed to remain empty.
4196 * The ordering is imposed by list_lru_node->lock taken by
1f391eb2 4197 * memcg_reparent_list_lrus().
8e0a8912 4198 */
1f391eb2 4199 memcg_reparent_list_lrus(memcg, parent);
8e0a8912 4200}
d6441637 4201#else
0b8f73e1 4202static int memcg_online_kmem(struct mem_cgroup *memcg)
127424c8
JW
4203{
4204 return 0;
4205}
4206static void memcg_offline_kmem(struct mem_cgroup *memcg)
4207{
4208}
84c07d11 4209#endif /* CONFIG_MEMCG_KMEM */
127424c8 4210
bbec2e15 4211static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
d55f90bf
VD
4212{
4213 int ret;
4214
bbec2e15 4215 mutex_lock(&memcg_max_mutex);
d55f90bf 4216
bbec2e15 4217 ret = page_counter_set_max(&memcg->tcpmem, max);
d55f90bf
VD
4218 if (ret)
4219 goto out;
4220
0db15298 4221 if (!memcg->tcpmem_active) {
d55f90bf
VD
4222 /*
4223 * The active flag needs to be written after the static_key
4224 * update. This is what guarantees that the socket activation
2d758073
JW
4225 * function is the last one to run. See mem_cgroup_sk_alloc()
4226 * for details, and note that we don't mark any socket as
4227 * belonging to this memcg until that flag is up.
d55f90bf
VD
4228 *
4229 * We need to do this, because static_keys will span multiple
4230 * sites, but we can't control their order. If we mark a socket
4231 * as accounted, but the accounting functions are not patched in
4232 * yet, we'll lose accounting.
4233 *
2d758073 4234 * We never race with the readers in mem_cgroup_sk_alloc(),
d55f90bf
VD
4235 * because when this value change, the code to process it is not
4236 * patched in yet.
4237 */
4238 static_branch_inc(&memcg_sockets_enabled_key);
0db15298 4239 memcg->tcpmem_active = true;
d55f90bf
VD
4240 }
4241out:
bbec2e15 4242 mutex_unlock(&memcg_max_mutex);
d55f90bf
VD
4243 return ret;
4244}
d55f90bf 4245
628f4235
KH
4246/*
4247 * The user of this function is...
4248 * RES_LIMIT.
4249 */
451af504
TH
4250static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4251 char *buf, size_t nbytes, loff_t off)
8cdea7c0 4252{
451af504 4253 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 4254 unsigned long nr_pages;
628f4235
KH
4255 int ret;
4256
451af504 4257 buf = strstrip(buf);
650c5e56 4258 ret = page_counter_memparse(buf, "-1", &nr_pages);
3e32cb2e
JW
4259 if (ret)
4260 return ret;
af36f906 4261
3e32cb2e 4262 switch (MEMFILE_ATTR(of_cft(of)->private)) {
628f4235 4263 case RES_LIMIT:
4b3bde4c
BS
4264 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4265 ret = -EINVAL;
4266 break;
4267 }
3e32cb2e
JW
4268 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4269 case _MEM:
bbec2e15 4270 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
8c7c6e34 4271 break;
3e32cb2e 4272 case _MEMSWAP:
bbec2e15 4273 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
296c81d8 4274 break;
4597648f
MH
4275 case _KMEM:
4276 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4277 "Writing any value to this file has no effect. "
4278 "Please report your usecase to linux-mm@kvack.org if you "
4279 "depend on this functionality.\n");
4280 ret = 0;
4281 break;
d55f90bf 4282 case _TCP:
bbec2e15 4283 ret = memcg_update_tcp_max(memcg, nr_pages);
d55f90bf 4284 break;
3e32cb2e 4285 }
296c81d8 4286 break;
3e32cb2e 4287 case RES_SOFT_LIMIT:
2343e88d
SAS
4288 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4289 ret = -EOPNOTSUPP;
4290 } else {
2178e20c 4291 WRITE_ONCE(memcg->soft_limit, nr_pages);
2343e88d
SAS
4292 ret = 0;
4293 }
628f4235
KH
4294 break;
4295 }
451af504 4296 return ret ?: nbytes;
8cdea7c0
BS
4297}
4298
6770c64e
TH
4299static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4300 size_t nbytes, loff_t off)
c84872e1 4301{
6770c64e 4302 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 4303 struct page_counter *counter;
c84872e1 4304
3e32cb2e
JW
4305 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4306 case _MEM:
4307 counter = &memcg->memory;
4308 break;
4309 case _MEMSWAP:
4310 counter = &memcg->memsw;
4311 break;
4312 case _KMEM:
4313 counter = &memcg->kmem;
4314 break;
d55f90bf 4315 case _TCP:
0db15298 4316 counter = &memcg->tcpmem;
d55f90bf 4317 break;
3e32cb2e
JW
4318 default:
4319 BUG();
4320 }
af36f906 4321
3e32cb2e 4322 switch (MEMFILE_ATTR(of_cft(of)->private)) {
29f2a4da 4323 case RES_MAX_USAGE:
3e32cb2e 4324 page_counter_reset_watermark(counter);
29f2a4da
PE
4325 break;
4326 case RES_FAILCNT:
3e32cb2e 4327 counter->failcnt = 0;
29f2a4da 4328 break;
3e32cb2e
JW
4329 default:
4330 BUG();
29f2a4da 4331 }
f64c3f54 4332
6770c64e 4333 return nbytes;
c84872e1
PE
4334}
4335
182446d0 4336static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
7dc74be0
DN
4337 struct cftype *cft)
4338{
182446d0 4339 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
7dc74be0
DN
4340}
4341
02491447 4342#ifdef CONFIG_MMU
182446d0 4343static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
7dc74be0
DN
4344 struct cftype *cft, u64 val)
4345{
182446d0 4346 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7dc74be0 4347
da34a848
JW
4348 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4349 "Please report your usecase to linux-mm@kvack.org if you "
4350 "depend on this functionality.\n");
4351
1dfab5ab 4352 if (val & ~MOVE_MASK)
7dc74be0 4353 return -EINVAL;
ee5e8472 4354
7dc74be0 4355 /*
ee5e8472
GC
4356 * No kind of locking is needed in here, because ->can_attach() will
4357 * check this value once in the beginning of the process, and then carry
4358 * on with stale data. This means that changes to this value will only
4359 * affect task migrations starting after the change.
7dc74be0 4360 */
c0ff4b85 4361 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
4362 return 0;
4363}
02491447 4364#else
182446d0 4365static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
02491447
DN
4366 struct cftype *cft, u64 val)
4367{
4368 return -ENOSYS;
4369}
4370#endif
7dc74be0 4371
406eb0c9 4372#ifdef CONFIG_NUMA
113b7dfd
JW
4373
4374#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4375#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4376#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4377
4378static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6 4379 int nid, unsigned int lru_mask, bool tree)
113b7dfd 4380{
867e5e1d 4381 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
113b7dfd
JW
4382 unsigned long nr = 0;
4383 enum lru_list lru;
4384
4385 VM_BUG_ON((unsigned)nid >= nr_node_ids);
4386
4387 for_each_lru(lru) {
4388 if (!(BIT(lru) & lru_mask))
4389 continue;
dd8657b6
SB
4390 if (tree)
4391 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4392 else
4393 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
113b7dfd
JW
4394 }
4395 return nr;
4396}
4397
4398static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6
SB
4399 unsigned int lru_mask,
4400 bool tree)
113b7dfd
JW
4401{
4402 unsigned long nr = 0;
4403 enum lru_list lru;
4404
4405 for_each_lru(lru) {
4406 if (!(BIT(lru) & lru_mask))
4407 continue;
dd8657b6
SB
4408 if (tree)
4409 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4410 else
4411 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
113b7dfd
JW
4412 }
4413 return nr;
4414}
4415
2da8ca82 4416static int memcg_numa_stat_show(struct seq_file *m, void *v)
406eb0c9 4417{
25485de6
GT
4418 struct numa_stat {
4419 const char *name;
4420 unsigned int lru_mask;
4421 };
4422
4423 static const struct numa_stat stats[] = {
4424 { "total", LRU_ALL },
4425 { "file", LRU_ALL_FILE },
4426 { "anon", LRU_ALL_ANON },
4427 { "unevictable", BIT(LRU_UNEVICTABLE) },
4428 };
4429 const struct numa_stat *stat;
406eb0c9 4430 int nid;
aa9694bb 4431 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
406eb0c9 4432
7d7ef0a4 4433 mem_cgroup_flush_stats(memcg);
2d146aa3 4434
25485de6 4435 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4436 seq_printf(m, "%s=%lu", stat->name,
4437 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4438 false));
4439 for_each_node_state(nid, N_MEMORY)
4440 seq_printf(m, " N%d=%lu", nid,
4441 mem_cgroup_node_nr_lru_pages(memcg, nid,
4442 stat->lru_mask, false));
25485de6 4443 seq_putc(m, '\n');
406eb0c9 4444 }
406eb0c9 4445
071aee13 4446 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4447
4448 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4449 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4450 true));
4451 for_each_node_state(nid, N_MEMORY)
4452 seq_printf(m, " N%d=%lu", nid,
4453 mem_cgroup_node_nr_lru_pages(memcg, nid,
4454 stat->lru_mask, true));
071aee13 4455 seq_putc(m, '\n');
406eb0c9 4456 }
406eb0c9 4457
406eb0c9
YH
4458 return 0;
4459}
4460#endif /* CONFIG_NUMA */
4461
c8713d0b 4462static const unsigned int memcg1_stats[] = {
0d1c2072 4463 NR_FILE_PAGES,
be5d0a74 4464 NR_ANON_MAPPED,
468c3982
JW
4465#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4466 NR_ANON_THPS,
4467#endif
c8713d0b
JW
4468 NR_SHMEM,
4469 NR_FILE_MAPPED,
4470 NR_FILE_DIRTY,
4471 NR_WRITEBACK,
e09b0b61
YS
4472 WORKINGSET_REFAULT_ANON,
4473 WORKINGSET_REFAULT_FILE,
72a14e82 4474#ifdef CONFIG_SWAP
c8713d0b 4475 MEMCG_SWAP,
72a14e82
LS
4476 NR_SWAPCACHE,
4477#endif
c8713d0b
JW
4478};
4479
4480static const char *const memcg1_stat_names[] = {
4481 "cache",
4482 "rss",
468c3982 4483#ifdef CONFIG_TRANSPARENT_HUGEPAGE
c8713d0b 4484 "rss_huge",
468c3982 4485#endif
c8713d0b
JW
4486 "shmem",
4487 "mapped_file",
4488 "dirty",
4489 "writeback",
e09b0b61
YS
4490 "workingset_refault_anon",
4491 "workingset_refault_file",
72a14e82 4492#ifdef CONFIG_SWAP
c8713d0b 4493 "swap",
72a14e82
LS
4494 "swapcached",
4495#endif
c8713d0b
JW
4496};
4497
df0e53d0 4498/* Universal VM events cgroup1 shows, original sort order */
8dd53fd3 4499static const unsigned int memcg1_events[] = {
df0e53d0
JW
4500 PGPGIN,
4501 PGPGOUT,
4502 PGFAULT,
4503 PGMAJFAULT,
4504};
4505
dddb44ff 4506static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
d2ceb9b7 4507{
3e32cb2e 4508 unsigned long memory, memsw;
af7c4b0e
JW
4509 struct mem_cgroup *mi;
4510 unsigned int i;
406eb0c9 4511
71cd3113 4512 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
70bc068c 4513
7d7ef0a4 4514 mem_cgroup_flush_stats(memcg);
2d146aa3 4515
71cd3113 4516 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
468c3982
JW
4517 unsigned long nr;
4518
ff841a06
YA
4519 nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4520 seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
1dd3a273 4521 }
7b854121 4522
df0e53d0 4523 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
dddb44ff
YA
4524 seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4525 memcg_events_local(memcg, memcg1_events[i]));
af7c4b0e
JW
4526
4527 for (i = 0; i < NR_LRU_LISTS; i++)
dddb44ff
YA
4528 seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4529 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4530 PAGE_SIZE);
af7c4b0e 4531
14067bb3 4532 /* Hierarchical information */
3e32cb2e
JW
4533 memory = memsw = PAGE_COUNTER_MAX;
4534 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
15b42562
CD
4535 memory = min(memory, READ_ONCE(mi->memory.max));
4536 memsw = min(memsw, READ_ONCE(mi->memsw.max));
fee7b548 4537 }
dddb44ff
YA
4538 seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4539 (u64)memory * PAGE_SIZE);
840ea53a
LS
4540 seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4541 (u64)memsw * PAGE_SIZE);
7f016ee8 4542
8de7ecc6 4543 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
7de2e9f1 4544 unsigned long nr;
4545
ff841a06 4546 nr = memcg_page_state_output(memcg, memcg1_stats[i]);
dddb44ff 4547 seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
ff841a06 4548 (u64)nr);
af7c4b0e
JW
4549 }
4550
8de7ecc6 4551 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
dddb44ff
YA
4552 seq_buf_printf(s, "total_%s %llu\n",
4553 vm_event_name(memcg1_events[i]),
4554 (u64)memcg_events(memcg, memcg1_events[i]));
af7c4b0e 4555
8de7ecc6 4556 for (i = 0; i < NR_LRU_LISTS; i++)
dddb44ff
YA
4557 seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4558 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4559 PAGE_SIZE);
14067bb3 4560
7f016ee8 4561#ifdef CONFIG_DEBUG_VM
7f016ee8 4562 {
ef8f2327
MG
4563 pg_data_t *pgdat;
4564 struct mem_cgroup_per_node *mz;
1431d4d1
JW
4565 unsigned long anon_cost = 0;
4566 unsigned long file_cost = 0;
7f016ee8 4567
ef8f2327 4568 for_each_online_pgdat(pgdat) {
a3747b53 4569 mz = memcg->nodeinfo[pgdat->node_id];
7f016ee8 4570
1431d4d1
JW
4571 anon_cost += mz->lruvec.anon_cost;
4572 file_cost += mz->lruvec.file_cost;
ef8f2327 4573 }
dddb44ff
YA
4574 seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4575 seq_buf_printf(s, "file_cost %lu\n", file_cost);
7f016ee8
KM
4576 }
4577#endif
d2ceb9b7
KH
4578}
4579
182446d0
TH
4580static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4581 struct cftype *cft)
a7885eb8 4582{
182446d0 4583 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4584
1f4c025b 4585 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
4586}
4587
182446d0
TH
4588static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4589 struct cftype *cft, u64 val)
a7885eb8 4590{
182446d0 4591 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4592
37bc3cb9 4593 if (val > 200)
a7885eb8
KM
4594 return -EINVAL;
4595
a4792030 4596 if (!mem_cgroup_is_root(memcg))
82b3aa26 4597 WRITE_ONCE(memcg->swappiness, val);
3dae7fec 4598 else
82b3aa26 4599 WRITE_ONCE(vm_swappiness, val);
068b38c1 4600
a7885eb8
KM
4601 return 0;
4602}
4603
2e72b634
KS
4604static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4605{
4606 struct mem_cgroup_threshold_ary *t;
3e32cb2e 4607 unsigned long usage;
2e72b634
KS
4608 int i;
4609
4610 rcu_read_lock();
4611 if (!swap)
2c488db2 4612 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 4613 else
2c488db2 4614 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
4615
4616 if (!t)
4617 goto unlock;
4618
ce00a967 4619 usage = mem_cgroup_usage(memcg, swap);
2e72b634
KS
4620
4621 /*
748dad36 4622 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
4623 * If it's not true, a threshold was crossed after last
4624 * call of __mem_cgroup_threshold().
4625 */
5407a562 4626 i = t->current_threshold;
2e72b634
KS
4627
4628 /*
4629 * Iterate backward over array of thresholds starting from
4630 * current_threshold and check if a threshold is crossed.
4631 * If none of thresholds below usage is crossed, we read
4632 * only one element of the array here.
4633 */
4634 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3652117f 4635 eventfd_signal(t->entries[i].eventfd);
2e72b634
KS
4636
4637 /* i = current_threshold + 1 */
4638 i++;
4639
4640 /*
4641 * Iterate forward over array of thresholds starting from
4642 * current_threshold+1 and check if a threshold is crossed.
4643 * If none of thresholds above usage is crossed, we read
4644 * only one element of the array here.
4645 */
4646 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3652117f 4647 eventfd_signal(t->entries[i].eventfd);
2e72b634
KS
4648
4649 /* Update current_threshold */
5407a562 4650 t->current_threshold = i - 1;
2e72b634
KS
4651unlock:
4652 rcu_read_unlock();
4653}
4654
4655static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4656{
ad4ca5f4
KS
4657 while (memcg) {
4658 __mem_cgroup_threshold(memcg, false);
7941d214 4659 if (do_memsw_account())
ad4ca5f4
KS
4660 __mem_cgroup_threshold(memcg, true);
4661
4662 memcg = parent_mem_cgroup(memcg);
4663 }
2e72b634
KS
4664}
4665
4666static int compare_thresholds(const void *a, const void *b)
4667{
4668 const struct mem_cgroup_threshold *_a = a;
4669 const struct mem_cgroup_threshold *_b = b;
4670
2bff24a3
GT
4671 if (_a->threshold > _b->threshold)
4672 return 1;
4673
4674 if (_a->threshold < _b->threshold)
4675 return -1;
4676
4677 return 0;
2e72b634
KS
4678}
4679
c0ff4b85 4680static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
4681{
4682 struct mem_cgroup_eventfd_list *ev;
4683
2bcf2e92
MH
4684 spin_lock(&memcg_oom_lock);
4685
c0ff4b85 4686 list_for_each_entry(ev, &memcg->oom_notify, list)
3652117f 4687 eventfd_signal(ev->eventfd);
2bcf2e92
MH
4688
4689 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4690 return 0;
4691}
4692
c0ff4b85 4693static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 4694{
7d74b06f
KH
4695 struct mem_cgroup *iter;
4696
c0ff4b85 4697 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 4698 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
4699}
4700
59b6f873 4701static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87 4702 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
2e72b634 4703{
2c488db2
KS
4704 struct mem_cgroup_thresholds *thresholds;
4705 struct mem_cgroup_threshold_ary *new;
3e32cb2e
JW
4706 unsigned long threshold;
4707 unsigned long usage;
2c488db2 4708 int i, size, ret;
2e72b634 4709
650c5e56 4710 ret = page_counter_memparse(args, "-1", &threshold);
2e72b634
KS
4711 if (ret)
4712 return ret;
4713
4714 mutex_lock(&memcg->thresholds_lock);
2c488db2 4715
05b84301 4716 if (type == _MEM) {
2c488db2 4717 thresholds = &memcg->thresholds;
ce00a967 4718 usage = mem_cgroup_usage(memcg, false);
05b84301 4719 } else if (type == _MEMSWAP) {
2c488db2 4720 thresholds = &memcg->memsw_thresholds;
ce00a967 4721 usage = mem_cgroup_usage(memcg, true);
05b84301 4722 } else
2e72b634
KS
4723 BUG();
4724
2e72b634 4725 /* Check if a threshold crossed before adding a new one */
2c488db2 4726 if (thresholds->primary)
2e72b634
KS
4727 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4728
2c488db2 4729 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
4730
4731 /* Allocate memory for new array of thresholds */
67b8046f 4732 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
2c488db2 4733 if (!new) {
2e72b634
KS
4734 ret = -ENOMEM;
4735 goto unlock;
4736 }
2c488db2 4737 new->size = size;
2e72b634
KS
4738
4739 /* Copy thresholds (if any) to new array */
e90342e6
GS
4740 if (thresholds->primary)
4741 memcpy(new->entries, thresholds->primary->entries,
4742 flex_array_size(new, entries, size - 1));
2c488db2 4743
2e72b634 4744 /* Add new threshold */
2c488db2
KS
4745 new->entries[size - 1].eventfd = eventfd;
4746 new->entries[size - 1].threshold = threshold;
2e72b634
KS
4747
4748 /* Sort thresholds. Registering of new threshold isn't time-critical */
61e604e6 4749 sort(new->entries, size, sizeof(*new->entries),
2e72b634
KS
4750 compare_thresholds, NULL);
4751
4752 /* Find current threshold */
2c488db2 4753 new->current_threshold = -1;
2e72b634 4754 for (i = 0; i < size; i++) {
748dad36 4755 if (new->entries[i].threshold <= usage) {
2e72b634 4756 /*
2c488db2
KS
4757 * new->current_threshold will not be used until
4758 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
4759 * it here.
4760 */
2c488db2 4761 ++new->current_threshold;
748dad36
SZ
4762 } else
4763 break;
2e72b634
KS
4764 }
4765
2c488db2
KS
4766 /* Free old spare buffer and save old primary buffer as spare */
4767 kfree(thresholds->spare);
4768 thresholds->spare = thresholds->primary;
4769
4770 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4771
907860ed 4772 /* To be sure that nobody uses thresholds */
2e72b634
KS
4773 synchronize_rcu();
4774
2e72b634
KS
4775unlock:
4776 mutex_unlock(&memcg->thresholds_lock);
4777
4778 return ret;
4779}
4780
59b6f873 4781static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4782 struct eventfd_ctx *eventfd, const char *args)
4783{
59b6f873 4784 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
347c4a87
TH
4785}
4786
59b6f873 4787static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4788 struct eventfd_ctx *eventfd, const char *args)
4789{
59b6f873 4790 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
347c4a87
TH
4791}
4792
59b6f873 4793static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87 4794 struct eventfd_ctx *eventfd, enum res_type type)
2e72b634 4795{
2c488db2
KS
4796 struct mem_cgroup_thresholds *thresholds;
4797 struct mem_cgroup_threshold_ary *new;
3e32cb2e 4798 unsigned long usage;
7d36665a 4799 int i, j, size, entries;
2e72b634
KS
4800
4801 mutex_lock(&memcg->thresholds_lock);
05b84301
JW
4802
4803 if (type == _MEM) {
2c488db2 4804 thresholds = &memcg->thresholds;
ce00a967 4805 usage = mem_cgroup_usage(memcg, false);
05b84301 4806 } else if (type == _MEMSWAP) {
2c488db2 4807 thresholds = &memcg->memsw_thresholds;
ce00a967 4808 usage = mem_cgroup_usage(memcg, true);
05b84301 4809 } else
2e72b634
KS
4810 BUG();
4811
371528ca
AV
4812 if (!thresholds->primary)
4813 goto unlock;
4814
2e72b634
KS
4815 /* Check if a threshold crossed before removing */
4816 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4817
4818 /* Calculate new number of threshold */
7d36665a 4819 size = entries = 0;
2c488db2
KS
4820 for (i = 0; i < thresholds->primary->size; i++) {
4821 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634 4822 size++;
7d36665a
CX
4823 else
4824 entries++;
2e72b634
KS
4825 }
4826
2c488db2 4827 new = thresholds->spare;
907860ed 4828
7d36665a
CX
4829 /* If no items related to eventfd have been cleared, nothing to do */
4830 if (!entries)
4831 goto unlock;
4832
2e72b634
KS
4833 /* Set thresholds array to NULL if we don't have thresholds */
4834 if (!size) {
2c488db2
KS
4835 kfree(new);
4836 new = NULL;
907860ed 4837 goto swap_buffers;
2e72b634
KS
4838 }
4839
2c488db2 4840 new->size = size;
2e72b634
KS
4841
4842 /* Copy thresholds and find current threshold */
2c488db2
KS
4843 new->current_threshold = -1;
4844 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4845 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
4846 continue;
4847
2c488db2 4848 new->entries[j] = thresholds->primary->entries[i];
748dad36 4849 if (new->entries[j].threshold <= usage) {
2e72b634 4850 /*
2c488db2 4851 * new->current_threshold will not be used
2e72b634
KS
4852 * until rcu_assign_pointer(), so it's safe to increment
4853 * it here.
4854 */
2c488db2 4855 ++new->current_threshold;
2e72b634
KS
4856 }
4857 j++;
4858 }
4859
907860ed 4860swap_buffers:
2c488db2
KS
4861 /* Swap primary and spare array */
4862 thresholds->spare = thresholds->primary;
8c757763 4863
2c488db2 4864 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4865
907860ed 4866 /* To be sure that nobody uses thresholds */
2e72b634 4867 synchronize_rcu();
6611d8d7
MC
4868
4869 /* If all events are unregistered, free the spare array */
4870 if (!new) {
4871 kfree(thresholds->spare);
4872 thresholds->spare = NULL;
4873 }
371528ca 4874unlock:
2e72b634 4875 mutex_unlock(&memcg->thresholds_lock);
2e72b634 4876}
c1e862c1 4877
59b6f873 4878static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4879 struct eventfd_ctx *eventfd)
4880{
59b6f873 4881 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
347c4a87
TH
4882}
4883
59b6f873 4884static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4885 struct eventfd_ctx *eventfd)
4886{
59b6f873 4887 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
347c4a87
TH
4888}
4889
59b6f873 4890static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
347c4a87 4891 struct eventfd_ctx *eventfd, const char *args)
9490ff27 4892{
9490ff27 4893 struct mem_cgroup_eventfd_list *event;
9490ff27 4894
9490ff27
KH
4895 event = kmalloc(sizeof(*event), GFP_KERNEL);
4896 if (!event)
4897 return -ENOMEM;
4898
1af8efe9 4899 spin_lock(&memcg_oom_lock);
9490ff27
KH
4900
4901 event->eventfd = eventfd;
4902 list_add(&event->list, &memcg->oom_notify);
4903
4904 /* already in OOM ? */
c2b42d3c 4905 if (memcg->under_oom)
3652117f 4906 eventfd_signal(eventfd);
1af8efe9 4907 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4908
4909 return 0;
4910}
4911
59b6f873 4912static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
347c4a87 4913 struct eventfd_ctx *eventfd)
9490ff27 4914{
9490ff27 4915 struct mem_cgroup_eventfd_list *ev, *tmp;
9490ff27 4916
1af8efe9 4917 spin_lock(&memcg_oom_lock);
9490ff27 4918
c0ff4b85 4919 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
4920 if (ev->eventfd == eventfd) {
4921 list_del(&ev->list);
4922 kfree(ev);
4923 }
4924 }
4925
1af8efe9 4926 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4927}
4928
2da8ca82 4929static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3c11ecf4 4930{
aa9694bb 4931 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
3c11ecf4 4932
17c56de6 4933 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
c2b42d3c 4934 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
fe6bdfc8
RG
4935 seq_printf(sf, "oom_kill %lu\n",
4936 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
3c11ecf4
KH
4937 return 0;
4938}
4939
182446d0 4940static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3c11ecf4
KH
4941 struct cftype *cft, u64 val)
4942{
182446d0 4943 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3c11ecf4
KH
4944
4945 /* cannot set to root cgroup and only 0 and 1 are allowed */
a4792030 4946 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
3c11ecf4
KH
4947 return -EINVAL;
4948
17c56de6 4949 WRITE_ONCE(memcg->oom_kill_disable, val);
4d845ebf 4950 if (!val)
c0ff4b85 4951 memcg_oom_recover(memcg);
3dae7fec 4952
3c11ecf4
KH
4953 return 0;
4954}
4955
52ebea74
TH
4956#ifdef CONFIG_CGROUP_WRITEBACK
4957
3a8e9ac8
TH
4958#include <trace/events/writeback.h>
4959
841710aa
TH
4960static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4961{
4962 return wb_domain_init(&memcg->cgwb_domain, gfp);
4963}
4964
4965static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4966{
4967 wb_domain_exit(&memcg->cgwb_domain);
4968}
4969
2529bb3a
TH
4970static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4971{
4972 wb_domain_size_changed(&memcg->cgwb_domain);
4973}
4974
841710aa
TH
4975struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4976{
4977 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4978
4979 if (!memcg->css.parent)
4980 return NULL;
4981
4982 return &memcg->cgwb_domain;
4983}
4984
c2aa723a
TH
4985/**
4986 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4987 * @wb: bdi_writeback in question
c5edf9cd
TH
4988 * @pfilepages: out parameter for number of file pages
4989 * @pheadroom: out parameter for number of allocatable pages according to memcg
c2aa723a
TH
4990 * @pdirty: out parameter for number of dirty pages
4991 * @pwriteback: out parameter for number of pages under writeback
4992 *
c5edf9cd
TH
4993 * Determine the numbers of file, headroom, dirty, and writeback pages in
4994 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4995 * is a bit more involved.
c2aa723a 4996 *
c5edf9cd
TH
4997 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4998 * headroom is calculated as the lowest headroom of itself and the
4999 * ancestors. Note that this doesn't consider the actual amount of
5000 * available memory in the system. The caller should further cap
5001 * *@pheadroom accordingly.
c2aa723a 5002 */
c5edf9cd
TH
5003void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
5004 unsigned long *pheadroom, unsigned long *pdirty,
5005 unsigned long *pwriteback)
c2aa723a
TH
5006{
5007 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
5008 struct mem_cgroup *parent;
c2aa723a 5009
d9b3ce87 5010 mem_cgroup_flush_stats_ratelimited(memcg);
c2aa723a 5011
2d146aa3
JW
5012 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
5013 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
5014 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
5015 memcg_page_state(memcg, NR_ACTIVE_FILE);
c2aa723a 5016
2d146aa3 5017 *pheadroom = PAGE_COUNTER_MAX;
c2aa723a 5018 while ((parent = parent_mem_cgroup(memcg))) {
15b42562 5019 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
d1663a90 5020 READ_ONCE(memcg->memory.high));
c2aa723a
TH
5021 unsigned long used = page_counter_read(&memcg->memory);
5022
c5edf9cd 5023 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
c2aa723a
TH
5024 memcg = parent;
5025 }
c2aa723a
TH
5026}
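/*
 * Worked example of the headroom rule above, with illustrative numbers only:
 * a child with max = 1G, high = 512M and used = 300M has a local headroom of
 * min(1G, 512M) - 300M = 212M; if its parent has max = 2G, high unset and
 * used = 1.9G, the parent's headroom is 2G - 1.9G = 0.1G. *@pheadroom then
 * reports the minimum along the path (0.1G here), since the parent would
 * throttle writeback first.
 */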
5027
97b27821
TH
5028/*
5029 * Foreign dirty flushing
5030 *
5031 * There's an inherent mismatch between memcg and writeback. The former
f0953a1b 5032 * tracks ownership per-page while the latter per-inode. This was a
97b27821
TH
5033 * deliberate design decision because honoring per-page ownership in the
5034 * writeback path is complicated, may lead to higher CPU and IO overheads
5035 * and was deemed unnecessary given that write-sharing an inode across
5036 * different cgroups isn't a common use-case.
5037 *
5038 * Combined with inode majority-writer ownership switching, this works well
5039 * enough in most cases but there are some pathological cases. For
5040 * example, let's say there are two cgroups A and B which keep writing to
5041 * different but confined parts of the same inode. B owns the inode and
5042 * A's memory is limited far below B's. A's dirty ratio can rise enough to
5043 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
5044 * triggering background writeback. A will be slowed down without a way to
5045 * make writeback of the dirty pages happen.
5046 *
f0953a1b 5047 * Conditions like the above can lead to a cgroup getting repeatedly and
97b27821 5048 * severely throttled after making some progress after each
f0953a1b 5049 * dirty_expire_interval while the underlying IO device is almost
97b27821
TH
5050 * completely idle.
5051 *
5052 * Solving this problem completely requires matching the ownership tracking
5053 * granularities between memcg and writeback in either direction. However,
5054 * the more egregious behaviors can be avoided by simply remembering the
5055 * most recent foreign dirtying events and initiating remote flushes on
5056 * them when local writeback isn't enough to keep the memory clean enough.
5057 *
5058 * The following two functions implement such mechanism. When a foreign
5059 * page - a page whose memcg and writeback ownerships don't match - is
5060 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
5061 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
5062 * decides that the memcg needs to sleep due to high dirty ratio, it calls
5063 * mem_cgroup_flush_foreign() which queues writeback on the recorded
5064 * foreign bdi_writebacks which haven't expired. Both the numbers of
5065 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
5066 * limited to MEMCG_CGWB_FRN_CNT.
5067 *
5068 * The mechanism only remembers IDs and doesn't hold any object references.
5069 * As being wrong occasionally doesn't matter, updates and accesses to the
5070 * records are lockless and racy.
5071 */
9d8053fc 5072void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
97b27821
TH
5073 struct bdi_writeback *wb)
5074{
9d8053fc 5075 struct mem_cgroup *memcg = folio_memcg(folio);
97b27821
TH
5076 struct memcg_cgwb_frn *frn;
5077 u64 now = get_jiffies_64();
5078 u64 oldest_at = now;
5079 int oldest = -1;
5080 int i;
5081
9d8053fc 5082 trace_track_foreign_dirty(folio, wb);
3a8e9ac8 5083
97b27821
TH
5084 /*
5085 * Pick the slot to use. If there is already a slot for @wb, keep
5086 * using it. If not, replace the oldest one which isn't being
5087 * written out.
5088 */
5089 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
5090 frn = &memcg->cgwb_frn[i];
5091 if (frn->bdi_id == wb->bdi->id &&
5092 frn->memcg_id == wb->memcg_css->id)
5093 break;
5094 if (time_before64(frn->at, oldest_at) &&
5095 atomic_read(&frn->done.cnt) == 1) {
5096 oldest = i;
5097 oldest_at = frn->at;
5098 }
5099 }
5100
5101 if (i < MEMCG_CGWB_FRN_CNT) {
5102 /*
5103 * Re-using an existing one. Update timestamp lazily to
5104 * avoid making the cacheline hot. We want them to be
5105 * reasonably up-to-date and significantly shorter than
5106 * dirty_expire_interval as that's what expires the record.
5107 * Use the shorter of 1s and dirty_expire_interval / 8.
5108 */
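/*
 * dirty_expire_interval is kept in centiseconds; the "* 10" below
 * converts it to milliseconds before msecs_to_jiffies().
 */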
5109 unsigned long update_intv =
5110 min_t(unsigned long, HZ,
5111 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
5112
5113 if (time_before64(frn->at, now - update_intv))
5114 frn->at = now;
5115 } else if (oldest >= 0) {
5116 /* replace the oldest free one */
5117 frn = &memcg->cgwb_frn[oldest];
5118 frn->bdi_id = wb->bdi->id;
5119 frn->memcg_id = wb->memcg_css->id;
5120 frn->at = now;
5121 }
5122}
5123
5124/* issue foreign writeback flushes for recorded foreign dirtying events */
5125void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
5126{
5127 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
5128 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
5129 u64 now = jiffies_64;
5130 int i;
5131
5132 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
5133 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
5134
5135 /*
5136 * If the record is older than dirty_expire_interval,
5137 * writeback on it has already started. No need to kick it
5138 * off again. Also, don't start a new one if there's
5139 * already one in flight.
5140 */
5141 if (time_after64(frn->at, now - intv) &&
5142 atomic_read(&frn->done.cnt) == 1) {
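/* consume the record so this slot is treated as expired and not flushed again */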
5143 frn->at = 0;
3a8e9ac8 5144 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
7490a2d2 5145 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
97b27821
TH
5146 WB_REASON_FOREIGN_FLUSH,
5147 &frn->done);
5148 }
5149 }
5150}
5151
841710aa
TH
5152#else /* CONFIG_CGROUP_WRITEBACK */
5153
5154static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
5155{
5156 return 0;
5157}
5158
5159static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
5160{
5161}
5162
2529bb3a
TH
5163static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
5164{
5165}
5166
52ebea74
TH
5167#endif /* CONFIG_CGROUP_WRITEBACK */
5168
3bc942f3
TH
5169/*
5170 * DO NOT USE IN NEW FILES.
5171 *
5172 * "cgroup.event_control" implementation.
5173 *
5174 * This is way over-engineered. It tries to support fully configurable
5175 * events for each user. Such a level of flexibility is completely
5176 * unnecessary, especially in light of the planned unified hierarchy.
5177 *
5178 * Please deprecate this and replace with something simpler if at all
5179 * possible.
5180 */
5181
79bd9814
TH
5182/*
5183 * Unregister event and free resources.
5184 *
5185 * Gets called from workqueue.
5186 */
3bc942f3 5187static void memcg_event_remove(struct work_struct *work)
79bd9814 5188{
3bc942f3
TH
5189 struct mem_cgroup_event *event =
5190 container_of(work, struct mem_cgroup_event, remove);
59b6f873 5191 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
5192
5193 remove_wait_queue(event->wqh, &event->wait);
5194
59b6f873 5195 event->unregister_event(memcg, event->eventfd);
79bd9814
TH
5196
5197 /* Notify userspace the event is going away. */
3652117f 5198 eventfd_signal(event->eventfd);
79bd9814
TH
5199
5200 eventfd_ctx_put(event->eventfd);
5201 kfree(event);
59b6f873 5202 css_put(&memcg->css);
79bd9814
TH
5203}
5204
5205/*
a9a08845 5206 * Gets called on EPOLLHUP on eventfd when user closes it.
79bd9814
TH
5207 *
5208 * Called with wqh->lock held and interrupts disabled.
5209 */
ac6424b9 5210static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3bc942f3 5211 int sync, void *key)
79bd9814 5212{
3bc942f3
TH
5213 struct mem_cgroup_event *event =
5214 container_of(wait, struct mem_cgroup_event, wait);
59b6f873 5215 struct mem_cgroup *memcg = event->memcg;
3ad6f93e 5216 __poll_t flags = key_to_poll(key);
79bd9814 5217
a9a08845 5218 if (flags & EPOLLHUP) {
79bd9814
TH
5219 /*
5220 * If the event has been detached at cgroup removal, we
5221 * can simply return knowing the other side will clean up
5222 * for us.
5223 *
5224 * We can't race against event freeing since the other
5225 * side will require wqh->lock via remove_wait_queue(),
5226 * which we hold.
5227 */
fba94807 5228 spin_lock(&memcg->event_list_lock);
79bd9814
TH
5229 if (!list_empty(&event->list)) {
5230 list_del_init(&event->list);
5231 /*
5232 * We are in atomic context, but memcg_event_remove()
5233 * may sleep, so we have to call it from a workqueue.
5234 */
5235 schedule_work(&event->remove);
5236 }
fba94807 5237 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
5238 }
5239
5240 return 0;
5241}
5242
3bc942f3 5243static void memcg_event_ptable_queue_proc(struct file *file,
79bd9814
TH
5244 wait_queue_head_t *wqh, poll_table *pt)
5245{
3bc942f3
TH
5246 struct mem_cgroup_event *event =
5247 container_of(pt, struct mem_cgroup_event, pt);
79bd9814
TH
5248
5249 event->wqh = wqh;
5250 add_wait_queue(wqh, &event->wait);
5251}
5252
5253/*
3bc942f3
TH
5254 * DO NOT USE IN NEW FILES.
5255 *
79bd9814
TH
5256 * Parse input and register new cgroup event handler.
5257 *
5258 * Input must be in format '<event_fd> <control_fd> <args>'.
5259 * Interpretation of args is defined by control file implementation.
5260 */
451af504
TH
5261static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5262 char *buf, size_t nbytes, loff_t off)
79bd9814 5263{
451af504 5264 struct cgroup_subsys_state *css = of_css(of);
fba94807 5265 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5266 struct mem_cgroup_event *event;
79bd9814
TH
5267 struct cgroup_subsys_state *cfile_css;
5268 unsigned int efd, cfd;
5269 struct fd efile;
5270 struct fd cfile;
4a7ba45b 5271 struct dentry *cdentry;
fba94807 5272 const char *name;
79bd9814
TH
5273 char *endp;
5274 int ret;
5275
2343e88d
SAS
5276 if (IS_ENABLED(CONFIG_PREEMPT_RT))
5277 return -EOPNOTSUPP;
5278
451af504
TH
5279 buf = strstrip(buf);
5280
5281 efd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
5282 if (*endp != ' ')
5283 return -EINVAL;
451af504 5284 buf = endp + 1;
79bd9814 5285
451af504 5286 cfd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
5287 if ((*endp != ' ') && (*endp != '\0'))
5288 return -EINVAL;
451af504 5289 buf = endp + 1;
79bd9814
TH
5290
5291 event = kzalloc(sizeof(*event), GFP_KERNEL);
5292 if (!event)
5293 return -ENOMEM;
5294
59b6f873 5295 event->memcg = memcg;
79bd9814 5296 INIT_LIST_HEAD(&event->list);
3bc942f3
TH
5297 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5298 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5299 INIT_WORK(&event->remove, memcg_event_remove);
79bd9814
TH
5300
5301 efile = fdget(efd);
5302 if (!efile.file) {
5303 ret = -EBADF;
5304 goto out_kfree;
5305 }
5306
5307 event->eventfd = eventfd_ctx_fileget(efile.file);
5308 if (IS_ERR(event->eventfd)) {
5309 ret = PTR_ERR(event->eventfd);
5310 goto out_put_efile;
5311 }
5312
5313 cfile = fdget(cfd);
5314 if (!cfile.file) {
5315 ret = -EBADF;
5316 goto out_put_eventfd;
5317 }
5318
5319 /* the process needs read permission on the control file */
5320 /* AV: shouldn't we check that it's been opened for read instead? */
02f92b38 5321 ret = file_permission(cfile.file, MAY_READ);
79bd9814
TH
5322 if (ret < 0)
5323 goto out_put_cfile;
5324
4a7ba45b
TH
5325 /*
5326 * The control file must be a regular cgroup1 file. As a regular cgroup
5327 * file can't be renamed, it's safe to access its name afterwards.
5328 */
5329 cdentry = cfile.file->f_path.dentry;
5330 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5331 ret = -EINVAL;
5332 goto out_put_cfile;
5333 }
5334
fba94807
TH
5335 /*
5336 * Determine the event callbacks and set them in @event. This used
5337 * to be done via struct cftype but cgroup core no longer knows
5338 * about these events. The following is crude but the whole thing
5339 * is for compatibility anyway.
3bc942f3
TH
5340 *
5341 * DO NOT ADD NEW FILES.
fba94807 5342 */
4a7ba45b 5343 name = cdentry->d_name.name;
fba94807
TH
5344
5345 if (!strcmp(name, "memory.usage_in_bytes")) {
5346 event->register_event = mem_cgroup_usage_register_event;
5347 event->unregister_event = mem_cgroup_usage_unregister_event;
5348 } else if (!strcmp(name, "memory.oom_control")) {
5349 event->register_event = mem_cgroup_oom_register_event;
5350 event->unregister_event = mem_cgroup_oom_unregister_event;
5351 } else if (!strcmp(name, "memory.pressure_level")) {
5352 event->register_event = vmpressure_register_event;
5353 event->unregister_event = vmpressure_unregister_event;
5354 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
347c4a87
TH
5355 event->register_event = memsw_cgroup_usage_register_event;
5356 event->unregister_event = memsw_cgroup_usage_unregister_event;
fba94807
TH
5357 } else {
5358 ret = -EINVAL;
5359 goto out_put_cfile;
5360 }
5361
79bd9814 5362 /*
b5557c4c
TH
5363 * Verify that @cfile belongs to @css. Also, remaining events are
5364 * automatically removed on cgroup destruction but the removal is
5365 * asynchronous, so take an extra ref on @css.
79bd9814 5366 */
4a7ba45b 5367 cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
ec903c0c 5368 &memory_cgrp_subsys);
79bd9814 5369 ret = -EINVAL;
5a17f543 5370 if (IS_ERR(cfile_css))
79bd9814 5371 goto out_put_cfile;
5a17f543
TH
5372 if (cfile_css != css) {
5373 css_put(cfile_css);
79bd9814 5374 goto out_put_cfile;
5a17f543 5375 }
79bd9814 5376
451af504 5377 ret = event->register_event(memcg, event->eventfd, buf);
79bd9814
TH
5378 if (ret)
5379 goto out_put_css;
5380
9965ed17 5381 vfs_poll(efile.file, &event->pt);
79bd9814 5382
4ba9515d 5383 spin_lock_irq(&memcg->event_list_lock);
fba94807 5384 list_add(&event->list, &memcg->event_list);
4ba9515d 5385 spin_unlock_irq(&memcg->event_list_lock);
79bd9814
TH
5386
5387 fdput(cfile);
5388 fdput(efile);
5389
451af504 5390 return nbytes;
79bd9814
TH
5391
5392out_put_css:
b5557c4c 5393 css_put(css);
79bd9814
TH
5394out_put_cfile:
5395 fdput(cfile);
5396out_put_eventfd:
5397 eventfd_ctx_put(event->eventfd);
5398out_put_efile:
5399 fdput(efile);
5400out_kfree:
5401 kfree(event);
5402
5403 return ret;
5404}
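/*
 * Illustrative userspace sketch (not part of this file): how the
 * "<event_fd> <control_fd> <args>" format parsed above is typically used,
 * here to wait for OOM notifications from memory.oom_control. The mount
 * point and cgroup name are assumptions for the example; oom_control takes
 * no extra <args>.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/sys/fs/cgroup/memory/mygroup";	/* assumed path */
	char path[256], cmd[64];
	uint64_t count;
	int efd, cfd, ecfd;

	efd = eventfd(0, 0);					/* <event_fd> */
	snprintf(path, sizeof(path), "%s/memory.oom_control", dir);
	cfd = open(path, O_RDONLY);				/* <control_fd> */
	snprintf(path, sizeof(path), "%s/cgroup.event_control", dir);
	ecfd = open(path, O_WRONLY);
	if (efd < 0 || cfd < 0 || ecfd < 0)
		return 1;

	snprintf(cmd, sizeof(cmd), "%d %d", efd, cfd);
	if (write(ecfd, cmd, strlen(cmd)) < 0)
		return 1;

	if (read(efd, &count, sizeof(count)) == (ssize_t)sizeof(count))	/* blocks until an OOM event */
		printf("memcg OOM events: %llu\n", (unsigned long long)count);
	return 0;
}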
5405
bc3dcb85 5406#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
c29b5b3d
MS
5407static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5408{
5409 /*
5410 * Deprecated.
df4ae285 5411 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
c29b5b3d
MS
5412 */
5413 return 0;
5414}
5415#endif
5416
dddb44ff
YA
5417static int memory_stat_show(struct seq_file *m, void *v);
5418
241994ed 5419static struct cftype mem_cgroup_legacy_files[] = {
8cdea7c0 5420 {
0eea1030 5421 .name = "usage_in_bytes",
8c7c6e34 5422 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
791badbd 5423 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5424 },
c84872e1
PE
5425 {
5426 .name = "max_usage_in_bytes",
8c7c6e34 5427 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6770c64e 5428 .write = mem_cgroup_reset,
791badbd 5429 .read_u64 = mem_cgroup_read_u64,
c84872e1 5430 },
8cdea7c0 5431 {
0eea1030 5432 .name = "limit_in_bytes",
8c7c6e34 5433 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
451af504 5434 .write = mem_cgroup_write,
791badbd 5435 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5436 },
296c81d8
BS
5437 {
5438 .name = "soft_limit_in_bytes",
5439 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
451af504 5440 .write = mem_cgroup_write,
791badbd 5441 .read_u64 = mem_cgroup_read_u64,
296c81d8 5442 },
8cdea7c0
BS
5443 {
5444 .name = "failcnt",
8c7c6e34 5445 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6770c64e 5446 .write = mem_cgroup_reset,
791badbd 5447 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5448 },
d2ceb9b7
KH
5449 {
5450 .name = "stat",
dddb44ff 5451 .seq_show = memory_stat_show,
d2ceb9b7 5452 },
c1e862c1
KH
5453 {
5454 .name = "force_empty",
6770c64e 5455 .write = mem_cgroup_force_empty_write,
c1e862c1 5456 },
18f59ea7
BS
5457 {
5458 .name = "use_hierarchy",
5459 .write_u64 = mem_cgroup_hierarchy_write,
5460 .read_u64 = mem_cgroup_hierarchy_read,
5461 },
79bd9814 5462 {
3bc942f3 5463 .name = "cgroup.event_control", /* XXX: for compat */
451af504 5464 .write = memcg_write_event_control,
7dbdb199 5465 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
79bd9814 5466 },
a7885eb8
KM
5467 {
5468 .name = "swappiness",
5469 .read_u64 = mem_cgroup_swappiness_read,
5470 .write_u64 = mem_cgroup_swappiness_write,
5471 },
7dc74be0
DN
5472 {
5473 .name = "move_charge_at_immigrate",
5474 .read_u64 = mem_cgroup_move_charge_read,
5475 .write_u64 = mem_cgroup_move_charge_write,
5476 },
9490ff27
KH
5477 {
5478 .name = "oom_control",
2da8ca82 5479 .seq_show = mem_cgroup_oom_control_read,
3c11ecf4 5480 .write_u64 = mem_cgroup_oom_control_write,
9490ff27 5481 },
70ddf637
AV
5482 {
5483 .name = "pressure_level",
6b0ba2ab 5484 .seq_show = mem_cgroup_dummy_seq_show,
70ddf637 5485 },
406eb0c9
YH
5486#ifdef CONFIG_NUMA
5487 {
5488 .name = "numa_stat",
2da8ca82 5489 .seq_show = memcg_numa_stat_show,
406eb0c9
YH
5490 },
5491#endif
4597648f
MH
5492 {
5493 .name = "kmem.limit_in_bytes",
5494 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5495 .write = mem_cgroup_write,
5496 .read_u64 = mem_cgroup_read_u64,
5497 },
510fc4e1
GC
5498 {
5499 .name = "kmem.usage_in_bytes",
5500 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
791badbd 5501 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5502 },
5503 {
5504 .name = "kmem.failcnt",
5505 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6770c64e 5506 .write = mem_cgroup_reset,
791badbd 5507 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5508 },
5509 {
5510 .name = "kmem.max_usage_in_bytes",
5511 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6770c64e 5512 .write = mem_cgroup_reset,
791badbd 5513 .read_u64 = mem_cgroup_read_u64,
510fc4e1 5514 },
bc3dcb85 5515#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
749c5415
GC
5516 {
5517 .name = "kmem.slabinfo",
c29b5b3d 5518 .seq_show = mem_cgroup_slab_show,
749c5415
GC
5519 },
5520#endif
d55f90bf
VD
5521 {
5522 .name = "kmem.tcp.limit_in_bytes",
5523 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5524 .write = mem_cgroup_write,
5525 .read_u64 = mem_cgroup_read_u64,
5526 },
5527 {
5528 .name = "kmem.tcp.usage_in_bytes",
5529 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5530 .read_u64 = mem_cgroup_read_u64,
5531 },
5532 {
5533 .name = "kmem.tcp.failcnt",
5534 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5535 .write = mem_cgroup_reset,
5536 .read_u64 = mem_cgroup_read_u64,
5537 },
5538 {
5539 .name = "kmem.tcp.max_usage_in_bytes",
5540 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5541 .write = mem_cgroup_reset,
5542 .read_u64 = mem_cgroup_read_u64,
5543 },
6bc10349 5544 { }, /* terminate */
af36f906 5545};
8c7c6e34 5546
73f576c0
JW
5547/*
5548 * Private memory cgroup IDR
5549 *
5550 * Swap-out records and page cache shadow entries need to store memcg
5551 * references in constrained space, so we maintain an ID space that is
5552 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5553 * memory-controlled cgroups to 64k.
5554 *
b8f2935f 5555 * However, there usually are many references to the offline CSS after
73f576c0
JW
5556 * the cgroup has been destroyed, such as page cache or reclaimable
5557 * slab objects, that don't need to hang on to the ID. We want to keep
5558 * those dead CSS from occupying IDs, or we might quickly exhaust the
5559 * relatively small ID space and prevent the creation of new cgroups
5560 * even when there are much fewer than 64k cgroups - possibly none.
5561 *
5562 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5563 * be freed and recycled when it's no longer needed, which is usually
5564 * when the CSS is offlined.
5565 *
5566 * The only exception to that are records of swapped out tmpfs/shmem
5567 * pages that need to be attributed to live ancestors on swapin. But
5568 * those references are manageable from userspace.
5569 */
5570
60b1e24c 5571#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
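/* With MEM_CGROUP_ID_SHIFT == 16 this evaluates to 65535, i.e. the "64k" mentioned above. */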
73f576c0
JW
5572static DEFINE_IDR(mem_cgroup_idr);
5573
7e97de0b
KT
5574static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5575{
5576 if (memcg->id.id > 0) {
5577 idr_remove(&mem_cgroup_idr, memcg->id.id);
5578 memcg->id.id = 0;
5579 }
5580}
5581
c1514c0a
VF
5582static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5583 unsigned int n)
73f576c0 5584{
1c2d479a 5585 refcount_add(n, &memcg->id.ref);
73f576c0
JW
5586}
5587
615d66c3 5588static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
73f576c0 5589{
1c2d479a 5590 if (refcount_sub_and_test(n, &memcg->id.ref)) {
7e97de0b 5591 mem_cgroup_id_remove(memcg);
73f576c0
JW
5592
5593 /* Memcg ID pins CSS */
5594 css_put(&memcg->css);
5595 }
5596}
5597
615d66c3
VD
5598static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5599{
5600 mem_cgroup_id_put_many(memcg, 1);
5601}
5602
73f576c0
JW
5603/**
5604 * mem_cgroup_from_id - look up a memcg from a memcg id
5605 * @id: the memcg id to look up
5606 *
5607 * Caller must hold rcu_read_lock().
5608 */
5609struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5610{
5611 WARN_ON_ONCE(!rcu_read_lock_held());
5612 return idr_find(&mem_cgroup_idr, id);
5613}
5614
c15187a4
RG
5615#ifdef CONFIG_SHRINKER_DEBUG
5616struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5617{
5618 struct cgroup *cgrp;
5619 struct cgroup_subsys_state *css;
5620 struct mem_cgroup *memcg;
5621
5622 cgrp = cgroup_get_from_id(ino);
fa7e439c 5623 if (IS_ERR(cgrp))
c0f2df49 5624 return ERR_CAST(cgrp);
c15187a4
RG
5625
5626 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5627 if (css)
5628 memcg = container_of(css, struct mem_cgroup, css);
5629 else
5630 memcg = ERR_PTR(-ENOENT);
5631
5632 cgroup_put(cgrp);
5633
5634 return memcg;
5635}
5636#endif
5637
a8248bb7 5638static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
5639{
5640 struct mem_cgroup_per_node *pn;
8c9bb398
WY
5641
5642 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
6d12e2d8 5643 if (!pn)
a8248bb7 5644 return false;
1ecaab2b 5645
aab6103b
RG
5646 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
5647 GFP_KERNEL_ACCOUNT, node);
70a64b79
SB
5648 if (!pn->lruvec_stats)
5649 goto fail;
5650
7e1c0d6f
SB
5651 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5652 GFP_KERNEL_ACCOUNT);
70a64b79
SB
5653 if (!pn->lruvec_stats_percpu)
5654 goto fail;
00f3ca2c 5655
ef8f2327 5656 lruvec_init(&pn->lruvec);
ef8f2327
MG
5657 pn->memcg = memcg;
5658
54f72fe0 5659 memcg->nodeinfo[node] = pn;
a8248bb7 5660 return true;
70a64b79
SB
5661fail:
5662 kfree(pn->lruvec_stats);
5663 kfree(pn);
a8248bb7 5664 return false;
6d12e2d8
KH
5665}
5666
ef8f2327 5667static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
1ecaab2b 5668{
00f3ca2c
JW
5669 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5670
4eaf431f
MH
5671 if (!pn)
5672 return;
5673
7e1c0d6f 5674 free_percpu(pn->lruvec_stats_percpu);
70a64b79 5675 kfree(pn->lruvec_stats);
00f3ca2c 5676 kfree(pn);
1ecaab2b
KH
5677}
5678
40e952f9 5679static void __mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 5680{
c8b2a36f 5681 int node;
59927fb9 5682
91b71e78 5683 obj_cgroup_put(memcg->orig_objcg);
675d6c9b 5684
c8b2a36f 5685 for_each_node(node)
ef8f2327 5686 free_mem_cgroup_per_node_info(memcg, node);
410f8e82 5687 kfree(memcg->vmstats);
871789d4 5688 free_percpu(memcg->vmstats_percpu);
8ff69e2c 5689 kfree(memcg);
59927fb9 5690}
3afe36b1 5691
40e952f9
TE
5692static void mem_cgroup_free(struct mem_cgroup *memcg)
5693{
ec1c86b2 5694 lru_gen_exit_memcg(memcg);
40e952f9
TE
5695 memcg_wb_domain_exit(memcg);
5696 __mem_cgroup_free(memcg);
5697}
5698
9cee7e8e 5699static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
8cdea7c0 5700{
9cee7e8e 5701 struct memcg_vmstats_percpu *statc, *pstatc;
d142e3e6 5702 struct mem_cgroup *memcg;
9cee7e8e 5703 int node, cpu;
97b27821 5704 int __maybe_unused i;
11d67612 5705 long error = -ENOMEM;
8cdea7c0 5706
06b2c3b0 5707 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
c0ff4b85 5708 if (!memcg)
11d67612 5709 return ERR_PTR(error);
0b8f73e1 5710
73f576c0 5711 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
be740503 5712 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
11d67612
YS
5713 if (memcg->id.id < 0) {
5714 error = memcg->id.id;
73f576c0 5715 goto fail;
11d67612 5716 }
73f576c0 5717
aab6103b
RG
5718 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
5719 GFP_KERNEL_ACCOUNT);
410f8e82
SB
5720 if (!memcg->vmstats)
5721 goto fail;
5722
3e38e0aa
RG
5723 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5724 GFP_KERNEL_ACCOUNT);
871789d4 5725 if (!memcg->vmstats_percpu)
0b8f73e1 5726 goto fail;
78fb7466 5727
9cee7e8e
YA
5728 for_each_possible_cpu(cpu) {
5729 if (parent)
5730 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5731 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5732 statc->parent = parent ? pstatc : NULL;
5733 statc->vmstats = memcg->vmstats;
5734 }
5735
3ed28fa1 5736 for_each_node(node)
a8248bb7 5737 if (!alloc_mem_cgroup_per_node_info(memcg, node))
0b8f73e1 5738 goto fail;
f64c3f54 5739
0b8f73e1
JW
5740 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5741 goto fail;
28dbc4b6 5742
f7e1cb6e 5743 INIT_WORK(&memcg->high_work, high_work_func);
d142e3e6 5744 INIT_LIST_HEAD(&memcg->oom_notify);
d142e3e6
GC
5745 mutex_init(&memcg->thresholds_lock);
5746 spin_lock_init(&memcg->move_lock);
70ddf637 5747 vmpressure_init(&memcg->vmpressure);
fba94807
TH
5748 INIT_LIST_HEAD(&memcg->event_list);
5749 spin_lock_init(&memcg->event_list_lock);
d886f4e4 5750 memcg->socket_pressure = jiffies;
84c07d11 5751#ifdef CONFIG_MEMCG_KMEM
900a38f0 5752 memcg->kmemcg_id = -1;
bf4f0599 5753 INIT_LIST_HEAD(&memcg->objcg_list);
900a38f0 5754#endif
52ebea74
TH
5755#ifdef CONFIG_CGROUP_WRITEBACK
5756 INIT_LIST_HEAD(&memcg->cgwb_list);
97b27821
TH
5757 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5758 memcg->cgwb_frn[i].done =
5759 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
87eaceb3
YS
5760#endif
5761#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5762 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5763 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5764 memcg->deferred_split_queue.split_queue_len = 0;
52ebea74 5765#endif
ec1c86b2 5766 lru_gen_init_memcg(memcg);
0b8f73e1
JW
5767 return memcg;
5768fail:
7e97de0b 5769 mem_cgroup_id_remove(memcg);
40e952f9 5770 __mem_cgroup_free(memcg);
11d67612 5771 return ERR_PTR(error);
d142e3e6
GC
5772}
5773
0b8f73e1
JW
5774static struct cgroup_subsys_state * __ref
5775mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
d142e3e6 5776{
0b8f73e1 5777 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
b87d8cef 5778 struct mem_cgroup *memcg, *old_memcg;
d142e3e6 5779
b87d8cef 5780 old_memcg = set_active_memcg(parent);
9cee7e8e 5781 memcg = mem_cgroup_alloc(parent);
b87d8cef 5782 set_active_memcg(old_memcg);
11d67612
YS
5783 if (IS_ERR(memcg))
5784 return ERR_CAST(memcg);
d142e3e6 5785
d1663a90 5786 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
2178e20c 5787 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
f4840ccf
JW
5788#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5789 memcg->zswap_max = PAGE_COUNTER_MAX;
501a06fe
NP
5790 WRITE_ONCE(memcg->zswap_writeback,
5791 !parent || READ_ONCE(parent->zswap_writeback));
f4840ccf 5792#endif
4b82ab4f 5793 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
0b8f73e1 5794 if (parent) {
82b3aa26 5795 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
17c56de6 5796 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
bef8620c 5797
3e32cb2e 5798 page_counter_init(&memcg->memory, &parent->memory);
37e84351 5799 page_counter_init(&memcg->swap, &parent->swap);
3e32cb2e 5800 page_counter_init(&memcg->kmem, &parent->kmem);
0db15298 5801 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
18f59ea7 5802 } else {
ff48c71c 5803 init_memcg_stats();
8278f1c7 5804 init_memcg_events();
bef8620c
RG
5805 page_counter_init(&memcg->memory, NULL);
5806 page_counter_init(&memcg->swap, NULL);
5807 page_counter_init(&memcg->kmem, NULL);
5808 page_counter_init(&memcg->tcpmem, NULL);
d6441637 5809
0b8f73e1
JW
5810 root_mem_cgroup = memcg;
5811 return &memcg->css;
5812 }
5813
f7e1cb6e 5814 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5815 static_branch_inc(&memcg_sockets_enabled_key);
f7e1cb6e 5816
b6c1a8af
YS
5817#if defined(CONFIG_MEMCG_KMEM)
5818 if (!cgroup_memory_nobpf)
5819 static_branch_inc(&memcg_bpf_enabled_key);
5820#endif
5821
0b8f73e1 5822 return &memcg->css;
0b8f73e1
JW
5823}
5824
73f576c0 5825static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
0b8f73e1 5826{
58fa2a55
VD
5827 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5828
da0efe30
MS
5829 if (memcg_online_kmem(memcg))
5830 goto remove_id;
5831
0a4465d3 5832 /*
e4262c4f 5833 * A memcg must be visible for expand_shrinker_info()
0a4465d3
KT
5834 * by the time the maps are allocated. So, we allocate maps
5835 * here, when for_each_mem_cgroup() can't skip it.
5836 */
da0efe30
MS
5837 if (alloc_shrinker_info(memcg))
5838 goto offline_kmem;
0a4465d3 5839
13ef7424 5840 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
aa48e47e 5841 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
396faf88 5842 FLUSH_TIME);
e4dde56c 5843 lru_gen_online_memcg(memcg);
6f0df8e1
JW
5844
5845 /* Online state pins memcg ID, memcg ID pins CSS */
5846 refcount_set(&memcg->id.ref, 1);
5847 css_get(css);
5848
5849 /*
5850 * Ensure mem_cgroup_from_id() works once we're fully online.
5851 *
5852 * We could do this earlier and require callers to filter with
5853 * css_tryget_online(). But right now there are no users that
5854 * need earlier access, and the workingset code relies on the
5855 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5856 * publish it here at the end of onlining. This matches the
5857 * regular ID destruction during offlining.
5858 */
5859 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5860
2f7dd7a4 5861 return 0;
da0efe30
MS
5862offline_kmem:
5863 memcg_offline_kmem(memcg);
5864remove_id:
5865 mem_cgroup_id_remove(memcg);
5866 return -ENOMEM;
8cdea7c0
BS
5867}
5868
eb95419b 5869static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
df878fb0 5870{
eb95419b 5871 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5872 struct mem_cgroup_event *event, *tmp;
79bd9814
TH
5873
5874 /*
5875 * Unregister events and notify userspace.
5876 * Notify userspace about cgroup removing only after rmdir of cgroup
5877 * directory to avoid race between userspace and kernelspace.
5878 */
4ba9515d 5879 spin_lock_irq(&memcg->event_list_lock);
fba94807 5880 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
79bd9814
TH
5881 list_del_init(&event->list);
5882 schedule_work(&event->remove);
5883 }
4ba9515d 5884 spin_unlock_irq(&memcg->event_list_lock);
ec64f515 5885
bf8d5d52 5886 page_counter_set_min(&memcg->memory, 0);
23067153 5887 page_counter_set_low(&memcg->memory, 0);
63677c74 5888
a65b0e76
DC
5889 zswap_memcg_offline_cleanup(memcg);
5890
567e9ab2 5891 memcg_offline_kmem(memcg);
a178015c 5892 reparent_shrinker_deferred(memcg);
52ebea74 5893 wb_memcg_offline(memcg);
e4dde56c 5894 lru_gen_offline_memcg(memcg);
73f576c0 5895
591edfb1
RG
5896 drain_all_stock(memcg);
5897
73f576c0 5898 mem_cgroup_id_put(memcg);
df878fb0
KH
5899}
5900
6df38689
VD
5901static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5902{
5903 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5904
5905 invalidate_reclaim_iterators(memcg);
e4dde56c 5906 lru_gen_release_memcg(memcg);
6df38689
VD
5907}
5908
eb95419b 5909static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
8cdea7c0 5910{
eb95419b 5911 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
97b27821 5912 int __maybe_unused i;
c268e994 5913
97b27821
TH
5914#ifdef CONFIG_CGROUP_WRITEBACK
5915 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5916 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5917#endif
f7e1cb6e 5918 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5919 static_branch_dec(&memcg_sockets_enabled_key);
127424c8 5920
0db15298 5921 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
d55f90bf 5922 static_branch_dec(&memcg_sockets_enabled_key);
3893e302 5923
b6c1a8af
YS
5924#if defined(CONFIG_MEMCG_KMEM)
5925 if (!cgroup_memory_nobpf)
5926 static_branch_dec(&memcg_bpf_enabled_key);
5927#endif
5928
0b8f73e1
JW
5929 vmpressure_cleanup(&memcg->vmpressure);
5930 cancel_work_sync(&memcg->high_work);
5931 mem_cgroup_remove_from_trees(memcg);
e4262c4f 5932 free_shrinker_info(memcg);
0b8f73e1 5933 mem_cgroup_free(memcg);
8cdea7c0
BS
5934}
5935
1ced953b
TH
5936/**
5937 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5938 * @css: the target css
5939 *
5940 * Reset the states of the mem_cgroup associated with @css. This is
5941 * invoked when the userland requests disabling on the default hierarchy
5942 * but the memcg is pinned through dependency. The memcg should stop
5943 * applying policies and should revert to the vanilla state as it may be
5944 * made visible again.
5945 *
5946 * The current implementation only resets the essential configurations.
5947 * This needs to be expanded to cover all the visible parts.
5948 */
5949static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5950{
5951 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5952
bbec2e15
RG
5953 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5954 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
bbec2e15
RG
5955 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5956 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
bf8d5d52 5957 page_counter_set_min(&memcg->memory, 0);
23067153 5958 page_counter_set_low(&memcg->memory, 0);
d1663a90 5959 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
2178e20c 5960 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
4b82ab4f 5961 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
2529bb3a 5962 memcg_wb_domain_size_changed(memcg);
1ced953b
TH
5963}
5964
2d146aa3
JW
5965static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5966{
5967 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5968 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5969 struct memcg_vmstats_percpu *statc;
f82e6bf9 5970 long delta, delta_cpu, v;
7e1c0d6f 5971 int i, nid;
2d146aa3
JW
5972
5973 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5974
ff48c71c 5975 for (i = 0; i < MEMCG_VMSTAT_SIZE; i++) {
2d146aa3
JW
5976 /*
5977 * Collect the aggregated propagation counts of groups
5978 * below us. We're in a per-cpu loop here and this is
5979 * a global counter, so the first cycle will get them.
5980 */
410f8e82 5981 delta = memcg->vmstats->state_pending[i];
2d146aa3 5982 if (delta)
410f8e82 5983 memcg->vmstats->state_pending[i] = 0;
2d146aa3
JW
5984
5985 /* Add CPU changes on this level since the last flush */
f82e6bf9 5986 delta_cpu = 0;
2d146aa3
JW
5987 v = READ_ONCE(statc->state[i]);
5988 if (v != statc->state_prev[i]) {
f82e6bf9
YA
5989 delta_cpu = v - statc->state_prev[i];
5990 delta += delta_cpu;
2d146aa3
JW
5991 statc->state_prev[i] = v;
5992 }
5993
2d146aa3 5994 /* Aggregate counts on this level and propagate upwards */
f82e6bf9
YA
5995 if (delta_cpu)
5996 memcg->vmstats->state_local[i] += delta_cpu;
5997
5998 if (delta) {
5999 memcg->vmstats->state[i] += delta;
6000 if (parent)
6001 parent->vmstats->state_pending[i] += delta;
6002 }
2d146aa3
JW
6003 }
6004
8278f1c7 6005 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
410f8e82 6006 delta = memcg->vmstats->events_pending[i];
2d146aa3 6007 if (delta)
410f8e82 6008 memcg->vmstats->events_pending[i] = 0;
2d146aa3 6009
f82e6bf9 6010 delta_cpu = 0;
2d146aa3
JW
6011 v = READ_ONCE(statc->events[i]);
6012 if (v != statc->events_prev[i]) {
f82e6bf9
YA
6013 delta_cpu = v - statc->events_prev[i];
6014 delta += delta_cpu;
2d146aa3
JW
6015 statc->events_prev[i] = v;
6016 }
6017
f82e6bf9
YA
6018 if (delta_cpu)
6019 memcg->vmstats->events_local[i] += delta_cpu;
2d146aa3 6020
f82e6bf9
YA
6021 if (delta) {
6022 memcg->vmstats->events[i] += delta;
6023 if (parent)
6024 parent->vmstats->events_pending[i] += delta;
6025 }
2d146aa3 6026 }
7e1c0d6f
SB
6027
6028 for_each_node_state(nid, N_MEMORY) {
6029 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
70a64b79
SB
6030 struct lruvec_stats *lstats = pn->lruvec_stats;
6031 struct lruvec_stats *plstats = NULL;
7e1c0d6f
SB
6032 struct lruvec_stats_percpu *lstatc;
6033
6034 if (parent)
70a64b79 6035 plstats = parent->nodeinfo[nid]->lruvec_stats;
7e1c0d6f
SB
6036
6037 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
6038
ff48c71c 6039 for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; i++) {
70a64b79 6040 delta = lstats->state_pending[i];
7e1c0d6f 6041 if (delta)
70a64b79 6042 lstats->state_pending[i] = 0;
7e1c0d6f 6043
f82e6bf9 6044 delta_cpu = 0;
7e1c0d6f
SB
6045 v = READ_ONCE(lstatc->state[i]);
6046 if (v != lstatc->state_prev[i]) {
f82e6bf9
YA
6047 delta_cpu = v - lstatc->state_prev[i];
6048 delta += delta_cpu;
7e1c0d6f
SB
6049 lstatc->state_prev[i] = v;
6050 }
6051
f82e6bf9 6052 if (delta_cpu)
70a64b79 6053 lstats->state_local[i] += delta_cpu;
7e1c0d6f 6054
f82e6bf9 6055 if (delta) {
70a64b79
SB
6056 lstats->state[i] += delta;
6057 if (plstats)
6058 plstats->state_pending[i] += delta;
f82e6bf9 6059 }
7e1c0d6f
SB
6060 }
6061 }
78ec6f9d 6062 WRITE_ONCE(statc->stats_updates, 0);
8d59d221
YA
6063 /* We are in a per-cpu loop here, only do the atomic write once */
6064 if (atomic64_read(&memcg->vmstats->stats_updates))
6065 atomic64_set(&memcg->vmstats->stats_updates, 0);
2d146aa3
JW
6066}
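/*
 * Simplified standalone model of the flush pattern used above (names such
 * as demo_level and demo_flush_one are hypothetical, for illustration only):
 * a per-CPU counter is diffed against its previous snapshot, the CPU delta
 * feeds this level's local and hierarchical totals, and the same delta is
 * left as "pending" for the parent to fold in during its own flush pass.
 */
struct demo_level {
	long state;		/* hierarchical total */
	long state_local;	/* this level only */
	long state_pending;	/* waiting to be folded into the parent */
};

static void demo_flush_one(struct demo_level *lvl, struct demo_level *parent,
			   long cpu_val, long *cpu_prev)
{
	long delta = lvl->state_pending;

	lvl->state_pending = 0;
	if (cpu_val != *cpu_prev) {
		long delta_cpu = cpu_val - *cpu_prev;

		lvl->state_local += delta_cpu;
		delta += delta_cpu;
		*cpu_prev = cpu_val;
	}
	if (delta) {
		lvl->state += delta;
		if (parent)
			parent->state_pending += delta;
	}
}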
6067
02491447 6068#ifdef CONFIG_MMU
7dc74be0 6069/* Handlers for move charge at task migration. */
854ffa8d 6070static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 6071{
05b84301 6072 int ret;
9476db97 6073
d0164adc
MG
6074 /* Try a single bulk charge without reclaim first, kswapd may wake */
6075 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
9476db97 6076 if (!ret) {
854ffa8d 6077 mc.precharge += count;
854ffa8d
DN
6078 return ret;
6079 }
9476db97 6080
3674534b 6081 /* Try charges one by one with reclaim, but do not retry */
854ffa8d 6082 while (count--) {
3674534b 6083 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
38c5d72f 6084 if (ret)
38c5d72f 6085 return ret;
854ffa8d 6086 mc.precharge++;
9476db97 6087 cond_resched();
854ffa8d 6088 }
9476db97 6089 return 0;
4ffef5fe
DN
6090}
6091
4ffef5fe 6092union mc_target {
b46777da 6093 struct folio *folio;
02491447 6094 swp_entry_t ent;
4ffef5fe
DN
6095};
6096
4ffef5fe 6097enum mc_target_type {
8d32ff84 6098 MC_TARGET_NONE = 0,
4ffef5fe 6099 MC_TARGET_PAGE,
02491447 6100 MC_TARGET_SWAP,
c733a828 6101 MC_TARGET_DEVICE,
4ffef5fe
DN
6102};
6103
90254a65
DN
6104static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6105 unsigned long addr, pte_t ptent)
4ffef5fe 6106{
25b2995a 6107 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 6108
58f341f7 6109 if (!page)
90254a65
DN
6110 return NULL;
6111 if (PageAnon(page)) {
1dfab5ab 6112 if (!(mc.flags & MOVE_ANON))
90254a65 6113 return NULL;
1dfab5ab
JW
6114 } else {
6115 if (!(mc.flags & MOVE_FILE))
6116 return NULL;
6117 }
58f341f7 6118 get_page(page);
90254a65
DN
6119
6120 return page;
6121}
6122
c733a828 6123#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
90254a65 6124static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 6125 pte_t ptent, swp_entry_t *entry)
90254a65 6126{
90254a65
DN
6127 struct page *page = NULL;
6128 swp_entry_t ent = pte_to_swp_entry(ptent);
6129
9a137153 6130 if (!(mc.flags & MOVE_ANON))
90254a65 6131 return NULL;
c733a828
JG
6132
6133 /*
27674ef6
CH
6134 * Handle device private pages that are not accessible by the CPU, but
6135 * stored as special swap entries in the page table.
c733a828
JG
6136 */
6137 if (is_device_private_entry(ent)) {
af5cdaf8 6138 page = pfn_swap_entry_to_page(ent);
27674ef6 6139 if (!get_page_unless_zero(page))
c733a828
JG
6140 return NULL;
6141 return page;
6142 }
6143
9a137153
RC
6144 if (non_swap_entry(ent))
6145 return NULL;
6146
4b91355e 6147 /*
cb691e2f 6148 * Because swap_cache_get_folio() updates some statistics counter,
4b91355e
KH
6149 * we call find_get_page() with swapper_space directly.
6150 */
f6ab1f7f 6151 page = find_get_page(swap_address_space(ent), swp_offset(ent));
2d1c4980 6152 entry->val = ent.val;
90254a65
DN
6153
6154 return page;
6155}
4b91355e
KH
6156#else
6157static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 6158 pte_t ptent, swp_entry_t *entry)
4b91355e
KH
6159{
6160 return NULL;
6161}
6162#endif
90254a65 6163
87946a72 6164static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
48384b0b 6165 unsigned long addr, pte_t ptent)
87946a72 6166{
524984ff
MWO
6167 unsigned long index;
6168 struct folio *folio;
6169
87946a72
DN
6170 if (!vma->vm_file) /* anonymous vma */
6171 return NULL;
1dfab5ab 6172 if (!(mc.flags & MOVE_FILE))
87946a72
DN
6173 return NULL;
6174
524984ff 6175 /* folio is moved even if it's not RSS of this task(page-faulted). */
aa3b1895 6176 /* shmem/tmpfs may report page out on swap: account for that too. */
524984ff
MWO
6177 index = linear_page_index(vma, addr);
6178 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
66dabbb6 6179 if (IS_ERR(folio))
524984ff
MWO
6180 return NULL;
6181 return folio_file_page(folio, index);
87946a72
DN
6182}
6183
b1b0deab 6184/**
b267e1a3
MWO
6185 * mem_cgroup_move_account - move account of the folio
6186 * @folio: The folio.
25843c2b 6187 * @compound: charge the page as compound or small page
b267e1a3
MWO
6188 * @from: mem_cgroup which the folio is moved from.
6189 * @to: mem_cgroup which the folio is moved to. @from != @to.
b1b0deab 6190 *
b267e1a3 6191 * The folio must be locked and not on the LRU.
b1b0deab
CG
6192 *
6193 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
6194 * from old cgroup.
6195 */
b267e1a3 6196static int mem_cgroup_move_account(struct folio *folio,
f627c2f5 6197 bool compound,
b1b0deab
CG
6198 struct mem_cgroup *from,
6199 struct mem_cgroup *to)
6200{
ae8af438
KK
6201 struct lruvec *from_vec, *to_vec;
6202 struct pglist_data *pgdat;
fcce4672 6203 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
8e88bd2d 6204 int nid, ret;
b1b0deab
CG
6205
6206 VM_BUG_ON(from == to);
4e0cf05f 6207 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
fcce4672 6208 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
9c325215 6209 VM_BUG_ON(compound && !folio_test_large(folio));
b1b0deab 6210
b1b0deab 6211 ret = -EINVAL;
fcce4672 6212 if (folio_memcg(folio) != from)
4e0cf05f 6213 goto out;
b1b0deab 6214
fcce4672 6215 pgdat = folio_pgdat(folio);
867e5e1d
JW
6216 from_vec = mem_cgroup_lruvec(from, pgdat);
6217 to_vec = mem_cgroup_lruvec(to, pgdat);
ae8af438 6218
fcce4672 6219 folio_memcg_lock(folio);
b1b0deab 6220
fcce4672
MWO
6221 if (folio_test_anon(folio)) {
6222 if (folio_mapped(folio)) {
be5d0a74
JW
6223 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6224 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6199277b 6225 if (folio_test_pmd_mappable(folio)) {
69473e5d
MS
6226 __mod_lruvec_state(from_vec, NR_ANON_THPS,
6227 -nr_pages);
6228 __mod_lruvec_state(to_vec, NR_ANON_THPS,
6229 nr_pages);
468c3982 6230 }
be5d0a74
JW
6231 }
6232 } else {
0d1c2072
JW
6233 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6234 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6235
fcce4672 6236 if (folio_test_swapbacked(folio)) {
0d1c2072
JW
6237 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6238 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6239 }
6240
fcce4672 6241 if (folio_mapped(folio)) {
49e50d27
JW
6242 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6243 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6244 }
b1b0deab 6245
fcce4672
MWO
6246 if (folio_test_dirty(folio)) {
6247 struct address_space *mapping = folio_mapping(folio);
c4843a75 6248
f56753ac 6249 if (mapping_can_writeback(mapping)) {
49e50d27
JW
6250 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6251 -nr_pages);
6252 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6253 nr_pages);
6254 }
c4843a75
GT
6255 }
6256 }
6257
c449deb2
HD
6258#ifdef CONFIG_SWAP
6259 if (folio_test_swapcache(folio)) {
6260 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6261 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6262 }
6263#endif
fcce4672 6264 if (folio_test_writeback(folio)) {
ae8af438
KK
6265 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6266 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
b1b0deab
CG
6267 }
6268
6269 /*
abb242f5
JW
6270 * All state has been migrated, let's switch to the new memcg.
6271 *
bcfe06bf 6272 * It is safe to change page's memcg here because the page
abb242f5
JW
6273 * is referenced, charged, isolated, and locked: we can't race
6274 * with (un)charging, migration, LRU putback, or anything else
bcfe06bf 6275 * that would rely on a stable page's memory cgroup.
abb242f5 6276 *
6c77b607 6277 * Note that folio_memcg_lock is a memcg lock, not a page lock,
bcfe06bf 6278 * to save space. As soon as we switch page's memory cgroup to a
abb242f5
JW
6279 * new memcg that isn't locked, the above state can change
6280 * concurrently again. Make sure we're truly done with it.
b1b0deab 6281 */
abb242f5 6282 smp_mb();
b1b0deab 6283
1a3e1f40
JW
6284 css_get(&to->css);
6285 css_put(&from->css);
6286
fcce4672 6287 folio->memcg_data = (unsigned long)to;
87eaceb3 6288
f70ad448 6289 __folio_memcg_unlock(from);
b1b0deab
CG
6290
6291 ret = 0;
fcce4672 6292 nid = folio_nid(folio);
b1b0deab
CG
6293
6294 local_irq_disable();
6e0110c2 6295 mem_cgroup_charge_statistics(to, nr_pages);
8e88bd2d 6296 memcg_check_events(to, nid);
6e0110c2 6297 mem_cgroup_charge_statistics(from, -nr_pages);
8e88bd2d 6298 memcg_check_events(from, nid);
b1b0deab 6299 local_irq_enable();
b1b0deab
CG
6300out:
6301 return ret;
6302}
6303
7cf7806c
LR
6304/**
6305 * get_mctgt_type - get target type of moving charge
6306 * @vma: the vma the pte to be checked belongs
6307 * @addr: the address corresponding to the pte to be checked
6308 * @ptent: the pte to be checked
6309 * @target: the pointer the target page or swap ent will be stored(can be NULL)
6310 *
853f62a3
MWO
6311 * Context: Called with pte lock held.
6312 * Return:
6313 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6314 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
b46777da 6315 * move charge. If @target is not NULL, the folio is stored in target->folio
853f62a3
MWO
6316 * with extra refcnt taken (Caller should release it).
6317 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6318 * target for charge migration. If @target is not NULL, the entry is
6319 * stored in target->ent.
6320 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6321 * thus not on the LRU. For now such a page is charged like a regular page
6322 * would be, as it is just special memory taking the place of a regular page.
6323 * See Documentation/vm/hmm.txt and include/linux/hmm.h
7cf7806c 6324 */
8d32ff84 6325static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
6326 unsigned long addr, pte_t ptent, union mc_target *target)
6327{
6328 struct page *page = NULL;
b67fa6e4 6329 struct folio *folio;
8d32ff84 6330 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
6331 swp_entry_t ent = { .val = 0 };
6332
6333 if (pte_present(ptent))
6334 page = mc_handle_present_pte(vma, addr, ptent);
5c041f5d
PX
6335 else if (pte_none_mostly(ptent))
6336 /*
6337 * PTE markers should be treated as a none pte here, separated
6338 * from other swap handling below.
6339 */
6340 page = mc_handle_file_pte(vma, addr, ptent);
90254a65 6341 else if (is_swap_pte(ptent))
48406ef8 6342 page = mc_handle_swap_pte(vma, ptent, &ent);
90254a65 6343
b67fa6e4
MWO
6344 if (page)
6345 folio = page_folio(page);
4e0cf05f 6346 if (target && page) {
b67fa6e4
MWO
6347 if (!folio_trylock(folio)) {
6348 folio_put(folio);
4e0cf05f
JW
6349 return ret;
6350 }
6351 /*
6352 * page_mapped() must be stable during the move. This
6353 * pte is locked, so if it's present, the page cannot
6354 * become unmapped. If it isn't, we have only partial
6355 * control over the mapped state: the page lock will
6356 * prevent new faults against pagecache and swapcache,
6357 * so an unmapped page cannot become mapped. However,
6358 * if the page is already mapped elsewhere, it can
6359 * unmap, and there is nothing we can do about it.
6360 * Alas, skip moving the page in this case.
6361 */
6362 if (!pte_present(ptent) && page_mapped(page)) {
b67fa6e4
MWO
6363 folio_unlock(folio);
6364 folio_put(folio);
4e0cf05f
JW
6365 return ret;
6366 }
6367 }
6368
90254a65 6369 if (!page && !ent.val)
8d32ff84 6370 return ret;
02491447 6371 if (page) {
02491447 6372 /*
0a31bc97 6373 * Do only loose check w/o serialization.
1306a85a 6374 * mem_cgroup_move_account() checks the page is valid or
0a31bc97 6375 * not under LRU exclusion.
02491447 6376 */
b67fa6e4 6377 if (folio_memcg(folio) == mc.from) {
02491447 6378 ret = MC_TARGET_PAGE;
b67fa6e4
MWO
6379 if (folio_is_device_private(folio) ||
6380 folio_is_device_coherent(folio))
c733a828 6381 ret = MC_TARGET_DEVICE;
02491447 6382 if (target)
b67fa6e4 6383 target->folio = folio;
02491447 6384 }
4e0cf05f
JW
6385 if (!ret || !target) {
6386 if (target)
b67fa6e4
MWO
6387 folio_unlock(folio);
6388 folio_put(folio);
4e0cf05f 6389 }
02491447 6390 }
3e14a57b
HY
6391 /*
6392 * There is a swap entry and a page doesn't exist or isn't charged.
6393 * But we cannot move a tail-page in a THP.
6394 */
6395 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
34c00c31 6396 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
6397 ret = MC_TARGET_SWAP;
6398 if (target)
6399 target->ent = ent;
4ffef5fe 6400 }
4ffef5fe
DN
6401 return ret;
6402}
6403
12724850
NH
6404#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6405/*
d6810d73
HY
6406 * We don't consider PMD mapped swapping or file mapped pages because THP does
6407 * not support them for now.
12724850
NH
6408 * Caller should make sure that pmd_trans_huge(pmd) is true.
6409 */
6410static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6411 unsigned long addr, pmd_t pmd, union mc_target *target)
6412{
6413 struct page *page = NULL;
f6c7590b 6414 struct folio *folio;
12724850
NH
6415 enum mc_target_type ret = MC_TARGET_NONE;
6416
84c3fc4e
ZY
6417 if (unlikely(is_swap_pmd(pmd))) {
6418 VM_BUG_ON(thp_migration_supported() &&
6419 !is_pmd_migration_entry(pmd));
6420 return ret;
6421 }
12724850 6422 page = pmd_page(pmd);
309381fe 6423 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
f6c7590b 6424 folio = page_folio(page);
1dfab5ab 6425 if (!(mc.flags & MOVE_ANON))
12724850 6426 return ret;
f6c7590b 6427 if (folio_memcg(folio) == mc.from) {
12724850
NH
6428 ret = MC_TARGET_PAGE;
6429 if (target) {
f6c7590b
MWO
6430 folio_get(folio);
6431 if (!folio_trylock(folio)) {
6432 folio_put(folio);
4e0cf05f
JW
6433 return MC_TARGET_NONE;
6434 }
f6c7590b 6435 target->folio = folio;
12724850
NH
6436 }
6437 }
6438 return ret;
6439}
6440#else
6441static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6442 unsigned long addr, pmd_t pmd, union mc_target *target)
6443{
6444 return MC_TARGET_NONE;
6445}
6446#endif
6447
4ffef5fe
DN
6448static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6449 unsigned long addr, unsigned long end,
6450 struct mm_walk *walk)
6451{
26bcd64a 6452 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
6453 pte_t *pte;
6454 spinlock_t *ptl;
6455
b6ec57f4
KS
6456 ptl = pmd_trans_huge_lock(pmd, vma);
6457 if (ptl) {
c733a828
JG
6458 /*
6459 * Note there cannot be MC_TARGET_DEVICE for now as we do not
25b2995a
CH
6460 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
6461 * this might change.
c733a828 6462 */
12724850
NH
6463 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6464 mc.precharge += HPAGE_PMD_NR;
bf929152 6465 spin_unlock(ptl);
1a5a9906 6466 return 0;
12724850 6467 }
03319327 6468
4ffef5fe 6469 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
04dee9e8
HD
6470 if (!pte)
6471 return 0;
4ffef5fe 6472 for (; addr != end; pte++, addr += PAGE_SIZE)
c33c7948 6473 if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
4ffef5fe
DN
6474 mc.precharge++; /* increment precharge temporarily */
6475 pte_unmap_unlock(pte - 1, ptl);
6476 cond_resched();
6477
7dc74be0
DN
6478 return 0;
6479}
6480
7b86ac33
CH
6481static const struct mm_walk_ops precharge_walk_ops = {
6482 .pmd_entry = mem_cgroup_count_precharge_pte_range,
49b06385 6483 .walk_lock = PGWALK_RDLOCK,
7b86ac33
CH
6484};
6485
4ffef5fe
DN
6486static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6487{
6488 unsigned long precharge;
4ffef5fe 6489
d8ed45c5 6490 mmap_read_lock(mm);
ba0aff8e 6491 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
d8ed45c5 6492 mmap_read_unlock(mm);
4ffef5fe
DN
6493
6494 precharge = mc.precharge;
6495 mc.precharge = 0;
6496
6497 return precharge;
6498}
6499
4ffef5fe
DN
6500static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6501{
dfe076b0
DN
6502 unsigned long precharge = mem_cgroup_count_precharge(mm);
6503
6504 VM_BUG_ON(mc.moving_task);
6505 mc.moving_task = current;
6506 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
6507}
6508
dfe076b0
DN
6509/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6510static void __mem_cgroup_clear_mc(void)
4ffef5fe 6511{
2bd9bb20
KH
6512 struct mem_cgroup *from = mc.from;
6513 struct mem_cgroup *to = mc.to;
6514
4ffef5fe 6515 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d 6516 if (mc.precharge) {
4b569387 6517 mem_cgroup_cancel_charge(mc.to, mc.precharge);
854ffa8d
DN
6518 mc.precharge = 0;
6519 }
6520 /*
6521 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6522 * we must uncharge here.
6523 */
6524 if (mc.moved_charge) {
4b569387 6525 mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
854ffa8d 6526 mc.moved_charge = 0;
4ffef5fe 6527 }
483c30b5
DN
6528 /* we must fixup refcnts and charges */
6529 if (mc.moved_swap) {
483c30b5 6530 /* uncharge swap account from the old cgroup */
ce00a967 6531 if (!mem_cgroup_is_root(mc.from))
3e32cb2e 6532 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
483c30b5 6533
615d66c3
VD
6534 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6535
05b84301 6536 /*
3e32cb2e
JW
6537 * we charged both to->memory and to->memsw, so we
6538 * should uncharge to->memory.
05b84301 6539 */
ce00a967 6540 if (!mem_cgroup_is_root(mc.to))
3e32cb2e
JW
6541 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6542
483c30b5
DN
6543 mc.moved_swap = 0;
6544 }
dfe076b0
DN
6545 memcg_oom_recover(from);
6546 memcg_oom_recover(to);
6547 wake_up_all(&mc.waitq);
6548}
6549
6550static void mem_cgroup_clear_mc(void)
6551{
264a0ae1
TH
6552 struct mm_struct *mm = mc.mm;
6553
dfe076b0
DN
6554 /*
6555 * we must clear moving_task before waking up waiters at the end of
6556 * task migration.
6557 */
6558 mc.moving_task = NULL;
6559 __mem_cgroup_clear_mc();
2bd9bb20 6560 spin_lock(&mc.lock);
4ffef5fe
DN
6561 mc.from = NULL;
6562 mc.to = NULL;
264a0ae1 6563 mc.mm = NULL;
2bd9bb20 6564 spin_unlock(&mc.lock);
264a0ae1
TH
6565
6566 mmput(mm);
4ffef5fe
DN
6567}
6568
1f7dd3e5 6569static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
7dc74be0 6570{
1f7dd3e5 6571 struct cgroup_subsys_state *css;
eed67d75 6572 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
9f2115f9 6573 struct mem_cgroup *from;
4530eddb 6574 struct task_struct *leader, *p;
9f2115f9 6575 struct mm_struct *mm;
1dfab5ab 6576 unsigned long move_flags;
9f2115f9 6577 int ret = 0;
7dc74be0 6578
1f7dd3e5
TH
6579 /* charge immigration isn't supported on the default hierarchy */
6580 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
9f2115f9
TH
6581 return 0;
6582
4530eddb
TH
6583 /*
6584 * Multi-process migrations only happen on the default hierarchy
6585 * where charge immigration is not used. Perform charge
6586 * immigration if @tset contains a leader and whine if there are
6587 * multiple.
6588 */
6589 p = NULL;
1f7dd3e5 6590 cgroup_taskset_for_each_leader(leader, css, tset) {
4530eddb
TH
6591 WARN_ON_ONCE(p);
6592 p = leader;
1f7dd3e5 6593 memcg = mem_cgroup_from_css(css);
4530eddb
TH
6594 }
6595 if (!p)
6596 return 0;
6597
1f7dd3e5 6598 /*
f0953a1b 6599 * We are now committed to this value whatever it is. Changes in this
1f7dd3e5
TH
6600 * tunable will only affect upcoming migrations, not the current one.
6601 * So we need to save it, and keep it going.
6602 */
6603 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6604 if (!move_flags)
6605 return 0;
6606
9f2115f9
TH
6607 from = mem_cgroup_from_task(p);
6608
6609 VM_BUG_ON(from == memcg);
6610
6611 mm = get_task_mm(p);
6612 if (!mm)
6613 return 0;
6614 /* We move charges only when we move the owner of the mm */
6615 if (mm->owner == p) {
6616 VM_BUG_ON(mc.from);
6617 VM_BUG_ON(mc.to);
6618 VM_BUG_ON(mc.precharge);
6619 VM_BUG_ON(mc.moved_charge);
6620 VM_BUG_ON(mc.moved_swap);
6621
6622 spin_lock(&mc.lock);
264a0ae1 6623 mc.mm = mm;
9f2115f9
TH
6624 mc.from = from;
6625 mc.to = memcg;
6626 mc.flags = move_flags;
6627 spin_unlock(&mc.lock);
6628 /* We set mc.moving_task later */
6629
6630 ret = mem_cgroup_precharge_mc(mm);
6631 if (ret)
6632 mem_cgroup_clear_mc();
264a0ae1
TH
6633 } else {
6634 mmput(mm);
7dc74be0
DN
6635 }
6636 return ret;
6637}
6638
1f7dd3e5 6639static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
7dc74be0 6640{
4e2f245d
JW
6641 if (mc.to)
6642 mem_cgroup_clear_mc();
7dc74be0
DN
6643}
6644
4ffef5fe
DN
6645static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6646 unsigned long addr, unsigned long end,
6647 struct mm_walk *walk)
7dc74be0 6648{
4ffef5fe 6649 int ret = 0;
26bcd64a 6650 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
6651 pte_t *pte;
6652 spinlock_t *ptl;
12724850
NH
6653 enum mc_target_type target_type;
6654 union mc_target target;
b267e1a3 6655 struct folio *folio;
4ffef5fe 6656
b6ec57f4
KS
6657 ptl = pmd_trans_huge_lock(pmd, vma);
6658 if (ptl) {
62ade86a 6659 if (mc.precharge < HPAGE_PMD_NR) {
bf929152 6660 spin_unlock(ptl);
12724850
NH
6661 return 0;
6662 }
6663 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6664 if (target_type == MC_TARGET_PAGE) {
b46777da 6665 folio = target.folio;
b267e1a3
MWO
6666 if (folio_isolate_lru(folio)) {
6667 if (!mem_cgroup_move_account(folio, true,
1306a85a 6668 mc.from, mc.to)) {
12724850
NH
6669 mc.precharge -= HPAGE_PMD_NR;
6670 mc.moved_charge += HPAGE_PMD_NR;
6671 }
b267e1a3 6672 folio_putback_lru(folio);
12724850 6673 }
b267e1a3
MWO
6674 folio_unlock(folio);
6675 folio_put(folio);
c733a828 6676 } else if (target_type == MC_TARGET_DEVICE) {
b46777da 6677 folio = target.folio;
b267e1a3 6678 if (!mem_cgroup_move_account(folio, true,
c733a828
JG
6679 mc.from, mc.to)) {
6680 mc.precharge -= HPAGE_PMD_NR;
6681 mc.moved_charge += HPAGE_PMD_NR;
6682 }
b267e1a3
MWO
6683 folio_unlock(folio);
6684 folio_put(folio);
12724850 6685 }
bf929152 6686 spin_unlock(ptl);
1a5a9906 6687 return 0;
12724850
NH
6688 }
6689
4ffef5fe
DN
6690retry:
6691 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
04dee9e8
HD
6692 if (!pte)
6693 return 0;
4ffef5fe 6694 for (; addr != end; addr += PAGE_SIZE) {
c33c7948 6695 pte_t ptent = ptep_get(pte++);
c733a828 6696 bool device = false;
02491447 6697 swp_entry_t ent;
4ffef5fe
DN
6698
6699 if (!mc.precharge)
6700 break;
6701
8d32ff84 6702 switch (get_mctgt_type(vma, addr, ptent, &target)) {
c733a828
JG
6703 case MC_TARGET_DEVICE:
6704 device = true;
e4a9bc58 6705 fallthrough;
4ffef5fe 6706 case MC_TARGET_PAGE:
b46777da 6707 folio = target.folio;
53f9263b
KS
6708 /*
6709 * We can have a part of the split pmd here. Moving it
6710 * can be done, but it would be too convoluted, so simply
6711 * ignore such a partial THP and keep it in the original
6712 * memcg. There should be somebody mapping the head.
6713 */
b267e1a3 6714 if (folio_test_large(folio))
53f9263b 6715 goto put;
b267e1a3 6716 if (!device && !folio_isolate_lru(folio))
4ffef5fe 6717 goto put;
b267e1a3 6718 if (!mem_cgroup_move_account(folio, false,
f627c2f5 6719 mc.from, mc.to)) {
4ffef5fe 6720 mc.precharge--;
854ffa8d
DN
6721 /* we uncharge from mc.from later. */
6722 mc.moved_charge++;
4ffef5fe 6723 }
c733a828 6724 if (!device)
b267e1a3 6725 folio_putback_lru(folio);
4e0cf05f 6726put: /* get_mctgt_type() gets & locks the page */
b267e1a3
MWO
6727 folio_unlock(folio);
6728 folio_put(folio);
4ffef5fe 6729 break;
02491447
DN
6730 case MC_TARGET_SWAP:
6731 ent = target.ent;
e91cbb42 6732 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 6733 mc.precharge--;
8d22a935
HD
6734 mem_cgroup_id_get_many(mc.to, 1);
6735 /* we fixup other refcnts and charges later. */
483c30b5
DN
6736 mc.moved_swap++;
6737 }
02491447 6738 break;
4ffef5fe
DN
6739 default:
6740 break;
6741 }
6742 }
6743 pte_unmap_unlock(pte - 1, ptl);
6744 cond_resched();
6745
6746 if (addr != end) {
6747 /*
6748 * We have consumed all precharges we got in can_attach().
6749 * We try to charge one by one, but don't do any additional
6750 * charges to mc.to if charging has failed once in the attach()
6751 * phase.
6752 */
854ffa8d 6753 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
6754 if (!ret)
6755 goto retry;
6756 }
6757
6758 return ret;
6759}
6760
7b86ac33
CH
6761static const struct mm_walk_ops charge_walk_ops = {
6762 .pmd_entry = mem_cgroup_move_charge_pte_range,
49b06385 6763 .walk_lock = PGWALK_RDLOCK,
7b86ac33
CH
6764};
6765
264a0ae1 6766static void mem_cgroup_move_charge(void)
4ffef5fe 6767{
4ffef5fe 6768 lru_add_drain_all();
312722cb 6769 /*
6c77b607 6770 * Signal folio_memcg_lock() to take the memcg's move_lock
81f8c3a4
JW
6771 * while we're moving its pages to another memcg. Then wait
6772 * for already started RCU-only updates to finish.
312722cb
JW
6773 */
6774 atomic_inc(&mc.from->moving_account);
6775 synchronize_rcu();
dfe076b0 6776retry:
d8ed45c5 6777 if (unlikely(!mmap_read_trylock(mc.mm))) {
dfe076b0 6778 /*
c1e8d7c6 6779 * Someone who is holding the mmap_lock might be waiting in
dfe076b0
DN
6780 * waitq. So we cancel all extra charges, wake up all waiters,
6781 * and retry. Because we cancel precharges, we might not be able
6782 * to move enough charges, but moving charge is a best-effort
6783 * feature anyway, so it wouldn't be a big problem.
6784 */
6785 __mem_cgroup_clear_mc();
6786 cond_resched();
6787 goto retry;
6788 }
26bcd64a
NH
6789 /*
6790 * When we have consumed all precharges and failed to do an
6791 * additional charge, the page walk just aborts.
6792 */
ba0aff8e 6793 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
d8ed45c5 6794 mmap_read_unlock(mc.mm);
312722cb 6795 atomic_dec(&mc.from->moving_account);
7dc74be0
DN
6796}
6797
264a0ae1 6798static void mem_cgroup_move_task(void)
67e465a7 6799{
264a0ae1
TH
6800 if (mc.to) {
6801 mem_cgroup_move_charge();
a433658c 6802 mem_cgroup_clear_mc();
264a0ae1 6803 }
67e465a7 6804}
1aacbd35 6805
5cfb80a7 6806#else /* !CONFIG_MMU */
1f7dd3e5 6807static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6808{
6809 return 0;
6810}
1f7dd3e5 6811static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6812{
6813}
264a0ae1 6814static void mem_cgroup_move_task(void)
5cfb80a7
DN
6815{
6816}
6817#endif
67e465a7 6818
1aacbd35
RG
6819#ifdef CONFIG_MEMCG_KMEM
6820static void mem_cgroup_fork(struct task_struct *task)
6821{
6822 /*
6823 * Set the update flag to cause task->objcg to be initialized lazily
6824 * on the first allocation. It can be done without any synchronization
6825 * because it's always performed on the current task, as does
6826 * current_objcg_update().
6827 */
6828 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6829}
6830
6831static void mem_cgroup_exit(struct task_struct *task)
6832{
6833 struct obj_cgroup *objcg = task->objcg;
6834
6835 objcg = (struct obj_cgroup *)
6836 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
91b71e78 6837 obj_cgroup_put(objcg);
1aacbd35
RG
6838
6839 /*
6840 * Some kernel allocations can happen after this point,
6841 * but let's ignore them. It can be done without any synchronization
6842 * because it's always performed on the current task, as does
6843 * current_objcg_update().
6844 */
6845 task->objcg = NULL;
6846}
6847#endif
6848
bd74fdae 6849#ifdef CONFIG_LRU_GEN
1aacbd35 6850static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
bd74fdae
YZ
6851{
6852 struct task_struct *task;
6853 struct cgroup_subsys_state *css;
6854
6855 /* find the first leader if there is any */
6856 cgroup_taskset_for_each_leader(task, css, tset)
6857 break;
6858
6859 if (!task)
6860 return;
6861
6862 task_lock(task);
6863 if (task->mm && READ_ONCE(task->mm->owner) == task)
6864 lru_gen_migrate_mm(task->mm);
6865 task_unlock(task);
6866}
6867#else
1aacbd35
RG
6868static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6869#endif /* CONFIG_LRU_GEN */
6870
6871#ifdef CONFIG_MEMCG_KMEM
6872static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6873{
6874 struct task_struct *task;
6875 struct cgroup_subsys_state *css;
6876
6877 cgroup_taskset_for_each(task, css, tset) {
6878 /* atomically set the update bit */
6879 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6880 }
6881}
6882#else
6883static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6884#endif /* CONFIG_MEMCG_KMEM */
6885
6886#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
bd74fdae
YZ
6887static void mem_cgroup_attach(struct cgroup_taskset *tset)
6888{
1aacbd35
RG
6889 mem_cgroup_lru_gen_attach(tset);
6890 mem_cgroup_kmem_attach(tset);
bd74fdae 6891}
1aacbd35 6892#endif
bd74fdae 6893
677dc973
CD
6894static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6895{
6896 if (value == PAGE_COUNTER_MAX)
6897 seq_puts(m, "max\n");
6898 else
6899 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6900
6901 return 0;
6902}
6903
241994ed
JW
6904static u64 memory_current_read(struct cgroup_subsys_state *css,
6905 struct cftype *cft)
6906{
f5fc3c5d
JW
6907 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6908
6909 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
241994ed
JW
6910}
6911
8e20d4b3
GR
6912static u64 memory_peak_read(struct cgroup_subsys_state *css,
6913 struct cftype *cft)
6914{
6915 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6916
6917 return (u64)memcg->memory.watermark * PAGE_SIZE;
6918}
6919
bf8d5d52
RG
6920static int memory_min_show(struct seq_file *m, void *v)
6921{
677dc973
CD
6922 return seq_puts_memcg_tunable(m,
6923 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
bf8d5d52
RG
6924}
6925
6926static ssize_t memory_min_write(struct kernfs_open_file *of,
6927 char *buf, size_t nbytes, loff_t off)
6928{
6929 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6930 unsigned long min;
6931 int err;
6932
6933 buf = strstrip(buf);
6934 err = page_counter_memparse(buf, "max", &min);
6935 if (err)
6936 return err;
6937
6938 page_counter_set_min(&memcg->memory, min);
6939
6940 return nbytes;
6941}
6942
241994ed
JW
6943static int memory_low_show(struct seq_file *m, void *v)
6944{
677dc973
CD
6945 return seq_puts_memcg_tunable(m,
6946 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
241994ed
JW
6947}
6948
6949static ssize_t memory_low_write(struct kernfs_open_file *of,
6950 char *buf, size_t nbytes, loff_t off)
6951{
6952 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6953 unsigned long low;
6954 int err;
6955
6956 buf = strstrip(buf);
d2973697 6957 err = page_counter_memparse(buf, "max", &low);
241994ed
JW
6958 if (err)
6959 return err;
6960
23067153 6961 page_counter_set_low(&memcg->memory, low);
241994ed
JW
6962
6963 return nbytes;
6964}
6965
6966static int memory_high_show(struct seq_file *m, void *v)
6967{
d1663a90
JK
6968 return seq_puts_memcg_tunable(m,
6969 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
241994ed
JW
6970}
6971
6972static ssize_t memory_high_write(struct kernfs_open_file *of,
6973 char *buf, size_t nbytes, loff_t off)
6974{
6975 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 6976 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
8c8c383c 6977 bool drained = false;
241994ed
JW
6978 unsigned long high;
6979 int err;
6980
6981 buf = strstrip(buf);
d2973697 6982 err = page_counter_memparse(buf, "max", &high);
241994ed
JW
6983 if (err)
6984 return err;
6985
e82553c1
JW
6986 page_counter_set_high(&memcg->memory, high);
6987
8c8c383c
JW
6988 for (;;) {
6989 unsigned long nr_pages = page_counter_read(&memcg->memory);
6990 unsigned long reclaimed;
6991
6992 if (nr_pages <= high)
6993 break;
6994
6995 if (signal_pending(current))
6996 break;
6997
6998 if (!drained) {
6999 drain_all_stock(memcg);
7000 drained = true;
7001 continue;
7002 }
7003
7004 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
55ab834a 7005 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
8c8c383c
JW
7006
7007 if (!reclaimed && !nr_retries--)
7008 break;
7009 }
588083bb 7010
19ce33ac 7011 memcg_wb_domain_size_changed(memcg);
241994ed
JW
7012 return nbytes;
7013}
7014
7015static int memory_max_show(struct seq_file *m, void *v)
7016{
677dc973
CD
7017 return seq_puts_memcg_tunable(m,
7018 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
241994ed
JW
7019}
7020
7021static ssize_t memory_max_write(struct kernfs_open_file *of,
7022 char *buf, size_t nbytes, loff_t off)
7023{
7024 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 7025 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
b6e6edcf 7026 bool drained = false;
241994ed
JW
7027 unsigned long max;
7028 int err;
7029
7030 buf = strstrip(buf);
d2973697 7031 err = page_counter_memparse(buf, "max", &max);
241994ed
JW
7032 if (err)
7033 return err;
7034
bbec2e15 7035 xchg(&memcg->memory.max, max);
b6e6edcf
JW
7036
7037 for (;;) {
7038 unsigned long nr_pages = page_counter_read(&memcg->memory);
7039
7040 if (nr_pages <= max)
7041 break;
7042
7249c9f0 7043 if (signal_pending(current))
b6e6edcf 7044 break;
b6e6edcf
JW
7045
7046 if (!drained) {
7047 drain_all_stock(memcg);
7048 drained = true;
7049 continue;
7050 }
7051
7052 if (nr_reclaims) {
7053 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
55ab834a 7054 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
b6e6edcf
JW
7055 nr_reclaims--;
7056 continue;
7057 }
7058
e27be240 7059 memcg_memory_event(memcg, MEMCG_OOM);
b6e6edcf
JW
7060 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
7061 break;
7062 }
241994ed 7063
2529bb3a 7064 memcg_wb_domain_size_changed(memcg);
241994ed
JW
7065 return nbytes;
7066}
7067
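/*
 * Illustrative usage of the two writers above (example values, not taken
 * from any particular setup): both files accept a byte count with an
 * optional k/M/G suffix, or the literal string "max" to remove the limit:
 *
 *	echo "1G" > memory.high
 *	echo "max" > memory.max
 *
 * memory.high only throttles and reclaims, while memory.max falls back to
 * invoking the OOM killer once the reclaim retries above are exhausted.
 */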
664dc218
DR
7068/*
7069 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
7070 * if any new events become available.
7071 */
1e577f97
SB
7072static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
7073{
7074 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
7075 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
7076 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
7077 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
7078 seq_printf(m, "oom_kill %lu\n",
7079 atomic_long_read(&events[MEMCG_OOM_KILL]));
b6bf9abb
DS
7080 seq_printf(m, "oom_group_kill %lu\n",
7081 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
1e577f97
SB
7082}
7083
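/*
 * Sample memory.events output produced by the helper above (the counts
 * are illustrative only):
 *
 *	low 0
 *	high 1244
 *	max 7
 *	oom 0
 *	oom_kill 0
 *	oom_group_kill 0
 */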
241994ed
JW
7084static int memory_events_show(struct seq_file *m, void *v)
7085{
aa9694bb 7086 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 7087
1e577f97
SB
7088 __memory_events_show(m, memcg->memory_events);
7089 return 0;
7090}
7091
7092static int memory_events_local_show(struct seq_file *m, void *v)
7093{
7094 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 7095
1e577f97 7096 __memory_events_show(m, memcg->memory_events_local);
241994ed
JW
7097 return 0;
7098}
7099
587d9f72
JW
7100static int memory_stat_show(struct seq_file *m, void *v)
7101{
aa9694bb 7102 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
68aaee14 7103 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
5b42360c 7104 struct seq_buf s;
1ff9e6e1 7105
c8713d0b
JW
7106 if (!buf)
7107 return -ENOMEM;
5b42360c
YA
7108 seq_buf_init(&s, buf, PAGE_SIZE);
7109 memory_stat_format(memcg, &s);
c8713d0b
JW
7110 seq_puts(m, buf);
7111 kfree(buf);
587d9f72
JW
7112 return 0;
7113}
7114
5f9a4f4a 7115#ifdef CONFIG_NUMA
fff66b79
MS
7116static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
7117 int item)
7118{
ff841a06
YA
7119 return lruvec_page_state(lruvec, item) *
7120 memcg_page_state_output_unit(item);
fff66b79
MS
7121}
7122
5f9a4f4a
MS
7123static int memory_numa_stat_show(struct seq_file *m, void *v)
7124{
7125 int i;
7126 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7127
7d7ef0a4 7128 mem_cgroup_flush_stats(memcg);
7e1c0d6f 7129
5f9a4f4a
MS
7130 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
7131 int nid;
7132
7133 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
7134 continue;
7135
7136 seq_printf(m, "%s", memory_stats[i].name);
7137 for_each_node_state(nid, N_MEMORY) {
7138 u64 size;
7139 struct lruvec *lruvec;
7140
7141 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
fff66b79
MS
7142 size = lruvec_page_state_output(lruvec,
7143 memory_stats[i].idx);
5f9a4f4a
MS
7144 seq_printf(m, " N%d=%llu", nid, size);
7145 }
7146 seq_putc(m, '\n');
7147 }
7148
7149 return 0;
7150}
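/*
 * Sample memory.numa_stat line emitted by the loop above (illustrative
 * values; the field names come from the memory_stats[] table):
 *
 *	anon N0=1056768 N1=4096
 */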
7151#endif
7152
3d8b38eb
RG
7153static int memory_oom_group_show(struct seq_file *m, void *v)
7154{
aa9694bb 7155 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3d8b38eb 7156
eaf7b66b 7157 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
3d8b38eb
RG
7158
7159 return 0;
7160}
7161
7162static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
7163 char *buf, size_t nbytes, loff_t off)
7164{
7165 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7166 int ret, oom_group;
7167
7168 buf = strstrip(buf);
7169 if (!buf)
7170 return -EINVAL;
7171
7172 ret = kstrtoint(buf, 0, &oom_group);
7173 if (ret)
7174 return ret;
7175
7176 if (oom_group != 0 && oom_group != 1)
7177 return -EINVAL;
7178
eaf7b66b 7179 WRITE_ONCE(memcg->oom_group, oom_group);
3d8b38eb
RG
7180
7181 return nbytes;
7182}
7183
94968384
SB
7184static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
7185 size_t nbytes, loff_t off)
7186{
7187 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7188 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
7189 unsigned long nr_to_reclaim, nr_reclaimed = 0;
55ab834a
MH
7190 unsigned int reclaim_options;
7191 int err;
12a5d395
MA
7192
7193 buf = strstrip(buf);
55ab834a
MH
7194 err = page_counter_memparse(buf, "", &nr_to_reclaim);
7195 if (err)
7196 return err;
12a5d395 7197
55ab834a 7198 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
94968384 7199 while (nr_reclaimed < nr_to_reclaim) {
287d5fed
M
7200 /* Will converge on zero, but reclaim enforces a minimum */
7201 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
94968384
SB
7202 unsigned long reclaimed;
7203
7204 if (signal_pending(current))
7205 return -EINTR;
7206
7207 /*
7208 * This is the final attempt; drain the percpu lru caches in the
7209 * hope of introducing more evictable pages for
7210 * try_to_free_mem_cgroup_pages().
7211 */
7212 if (!nr_retries)
7213 lru_add_drain_all();
7214
7215 reclaimed = try_to_free_mem_cgroup_pages(memcg,
287d5fed 7216 batch_size, GFP_KERNEL, reclaim_options);
94968384
SB
7217
7218 if (!reclaimed && !nr_retries--)
7219 return -EAGAIN;
7220
7221 nr_reclaimed += reclaimed;
7222 }
7223
7224 return nbytes;
7225}
7226
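/*
 * Illustrative use of the memory.reclaim interface implemented above (the
 * path assumes the usual cgroup2 mount point):
 *
 *	echo "512M" > /sys/fs/cgroup/<group>/memory.reclaim
 *
 * The write attempts proactive reclaim of roughly that amount and returns
 * -EAGAIN if the target cannot be met within the retry budget above.
 */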
241994ed
JW
7227static struct cftype memory_files[] = {
7228 {
7229 .name = "current",
f5fc3c5d 7230 .flags = CFTYPE_NOT_ON_ROOT,
241994ed
JW
7231 .read_u64 = memory_current_read,
7232 },
8e20d4b3
GR
7233 {
7234 .name = "peak",
7235 .flags = CFTYPE_NOT_ON_ROOT,
7236 .read_u64 = memory_peak_read,
7237 },
bf8d5d52
RG
7238 {
7239 .name = "min",
7240 .flags = CFTYPE_NOT_ON_ROOT,
7241 .seq_show = memory_min_show,
7242 .write = memory_min_write,
7243 },
241994ed
JW
7244 {
7245 .name = "low",
7246 .flags = CFTYPE_NOT_ON_ROOT,
7247 .seq_show = memory_low_show,
7248 .write = memory_low_write,
7249 },
7250 {
7251 .name = "high",
7252 .flags = CFTYPE_NOT_ON_ROOT,
7253 .seq_show = memory_high_show,
7254 .write = memory_high_write,
7255 },
7256 {
7257 .name = "max",
7258 .flags = CFTYPE_NOT_ON_ROOT,
7259 .seq_show = memory_max_show,
7260 .write = memory_max_write,
7261 },
7262 {
7263 .name = "events",
7264 .flags = CFTYPE_NOT_ON_ROOT,
472912a2 7265 .file_offset = offsetof(struct mem_cgroup, events_file),
241994ed
JW
7266 .seq_show = memory_events_show,
7267 },
1e577f97
SB
7268 {
7269 .name = "events.local",
7270 .flags = CFTYPE_NOT_ON_ROOT,
7271 .file_offset = offsetof(struct mem_cgroup, events_local_file),
7272 .seq_show = memory_events_local_show,
7273 },
587d9f72
JW
7274 {
7275 .name = "stat",
587d9f72
JW
7276 .seq_show = memory_stat_show,
7277 },
5f9a4f4a
MS
7278#ifdef CONFIG_NUMA
7279 {
7280 .name = "numa_stat",
7281 .seq_show = memory_numa_stat_show,
7282 },
7283#endif
3d8b38eb
RG
7284 {
7285 .name = "oom.group",
7286 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7287 .seq_show = memory_oom_group_show,
7288 .write = memory_oom_group_write,
7289 },
94968384
SB
7290 {
7291 .name = "reclaim",
7292 .flags = CFTYPE_NS_DELEGATABLE,
7293 .write = memory_reclaim,
7294 },
241994ed
JW
7295 { } /* terminate */
7296};
7297
073219e9 7298struct cgroup_subsys memory_cgrp_subsys = {
92fb9748 7299 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 7300 .css_online = mem_cgroup_css_online,
92fb9748 7301 .css_offline = mem_cgroup_css_offline,
6df38689 7302 .css_released = mem_cgroup_css_released,
92fb9748 7303 .css_free = mem_cgroup_css_free,
1ced953b 7304 .css_reset = mem_cgroup_css_reset,
2d146aa3 7305 .css_rstat_flush = mem_cgroup_css_rstat_flush,
7dc74be0 7306 .can_attach = mem_cgroup_can_attach,
1aacbd35 7307#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
bd74fdae 7308 .attach = mem_cgroup_attach,
1aacbd35 7309#endif
7dc74be0 7310 .cancel_attach = mem_cgroup_cancel_attach,
264a0ae1 7311 .post_attach = mem_cgroup_move_task,
1aacbd35
RG
7312#ifdef CONFIG_MEMCG_KMEM
7313 .fork = mem_cgroup_fork,
7314 .exit = mem_cgroup_exit,
7315#endif
241994ed
JW
7316 .dfl_cftypes = memory_files,
7317 .legacy_cftypes = mem_cgroup_legacy_files,
6d12e2d8 7318 .early_init = 0,
8cdea7c0 7319};
c077719b 7320
bc50bcc6
JW
7321/*
7322 * This function calculates an individual cgroup's effective
7323 * protection which is derived from its own memory.min/low, its
7324 * parent's and siblings' settings, as well as the actual memory
7325 * distribution in the tree.
7326 *
7327 * The following rules apply to the effective protection values:
7328 *
7329 * 1. At the first level of reclaim, effective protection is equal to
7330 * the declared protection in memory.min and memory.low.
7331 *
7332 * 2. To enable safe delegation of the protection configuration, at
7333 * subsequent levels the effective protection is capped to the
7334 * parent's effective protection.
7335 *
7336 * 3. To make complex and dynamic subtrees easier to configure, the
7337 * user is allowed to overcommit the declared protection at a given
7338 * level. If that is the case, the parent's effective protection is
7339 * distributed to the children in proportion to how much protection
7340 * they have declared and how much of it they are utilizing.
7341 *
7342 * This makes distribution proportional, but also work-conserving:
7343 * if one cgroup claims much more protection than it uses memory,
7344 * the unused remainder is available to its siblings.
7345 *
7346 * 4. Conversely, when the declared protection is undercommitted at a
7347 * given level, the distribution of the larger parental protection
7348 * budget is NOT proportional. A cgroup's protection from a sibling
7349 * is capped to its own memory.min/low setting.
7350 *
8a931f80
JW
7351 * 5. However, to allow protecting recursive subtrees from each other
7352 * without having to declare each individual cgroup's fixed share
7353 * of the ancestor's claim to protection, any unutilized -
7354 * "floating" - protection from up the tree is distributed in
7355 * proportion to each cgroup's *usage*. This makes the protection
7356 * neutral wrt sibling cgroups and lets them compete freely over
7357 * the shared parental protection budget, but it protects the
7358 * subtree as a whole from neighboring subtrees.
7359 *
7360 * Note that 4. and 5. are not in conflict: 4. is about protecting
7361 * against immediate siblings whereas 5. is about protecting against
7362 * neighboring subtrees.
bc50bcc6
JW
7363 */
7364static unsigned long effective_protection(unsigned long usage,
8a931f80 7365 unsigned long parent_usage,
bc50bcc6
JW
7366 unsigned long setting,
7367 unsigned long parent_effective,
7368 unsigned long siblings_protected)
7369{
7370 unsigned long protected;
8a931f80 7371 unsigned long ep;
bc50bcc6
JW
7372
7373 protected = min(usage, setting);
7374 /*
7375 * If all cgroups at this level combined claim and use more
08e0f49e 7376 * protection than what the parent affords them, distribute
bc50bcc6
JW
7377 * shares in proportion to utilization.
7378 *
7379 * We are using actual utilization rather than the statically
7380 * claimed protection in order to be work-conserving: claimed
7381 * but unused protection is available to siblings that would
7382 * otherwise get a smaller chunk than what they claimed.
7383 */
7384 if (siblings_protected > parent_effective)
7385 return protected * parent_effective / siblings_protected;
7386
7387 /*
7388 * Ok, utilized protection of all children is within what the
7389 * parent affords them, so we know whatever this child claims
7390 * and utilizes is effectively protected.
7391 *
7392 * If there is unprotected usage beyond this value, reclaim
7393 * will apply pressure in proportion to that amount.
7394 *
7395 * If there is unutilized protection, the cgroup will be fully
7396 * shielded from reclaim, but we do return a smaller value for
7397 * protection than what the group could enjoy in theory. This
7398 * is okay. With the overcommit distribution above, effective
7399 * protection is always dependent on how memory is actually
7400 * consumed among the siblings anyway.
7401 */
8a931f80
JW
7402 ep = protected;
7403
7404 /*
7405 * If the children aren't claiming (all of) the protection
7406 * afforded to them by the parent, distribute the remainder in
7407 * proportion to the (unprotected) memory of each cgroup. That
7408 * way, cgroups that aren't explicitly prioritized wrt each
7409 * other compete freely over the allowance, but they are
7410 * collectively protected from neighboring trees.
7411 *
7412 * We're using unprotected memory for the weight so that if
7413 * some cgroups DO claim explicit protection, we don't protect
7414 * the same bytes twice.
cd324edc
JW
7415 *
7416 * Check both usage and parent_usage against the respective
7417 * protected values. One should imply the other, but they
7418 * aren't read atomically - make sure the division is sane.
8a931f80
JW
7419 */
7420 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7421 return ep;
cd324edc
JW
7422 if (parent_effective > siblings_protected &&
7423 parent_usage > siblings_protected &&
7424 usage > protected) {
8a931f80
JW
7425 unsigned long unclaimed;
7426
7427 unclaimed = parent_effective - siblings_protected;
7428 unclaimed *= usage - protected;
7429 unclaimed /= parent_usage - siblings_protected;
7430
7431 ep += unclaimed;
7432 }
7433
7434 return ep;
bc50bcc6
JW
7435}
7436
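/*
 * A worked example of the overcommit rule above, with made-up numbers: if
 * the parent's effective protection is 100 pages and two siblings each
 * declare and use 80 pages, siblings_protected is 160, which exceeds the
 * parent's 100, so each child ends up with 80 * 100 / 160 = 50 pages of
 * effective protection.
 */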
241994ed 7437/**
05395718 7438 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
34c81057 7439 * @root: the top ancestor of the sub-tree being checked
241994ed
JW
7440 * @memcg: the memory cgroup to check
7441 *
23067153
RG
7442 * WARNING: This function is not stateless! It can only be used as part
7443 * of a top-down tree iteration, not for isolated queries.
241994ed 7444 */
45c7f7e1
CD
7445void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7446 struct mem_cgroup *memcg)
241994ed 7447{
8a931f80 7448 unsigned long usage, parent_usage;
23067153
RG
7449 struct mem_cgroup *parent;
7450
241994ed 7451 if (mem_cgroup_disabled())
45c7f7e1 7452 return;
241994ed 7453
34c81057
SC
7454 if (!root)
7455 root = root_mem_cgroup;
22f7496f
YS
7456
7457 /*
7458 * Effective values of the reclaim targets are ignored so they
7459 * can be stale. Have a look at mem_cgroup_protection for more
7460 * details.
7461 * TODO: calculation should be more robust so that we do not need
7462 * that special casing.
7463 */
34c81057 7464 if (memcg == root)
45c7f7e1 7465 return;
241994ed 7466
23067153 7467 usage = page_counter_read(&memcg->memory);
bf8d5d52 7468 if (!usage)
45c7f7e1 7469 return;
bf8d5d52 7470
bf8d5d52 7471 parent = parent_mem_cgroup(memcg);
df2a4196 7472
bc50bcc6 7473 if (parent == root) {
c3d53200 7474 memcg->memory.emin = READ_ONCE(memcg->memory.min);
03960e33 7475 memcg->memory.elow = READ_ONCE(memcg->memory.low);
45c7f7e1 7476 return;
bf8d5d52
RG
7477 }
7478
8a931f80
JW
7479 parent_usage = page_counter_read(&parent->memory);
7480
b3a7822e 7481 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
c3d53200
CD
7482 READ_ONCE(memcg->memory.min),
7483 READ_ONCE(parent->memory.emin),
b3a7822e 7484 atomic_long_read(&parent->memory.children_min_usage)));
23067153 7485
b3a7822e 7486 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
03960e33
CD
7487 READ_ONCE(memcg->memory.low),
7488 READ_ONCE(parent->memory.elow),
b3a7822e 7489 atomic_long_read(&parent->memory.children_low_usage)));
241994ed
JW
7490}
7491
8f425e4e
MWO
7492static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7493 gfp_t gfp)
0add0c77 7494{
0add0c77
SB
7495 int ret;
7496
4b569387 7497 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
0add0c77
SB
7498 if (ret)
7499 goto out;
7500
4b569387 7501 mem_cgroup_commit_charge(folio, memcg);
0add0c77
SB
7502out:
7503 return ret;
7504}
7505
8f425e4e 7506int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
00501b53 7507{
0add0c77
SB
7508 struct mem_cgroup *memcg;
7509 int ret;
00501b53 7510
0add0c77 7511 memcg = get_mem_cgroup_from_mm(mm);
8f425e4e 7512 ret = charge_memcg(folio, memcg, gfp);
0add0c77 7513 css_put(&memcg->css);
2d1c4980 7514
0add0c77
SB
7515 return ret;
7516}
e993d905 7517
8cba9576
NP
7518/**
7519 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7520 * @memcg: memcg to charge.
7521 * @gfp: reclaim mode.
7522 * @nr_pages: number of pages to charge.
7523 *
7524 * This function is called when allocating a huge page folio to determine if
7525 * the memcg has the capacity for it. It does not commit the charge yet,
7526 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7527 *
7528 * Once we have obtained the hugetlb folio, we can call
7529 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7530 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7531 * of try_charge().
7532 *
7533 * Returns 0 on success. Otherwise, an error code is returned.
7534 */
7535int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7536 long nr_pages)
7537{
7538 /*
7539 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
7540 * but do not attempt to commit charge later (or cancel on error) either.
7541 */
7542 if (mem_cgroup_disabled() || !memcg ||
7543 !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7544 !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7545 return -EOPNOTSUPP;
7546
7547 if (try_charge(memcg, gfp, nr_pages))
7548 return -ENOMEM;
7549
7550 return 0;
7551}
7552
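/*
 * A simplified sketch of the calling sequence the comment above describes
 * (hypothetical caller; dequeue_from_hugetlb_pool() is a placeholder name
 * and error paths are trimmed):
 *
 *	ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
 *	if (ret == -ENOMEM)
 *		return NULL;
 *	folio = dequeue_from_hugetlb_pool();
 *	if (!ret) {
 *		if (folio)
 *			mem_cgroup_commit_charge(folio, memcg);
 *		else
 *			mem_cgroup_cancel_charge(memcg, nr_pages);
 *	}
 *
 * On -EOPNOTSUPP (accounting disabled) neither commit nor cancel is
 * called, matching the comment above.
 */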
0add0c77 7553/**
65995918
MWO
7554 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7555 * @folio: folio to charge.
0add0c77
SB
7556 * @mm: mm context of the victim
7557 * @gfp: reclaim mode
65995918 7558 * @entry: swap entry for which the folio is allocated
0add0c77 7559 *
65995918
MWO
7560 * This function charges a folio allocated for swapin. Please call this before
7561 * adding the folio to the swapcache.
0add0c77
SB
7562 *
7563 * Returns 0 on success. Otherwise, an error code is returned.
7564 */
65995918 7565int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
0add0c77
SB
7566 gfp_t gfp, swp_entry_t entry)
7567{
7568 struct mem_cgroup *memcg;
7569 unsigned short id;
7570 int ret;
00501b53 7571
0add0c77
SB
7572 if (mem_cgroup_disabled())
7573 return 0;
00501b53 7574
0add0c77
SB
7575 id = lookup_swap_cgroup_id(entry);
7576 rcu_read_lock();
7577 memcg = mem_cgroup_from_id(id);
7578 if (!memcg || !css_tryget_online(&memcg->css))
7579 memcg = get_mem_cgroup_from_mm(mm);
7580 rcu_read_unlock();
00501b53 7581
8f425e4e 7582 ret = charge_memcg(folio, memcg, gfp);
6abb5a86 7583
0add0c77
SB
7584 css_put(&memcg->css);
7585 return ret;
7586}
00501b53 7587
0add0c77
SB
7588/*
7589 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7590 * @entry: swap entry for which the page is charged
7591 *
7592 * Call this function after successfully adding the charged page to swapcache.
7593 *
7594 * Note: This function assumes that the page for which the swap slot is being
7595 * uncharged is an order-0 page.
7596 */
7597void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7598{
cae3af62
MS
7599 /*
7600 * Cgroup1's unified memory+swap counter has been charged with the
7601 * new swapcache page, finish the transfer by uncharging the swap
7602 * slot. The swap slot would also get uncharged when it dies, but
7603 * it can stick around indefinitely and we'd count the page twice
7604 * the entire time.
7605 *
7606 * Cgroup2 has separate resource counters for memory and swap,
7607 * so this is a non-issue here. Memory and swap charge lifetimes
7608 * correspond 1:1 to page and swap slot lifetimes: we charge the
7609 * page to memory here, and uncharge swap when the slot is freed.
7610 */
0add0c77 7611 if (!mem_cgroup_disabled() && do_memsw_account()) {
00501b53
JW
7612 /*
7613 * The swap entry might not get freed for a long time,
7614 * let's not wait for it. The page already received a
7615 * memory+swap charge, drop the swap entry duplicate.
7616 */
0add0c77 7617 mem_cgroup_uncharge_swap(entry, 1);
00501b53 7618 }
3fea5a49
JW
7619}
7620
a9d5adee
JG
7621struct uncharge_gather {
7622 struct mem_cgroup *memcg;
b4e0b68f 7623 unsigned long nr_memory;
a9d5adee 7624 unsigned long pgpgout;
a9d5adee 7625 unsigned long nr_kmem;
8e88bd2d 7626 int nid;
a9d5adee
JG
7627};
7628
7629static inline void uncharge_gather_clear(struct uncharge_gather *ug)
747db954 7630{
a9d5adee
JG
7631 memset(ug, 0, sizeof(*ug));
7632}
7633
7634static void uncharge_batch(const struct uncharge_gather *ug)
7635{
747db954
JW
7636 unsigned long flags;
7637
b4e0b68f
MS
7638 if (ug->nr_memory) {
7639 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7941d214 7640 if (do_memsw_account())
b4e0b68f 7641 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
a8c49af3
YA
7642 if (ug->nr_kmem)
7643 memcg_account_kmem(ug->memcg, -ug->nr_kmem);
a9d5adee 7644 memcg_oom_recover(ug->memcg);
ce00a967 7645 }
747db954
JW
7646
7647 local_irq_save(flags);
c9019e9b 7648 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
b4e0b68f 7649 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
8e88bd2d 7650 memcg_check_events(ug->memcg, ug->nid);
747db954 7651 local_irq_restore(flags);
f1796544 7652
c4ed6ebf 7653 /* drop reference from uncharge_folio */
f1796544 7654 css_put(&ug->memcg->css);
a9d5adee
JG
7655}
7656
c4ed6ebf 7657static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
a9d5adee 7658{
c4ed6ebf 7659 long nr_pages;
b4e0b68f
MS
7660 struct mem_cgroup *memcg;
7661 struct obj_cgroup *objcg;
9f762dbe 7662
c4ed6ebf 7663 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
b7b098cf
MWO
7664 VM_BUG_ON_FOLIO(folio_order(folio) > 1 &&
7665 !folio_test_hugetlb(folio) &&
7666 !list_empty(&folio->_deferred_list), folio);
a9d5adee 7667
a9d5adee
JG
7668 /*
7669 * Nobody should be changing or seriously looking at
c4ed6ebf
MWO
7670 * folio memcg or objcg at this point; we have fully
7671 * exclusive access to the folio.
a9d5adee 7672 */
fead2b86 7673 if (folio_memcg_kmem(folio)) {
1b7e4464 7674 objcg = __folio_objcg(folio);
b4e0b68f
MS
7675 /*
7676 * This get matches the put at the end of the function and
7677 * kmem pages do not hold memcg references anymore.
7678 */
7679 memcg = get_mem_cgroup_from_objcg(objcg);
7680 } else {
1b7e4464 7681 memcg = __folio_memcg(folio);
b4e0b68f 7682 }
a9d5adee 7683
b4e0b68f
MS
7684 if (!memcg)
7685 return;
7686
7687 if (ug->memcg != memcg) {
a9d5adee
JG
7688 if (ug->memcg) {
7689 uncharge_batch(ug);
7690 uncharge_gather_clear(ug);
7691 }
b4e0b68f 7692 ug->memcg = memcg;
c4ed6ebf 7693 ug->nid = folio_nid(folio);
f1796544
MH
7694
7695 /* pairs with css_put in uncharge_batch */
b4e0b68f 7696 css_get(&memcg->css);
a9d5adee
JG
7697 }
7698
c4ed6ebf 7699 nr_pages = folio_nr_pages(folio);
a9d5adee 7700
fead2b86 7701 if (folio_memcg_kmem(folio)) {
b4e0b68f 7702 ug->nr_memory += nr_pages;
9f762dbe 7703 ug->nr_kmem += nr_pages;
b4e0b68f 7704
c4ed6ebf 7705 folio->memcg_data = 0;
b4e0b68f
MS
7706 obj_cgroup_put(objcg);
7707 } else {
7708 /* LRU pages aren't accounted at the root level */
7709 if (!mem_cgroup_is_root(memcg))
7710 ug->nr_memory += nr_pages;
18b2db3b 7711 ug->pgpgout++;
a9d5adee 7712
c4ed6ebf 7713 folio->memcg_data = 0;
b4e0b68f
MS
7714 }
7715
7716 css_put(&memcg->css);
747db954
JW
7717}
7718
bbc6b703 7719void __mem_cgroup_uncharge(struct folio *folio)
0a31bc97 7720{
a9d5adee
JG
7721 struct uncharge_gather ug;
7722
bbc6b703
MWO
7723 /* Don't touch folio->lru of any random page, pre-check: */
7724 if (!folio_memcg(folio))
0a31bc97
JW
7725 return;
7726
a9d5adee 7727 uncharge_gather_clear(&ug);
bbc6b703 7728 uncharge_folio(folio, &ug);
a9d5adee 7729 uncharge_batch(&ug);
747db954 7730}
0a31bc97 7731
4882c809
MWO
7732void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
7733{
7734 struct uncharge_gather ug;
7735 unsigned int i;
7736
7737 uncharge_gather_clear(&ug);
7738 for (i = 0; i < folios->nr; i++)
7739 uncharge_folio(folios->folios[i], &ug);
7740 if (ug.memcg)
7741 uncharge_batch(&ug);
0a31bc97
JW
7742}
7743
7744/**
85ce2c51 7745 * mem_cgroup_replace_folio - Charge a folio's replacement.
d21bba2b
MWO
7746 * @old: Currently circulating folio.
7747 * @new: Replacement folio.
0a31bc97 7748 *
d21bba2b 7749 * Charge @new as a replacement folio for @old. @old will
85ce2c51
NP
7750 * be uncharged upon free. This is only used by the page cache
7751 * (in replace_page_cache_folio()).
0a31bc97 7752 *
d21bba2b 7753 * Both folios must be locked, @new->mapping must be set up.
0a31bc97 7754 */
85ce2c51 7755void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
0a31bc97 7756{
29833315 7757 struct mem_cgroup *memcg;
d21bba2b 7758 long nr_pages = folio_nr_pages(new);
d93c4130 7759 unsigned long flags;
0a31bc97 7760
d21bba2b
MWO
7761 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7762 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7763 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7764 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
0a31bc97
JW
7765
7766 if (mem_cgroup_disabled())
7767 return;
7768
d21bba2b
MWO
7769 /* Page cache replacement: new folio already charged? */
7770 if (folio_memcg(new))
0a31bc97
JW
7771 return;
7772
d21bba2b
MWO
7773 memcg = folio_memcg(old);
7774 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
29833315 7775 if (!memcg)
0a31bc97
JW
7776 return;
7777
44b7a8d3 7778 /* Force-charge the new page. The old one will be freed soon */
8dc87c7d
MS
7779 if (!mem_cgroup_is_root(memcg)) {
7780 page_counter_charge(&memcg->memory, nr_pages);
7781 if (do_memsw_account())
7782 page_counter_charge(&memcg->memsw, nr_pages);
7783 }
0a31bc97 7784
1a3e1f40 7785 css_get(&memcg->css);
d21bba2b 7786 commit_charge(new, memcg);
44b7a8d3 7787
d93c4130 7788 local_irq_save(flags);
6e0110c2 7789 mem_cgroup_charge_statistics(memcg, nr_pages);
d21bba2b 7790 memcg_check_events(memcg, folio_nid(new));
d93c4130 7791 local_irq_restore(flags);
0a31bc97
JW
7792}
7793
85ce2c51
NP
7794/**
7795 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7796 * @old: Currently circulating folio.
7797 * @new: Replacement folio.
7798 *
7799 * Transfer the memcg data from the old folio to the new folio for migration.
7800 * The old folio's memcg data will be cleared. Note that the memory counters
7801 * will remain unchanged throughout the process.
7802 *
7803 * Both folios must be locked, @new->mapping must be set up.
7804 */
7805void mem_cgroup_migrate(struct folio *old, struct folio *new)
7806{
7807 struct mem_cgroup *memcg;
7808
7809 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7810 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7811 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7812 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7813
7814 if (mem_cgroup_disabled())
7815 return;
7816
7817 memcg = folio_memcg(old);
8cba9576
NP
7818 /*
7819 * Note that it is normal to see !memcg for a hugetlb folio.
7820 * E.g., it could have been allocated when memory_hugetlb_accounting
7821 * was not selected.
7822 */
7823 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
85ce2c51
NP
7824 if (!memcg)
7825 return;
7826
7827 /* Transfer the charge and the css ref */
7828 commit_charge(new, memcg);
9bcef597
BW
7829 /*
7830 * If the old folio is a large folio and is in the split queue, it needs
7831 * to be removed from the split queue now, to avoid getting an incorrect
7832 * split queue in destroy_large_folio() after the memcg of the old folio
7833 * is cleared.
7834 *
7835 * In addition, the old folio is about to be freed after migration, so
7836 * removing from the split queue a bit earlier seems reasonable.
7837 */
7838 if (folio_test_large(old) && folio_test_large_rmappable(old))
7839 folio_undo_large_rmappable(old);
85ce2c51
NP
7840 old->memcg_data = 0;
7841}
7842
ef12947c 7843DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
11092087
JW
7844EXPORT_SYMBOL(memcg_sockets_enabled_key);
7845
2d758073 7846void mem_cgroup_sk_alloc(struct sock *sk)
11092087
JW
7847{
7848 struct mem_cgroup *memcg;
7849
2d758073
JW
7850 if (!mem_cgroup_sockets_enabled)
7851 return;
7852
e876ecc6 7853 /* Do not associate the sock with unrelated interrupted task's memcg. */
086f694a 7854 if (!in_task())
e876ecc6
SB
7855 return;
7856
11092087
JW
7857 rcu_read_lock();
7858 memcg = mem_cgroup_from_task(current);
7848ed62 7859 if (mem_cgroup_is_root(memcg))
f7e1cb6e 7860 goto out;
0db15298 7861 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
f7e1cb6e 7862 goto out;
8965aa28 7863 if (css_tryget(&memcg->css))
11092087 7864 sk->sk_memcg = memcg;
f7e1cb6e 7865out:
11092087
JW
7866 rcu_read_unlock();
7867}
11092087 7868
2d758073 7869void mem_cgroup_sk_free(struct sock *sk)
11092087 7870{
2d758073
JW
7871 if (sk->sk_memcg)
7872 css_put(&sk->sk_memcg->css);
11092087
JW
7873}
7874
7875/**
7876 * mem_cgroup_charge_skmem - charge socket memory
7877 * @memcg: memcg to charge
7878 * @nr_pages: number of pages to charge
4b1327be 7879 * @gfp_mask: reclaim mode
11092087
JW
7880 *
7881 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4b1327be 7882 * @memcg's configured limit, %false if it doesn't.
11092087 7883 */
4b1327be
WW
7884bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7885 gfp_t gfp_mask)
11092087 7886{
f7e1cb6e 7887 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7888 struct page_counter *fail;
f7e1cb6e 7889
0db15298
JW
7890 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7891 memcg->tcpmem_pressure = 0;
f7e1cb6e
JW
7892 return true;
7893 }
0db15298 7894 memcg->tcpmem_pressure = 1;
4b1327be
WW
7895 if (gfp_mask & __GFP_NOFAIL) {
7896 page_counter_charge(&memcg->tcpmem, nr_pages);
7897 return true;
7898 }
f7e1cb6e 7899 return false;
11092087 7900 }
d886f4e4 7901
4b1327be
WW
7902 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7903 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
f7e1cb6e 7904 return true;
4b1327be 7905 }
f7e1cb6e 7906
11092087
JW
7907 return false;
7908}
7909
7910/**
7911 * mem_cgroup_uncharge_skmem - uncharge socket memory
b7701a5f
MR
7912 * @memcg: memcg to uncharge
7913 * @nr_pages: number of pages to uncharge
11092087
JW
7914 */
7915void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7916{
f7e1cb6e 7917 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7918 page_counter_uncharge(&memcg->tcpmem, nr_pages);
f7e1cb6e
JW
7919 return;
7920 }
d886f4e4 7921
c9019e9b 7922 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
b2807f07 7923
475d0487 7924 refill_stock(memcg, nr_pages);
11092087
JW
7925}
7926
f7e1cb6e
JW
7927static int __init cgroup_memory(char *s)
7928{
7929 char *token;
7930
7931 while ((token = strsep(&s, ",")) != NULL) {
7932 if (!*token)
7933 continue;
7934 if (!strcmp(token, "nosocket"))
7935 cgroup_memory_nosocket = true;
04823c83
VD
7936 if (!strcmp(token, "nokmem"))
7937 cgroup_memory_nokmem = true;
b6c1a8af
YS
7938 if (!strcmp(token, "nobpf"))
7939 cgroup_memory_nobpf = true;
f7e1cb6e 7940 }
460a79e1 7941 return 1;
f7e1cb6e
JW
7942}
7943__setup("cgroup.memory=", cgroup_memory);
11092087 7944
2d11085e 7945/*
1081312f
MH
7946 * subsys_initcall() for memory controller.
7947 *
308167fc
SAS
7948 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7949 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7950 * basically everything that doesn't depend on a specific mem_cgroup structure
7951 * should be initialized from here.
2d11085e
MH
7952 */
7953static int __init mem_cgroup_init(void)
7954{
95a045f6
JW
7955 int cpu, node;
7956
f3344adf
MS
7957 /*
7958 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7959 * used for per-memcg-per-cpu caching of per-node statistics. In order
7960 * to work fine, we should make sure that the overfill threshold can't
7961 * exceed S32_MAX / PAGE_SIZE.
7962 */
7963 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7964
308167fc
SAS
7965 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7966 memcg_hotplug_cpu_dead);
95a045f6
JW
7967
7968 for_each_possible_cpu(cpu)
7969 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7970 drain_local_stock);
7971
7972 for_each_node(node) {
7973 struct mem_cgroup_tree_per_node *rtpn;
95a045f6 7974
91f0dcce 7975 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
95a045f6 7976
ef8f2327 7977 rtpn->rb_root = RB_ROOT;
fa90b2fd 7978 rtpn->rb_rightmost = NULL;
ef8f2327 7979 spin_lock_init(&rtpn->lock);
95a045f6
JW
7980 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7981 }
7982
2d11085e
MH
7983 return 0;
7984}
7985subsys_initcall(mem_cgroup_init);
21afa38e 7986
e55b9f96 7987#ifdef CONFIG_SWAP
358c07fc
AB
7988static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7989{
1c2d479a 7990 while (!refcount_inc_not_zero(&memcg->id.ref)) {
358c07fc
AB
7991 /*
7992 * The root cgroup cannot be destroyed, so its refcount must
7993 * always be >= 1.
7994 */
7848ed62 7995 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
358c07fc
AB
7996 VM_BUG_ON(1);
7997 break;
7998 }
7999 memcg = parent_mem_cgroup(memcg);
8000 if (!memcg)
8001 memcg = root_mem_cgroup;
8002 }
8003 return memcg;
8004}
8005
21afa38e
JW
8006/**
8007 * mem_cgroup_swapout - transfer a memsw charge to swap
3ecb0087 8008 * @folio: folio whose memsw charge to transfer
21afa38e
JW
8009 * @entry: swap entry to move the charge to
8010 *
3ecb0087 8011 * Transfer the memsw charge of @folio to @entry.
21afa38e 8012 */
3ecb0087 8013void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
21afa38e 8014{
1f47b61f 8015 struct mem_cgroup *memcg, *swap_memcg;
d6810d73 8016 unsigned int nr_entries;
21afa38e
JW
8017 unsigned short oldid;
8018
3ecb0087
MWO
8019 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
8020 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
21afa38e 8021
76358ab5
AS
8022 if (mem_cgroup_disabled())
8023 return;
8024
b94c4e94 8025 if (!do_memsw_account())
21afa38e
JW
8026 return;
8027
3ecb0087 8028 memcg = folio_memcg(folio);
21afa38e 8029
3ecb0087 8030 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
21afa38e
JW
8031 if (!memcg)
8032 return;
8033
1f47b61f
VD
8034 /*
8035 * In case the memcg owning these pages has been offlined and doesn't
8036 * have an ID allocated to it anymore, charge the closest online
8037 * ancestor for the swap instead and transfer the memory+swap charge.
8038 */
8039 swap_memcg = mem_cgroup_id_get_online(memcg);
3ecb0087 8040 nr_entries = folio_nr_pages(folio);
d6810d73
HY
8041 /* Get references for the tail pages, too */
8042 if (nr_entries > 1)
8043 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
8044 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
8045 nr_entries);
3ecb0087 8046 VM_BUG_ON_FOLIO(oldid, folio);
c9019e9b 8047 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
21afa38e 8048
3ecb0087 8049 folio->memcg_data = 0;
21afa38e
JW
8050
8051 if (!mem_cgroup_is_root(memcg))
d6810d73 8052 page_counter_uncharge(&memcg->memory, nr_entries);
21afa38e 8053
b25806dc 8054 if (memcg != swap_memcg) {
1f47b61f 8055 if (!mem_cgroup_is_root(swap_memcg))
d6810d73
HY
8056 page_counter_charge(&swap_memcg->memsw, nr_entries);
8057 page_counter_uncharge(&memcg->memsw, nr_entries);
1f47b61f
VD
8058 }
8059
ce9ce665
SAS
8060 /*
8061 * Interrupts should be disabled here because the caller holds the
b93b0163 8062 * i_pages lock which is taken with interrupts-off. It is
ce9ce665 8063 * important here to have the interrupts disabled because it is the
b93b0163 8064 * only synchronisation we have for updating the per-CPU variables.
ce9ce665 8065 */
be3e67b5 8066 memcg_stats_lock();
6e0110c2 8067 mem_cgroup_charge_statistics(memcg, -nr_entries);
be3e67b5 8068 memcg_stats_unlock();
3ecb0087 8069 memcg_check_events(memcg, folio_nid(folio));
73f576c0 8070
1a3e1f40 8071 css_put(&memcg->css);
21afa38e
JW
8072}
8073
38d8b4e6 8074/**
e2e3fdc7
MWO
8075 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
8076 * @folio: folio being added to swap
37e84351
VD
8077 * @entry: swap entry to charge
8078 *
e2e3fdc7 8079 * Try to charge @folio's memcg for the swap space at @entry.
37e84351
VD
8080 *
8081 * Returns 0 on success, -ENOMEM on failure.
8082 */
e2e3fdc7 8083int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
37e84351 8084{
e2e3fdc7 8085 unsigned int nr_pages = folio_nr_pages(folio);
37e84351 8086 struct page_counter *counter;
38d8b4e6 8087 struct mem_cgroup *memcg;
37e84351
VD
8088 unsigned short oldid;
8089
b94c4e94 8090 if (do_memsw_account())
37e84351
VD
8091 return 0;
8092
e2e3fdc7 8093 memcg = folio_memcg(folio);
37e84351 8094
e2e3fdc7 8095 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
37e84351
VD
8096 if (!memcg)
8097 return 0;
8098
f3a53a3a
TH
8099 if (!entry.val) {
8100 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
bb98f2c5 8101 return 0;
f3a53a3a 8102 }
bb98f2c5 8103
1f47b61f
VD
8104 memcg = mem_cgroup_id_get_online(memcg);
8105
b25806dc 8106 if (!mem_cgroup_is_root(memcg) &&
38d8b4e6 8107 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
f3a53a3a
TH
8108 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
8109 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
1f47b61f 8110 mem_cgroup_id_put(memcg);
37e84351 8111 return -ENOMEM;
1f47b61f 8112 }
37e84351 8113
38d8b4e6
HY
8114 /* Get references for the tail pages, too */
8115 if (nr_pages > 1)
8116 mem_cgroup_id_get_many(memcg, nr_pages - 1);
8117 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
e2e3fdc7 8118 VM_BUG_ON_FOLIO(oldid, folio);
c9019e9b 8119 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
37e84351 8120
37e84351
VD
8121 return 0;
8122}
8123
21afa38e 8124/**
01c4b28c 8125 * __mem_cgroup_uncharge_swap - uncharge swap space
21afa38e 8126 * @entry: swap entry to uncharge
38d8b4e6 8127 * @nr_pages: the amount of swap space to uncharge
21afa38e 8128 */
01c4b28c 8129void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
21afa38e
JW
8130{
8131 struct mem_cgroup *memcg;
8132 unsigned short id;
8133
38d8b4e6 8134 id = swap_cgroup_record(entry, 0, nr_pages);
21afa38e 8135 rcu_read_lock();
adbe427b 8136 memcg = mem_cgroup_from_id(id);
21afa38e 8137 if (memcg) {
b25806dc 8138 if (!mem_cgroup_is_root(memcg)) {
b94c4e94 8139 if (do_memsw_account())
38d8b4e6 8140 page_counter_uncharge(&memcg->memsw, nr_pages);
b94c4e94
JW
8141 else
8142 page_counter_uncharge(&memcg->swap, nr_pages);
37e84351 8143 }
c9019e9b 8144 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
38d8b4e6 8145 mem_cgroup_id_put_many(memcg, nr_pages);
21afa38e
JW
8146 }
8147 rcu_read_unlock();
8148}
8149
d8b38438
VD
8150long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
8151{
8152 long nr_swap_pages = get_nr_swap_pages();
8153
b25806dc 8154 if (mem_cgroup_disabled() || do_memsw_account())
d8b38438 8155 return nr_swap_pages;
7848ed62 8156 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
d8b38438 8157 nr_swap_pages = min_t(long, nr_swap_pages,
bbec2e15 8158 READ_ONCE(memcg->swap.max) -
d8b38438
VD
8159 page_counter_read(&memcg->swap));
8160 return nr_swap_pages;
8161}
8162
9202d527 8163bool mem_cgroup_swap_full(struct folio *folio)
5ccc5aba
VD
8164{
8165 struct mem_cgroup *memcg;
8166
9202d527 8167 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5ccc5aba
VD
8168
8169 if (vm_swap_full())
8170 return true;
b25806dc 8171 if (do_memsw_account())
5ccc5aba
VD
8172 return false;
8173
9202d527 8174 memcg = folio_memcg(folio);
5ccc5aba
VD
8175 if (!memcg)
8176 return false;
8177
7848ed62 8178 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
4b82ab4f
JK
8179 unsigned long usage = page_counter_read(&memcg->swap);
8180
8181 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
8182 usage * 2 >= READ_ONCE(memcg->swap.max))
5ccc5aba 8183 return true;
4b82ab4f 8184 }
5ccc5aba
VD
8185
8186 return false;
8187}
8188
eccb52e7 8189static int __init setup_swap_account(char *s)
21afa38e 8190{
118642d7
JW
8191 bool res;
8192
8193 if (!kstrtobool(s, &res) && !res)
8194 pr_warn_once("The swapaccount=0 commandline option is deprecated "
8195 "in favor of configuring swap control via cgroupfs. "
8196 "Please report your usecase to linux-mm@kvack.org if you "
8197 "depend on this functionality.\n");
21afa38e
JW
8198 return 1;
8199}
eccb52e7 8200__setup("swapaccount=", setup_swap_account);
21afa38e 8201
37e84351
VD
8202static u64 swap_current_read(struct cgroup_subsys_state *css,
8203 struct cftype *cft)
8204{
8205 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8206
8207 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
8208}
8209
e0e0b412
LD
8210static u64 swap_peak_read(struct cgroup_subsys_state *css,
8211 struct cftype *cft)
8212{
8213 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8214
8215 return (u64)memcg->swap.watermark * PAGE_SIZE;
8216}
8217
4b82ab4f
JK
8218static int swap_high_show(struct seq_file *m, void *v)
8219{
8220 return seq_puts_memcg_tunable(m,
8221 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8222}
8223
8224static ssize_t swap_high_write(struct kernfs_open_file *of,
8225 char *buf, size_t nbytes, loff_t off)
8226{
8227 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8228 unsigned long high;
8229 int err;
8230
8231 buf = strstrip(buf);
8232 err = page_counter_memparse(buf, "max", &high);
8233 if (err)
8234 return err;
8235
8236 page_counter_set_high(&memcg->swap, high);
8237
8238 return nbytes;
8239}
8240
37e84351
VD
8241static int swap_max_show(struct seq_file *m, void *v)
8242{
677dc973
CD
8243 return seq_puts_memcg_tunable(m,
8244 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
37e84351
VD
8245}
8246
8247static ssize_t swap_max_write(struct kernfs_open_file *of,
8248 char *buf, size_t nbytes, loff_t off)
8249{
8250 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8251 unsigned long max;
8252 int err;
8253
8254 buf = strstrip(buf);
8255 err = page_counter_memparse(buf, "max", &max);
8256 if (err)
8257 return err;
8258
be09102b 8259 xchg(&memcg->swap.max, max);
37e84351
VD
8260
8261 return nbytes;
8262}
8263
f3a53a3a
TH
8264static int swap_events_show(struct seq_file *m, void *v)
8265{
aa9694bb 8266 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
f3a53a3a 8267
4b82ab4f
JK
8268 seq_printf(m, "high %lu\n",
8269 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
f3a53a3a
TH
8270 seq_printf(m, "max %lu\n",
8271 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8272 seq_printf(m, "fail %lu\n",
8273 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8274
8275 return 0;
8276}
8277
37e84351
VD
8278static struct cftype swap_files[] = {
8279 {
8280 .name = "swap.current",
8281 .flags = CFTYPE_NOT_ON_ROOT,
8282 .read_u64 = swap_current_read,
8283 },
4b82ab4f
JK
8284 {
8285 .name = "swap.high",
8286 .flags = CFTYPE_NOT_ON_ROOT,
8287 .seq_show = swap_high_show,
8288 .write = swap_high_write,
8289 },
37e84351
VD
8290 {
8291 .name = "swap.max",
8292 .flags = CFTYPE_NOT_ON_ROOT,
8293 .seq_show = swap_max_show,
8294 .write = swap_max_write,
8295 },
e0e0b412
LD
8296 {
8297 .name = "swap.peak",
8298 .flags = CFTYPE_NOT_ON_ROOT,
8299 .read_u64 = swap_peak_read,
8300 },
f3a53a3a
TH
8301 {
8302 .name = "swap.events",
8303 .flags = CFTYPE_NOT_ON_ROOT,
8304 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
8305 .seq_show = swap_events_show,
8306 },
37e84351
VD
8307 { } /* terminate */
8308};
8309
eccb52e7 8310static struct cftype memsw_files[] = {
21afa38e
JW
8311 {
8312 .name = "memsw.usage_in_bytes",
8313 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8314 .read_u64 = mem_cgroup_read_u64,
8315 },
8316 {
8317 .name = "memsw.max_usage_in_bytes",
8318 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8319 .write = mem_cgroup_reset,
8320 .read_u64 = mem_cgroup_read_u64,
8321 },
8322 {
8323 .name = "memsw.limit_in_bytes",
8324 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8325 .write = mem_cgroup_write,
8326 .read_u64 = mem_cgroup_read_u64,
8327 },
8328 {
8329 .name = "memsw.failcnt",
8330 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8331 .write = mem_cgroup_reset,
8332 .read_u64 = mem_cgroup_read_u64,
8333 },
8334 { }, /* terminate */
8335};
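/*
 * memsw_files is the legacy cgroup v1 interface.  Unlike the v2 files
 * above, memory.memsw.* accounts memory and swap combined rather than
 * swap on its own, and all four files are thin wrappers around the
 * generic mem_cgroup_read_u64()/mem_cgroup_write()/mem_cgroup_reset()
 * helpers keyed by MEMFILE_PRIVATE(_MEMSWAP, ...).  For example, on a
 * v1 memory controller mount:
 *
 *     echo 2G > /sys/fs/cgroup/memory/<group>/memory.memsw.limit_in_bytes
 *     cat /sys/fs/cgroup/memory/<group>/memory.memsw.usage_in_bytes
 */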
8336
f4840ccf
JW
8337#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8338/**
8339 * obj_cgroup_may_zswap - check if this cgroup can zswap
8340 * @objcg: the object cgroup
8341 *
8342 * Check if the hierarchical zswap limit has been reached.
8343 *
8344 * This doesn't check for specific headroom, and it is not atomic
8345 * either. But with zswap, the size of the allocation is only known
be16dd76 8346 * once compression has occurred, and this optimistic pre-check avoids
f4840ccf
JW
8347 * spending cycles on compression when there is already no room left
8348 * or zswap is disabled altogether somewhere in the hierarchy.
8349 */
8350bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8351{
8352 struct mem_cgroup *memcg, *original_memcg;
8353 bool ret = true;
8354
8355 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8356 return true;
8357
8358 original_memcg = get_mem_cgroup_from_objcg(objcg);
7848ed62 8359 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
f4840ccf
JW
8360 memcg = parent_mem_cgroup(memcg)) {
8361 unsigned long max = READ_ONCE(memcg->zswap_max);
8362 unsigned long pages;
8363
8364 if (max == PAGE_COUNTER_MAX)
8365 continue;
8366 if (max == 0) {
8367 ret = false;
8368 break;
8369 }
8370
7d7ef0a4
YA
8371 /*
8372 * mem_cgroup_flush_stats() ignores small changes. Use
8373 * do_flush_stats() directly to get accurate stats for charging.
8374 */
8375 do_flush_stats(memcg);
f4840ccf
JW
8376 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8377 if (pages < max)
8378 continue;
8379 ret = false;
8380 break;
8381 }
8382 mem_cgroup_put(original_memcg);
8383 return ret;
8384}
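/*
 * The walk above stops at the first ancestor whose zswap.max is zero
 * (zswap disabled for that subtree) or whose limit is already exceeded;
 * ancestors left at PAGE_COUNTER_MAX are skipped without flushing stats,
 * so fully unlimited hierarchies pay almost nothing for this pre-check.
 */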
8385
8386/**
8387 * obj_cgroup_charge_zswap - charge compression backend memory
8388 * @objcg: the object cgroup
8389 * @size: size of compressed object
8390 *
3a1060c2 8391 * This forces the charge after obj_cgroup_may_zswap() allowed
f4840ccf
JW
 8392 * compression and storage in zswap for this cgroup to go ahead.
8393 */
8394void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8395{
8396 struct mem_cgroup *memcg;
8397
8398 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8399 return;
8400
8401 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8402
8403 /* PF_MEMALLOC context, charging must succeed */
8404 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8405 VM_WARN_ON_ONCE(1);
8406
8407 rcu_read_lock();
8408 memcg = obj_cgroup_memcg(objcg);
8409 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8410 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8411 rcu_read_unlock();
8412}
8413
8414/**
8415 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8416 * @objcg: the object cgroup
8417 * @size: size of compressed object
8418 *
 8419 * Uncharges zswap memory when the compressed object is freed, e.g. on swapin.
8420 */
8421void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8422{
8423 struct mem_cgroup *memcg;
8424
8425 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8426 return;
8427
8428 obj_cgroup_uncharge(objcg, size);
8429
8430 rcu_read_lock();
8431 memcg = obj_cgroup_memcg(objcg);
8432 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8433 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8434 rcu_read_unlock();
8435}
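/*
 * Charge and uncharge pair over the lifetime of a compressed object: the
 * charge is taken once the compressed size is known, and dropped when the
 * object is freed again.  A minimal sketch of that flow, with every
 * helper other than the obj_cgroup_*_zswap() calls being illustrative:
 *
 *     // store path
 *     if (!obj_cgroup_may_zswap(objcg))
 *             return false;               // over a zswap.max up the tree
 *     clen = compress(folio, buf);        // size known only now
 *     obj_cgroup_charge_zswap(objcg, clen);
 *
 *     // free path (swapin, invalidation, writeback)
 *     obj_cgroup_uncharge_zswap(objcg, clen);
 */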
8436
501a06fe
NP
8437bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8438{
8439 /* if zswap is disabled, do not block pages going to the swapping device */
8440 return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8441}
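/*
 * In other words, writeback to the backing swap device is only blocked
 * when zswap is enabled, the folio belongs to a memcg, and that memcg
 * has memory.zswap.writeback set to 0; in every other case the page may
 * be written to swap as usual.
 */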
8442
f4840ccf
JW
8443static u64 zswap_current_read(struct cgroup_subsys_state *css,
8444 struct cftype *cft)
8445{
7d7ef0a4
YA
8446 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8447
8448 mem_cgroup_flush_stats(memcg);
8449 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
f4840ccf
JW
8450}
8451
8452static int zswap_max_show(struct seq_file *m, void *v)
8453{
8454 return seq_puts_memcg_tunable(m,
8455 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8456}
8457
8458static ssize_t zswap_max_write(struct kernfs_open_file *of,
8459 char *buf, size_t nbytes, loff_t off)
8460{
8461 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8462 unsigned long max;
8463 int err;
8464
8465 buf = strstrip(buf);
8466 err = page_counter_memparse(buf, "max", &max);
8467 if (err)
8468 return err;
8469
8470 xchg(&memcg->zswap_max, max);
8471
8472 return nbytes;
8473}
8474
501a06fe
NP
8475static int zswap_writeback_show(struct seq_file *m, void *v)
8476{
8477 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8478
8479 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8480 return 0;
8481}
8482
8483static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8484 char *buf, size_t nbytes, loff_t off)
8485{
8486 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8487 int zswap_writeback;
8488 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8489
8490 if (parse_ret)
8491 return parse_ret;
8492
8493 if (zswap_writeback != 0 && zswap_writeback != 1)
8494 return -EINVAL;
8495
8496 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8497 return nbytes;
8498}
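/*
 * Only the literal values 0 and 1 are accepted; anything else fails with
 * -EINVAL.  For example:
 *
 *     echo 0 > /sys/fs/cgroup/<group>/memory.zswap.writeback  # keep pages in zswap
 *     echo 1 > /sys/fs/cgroup/<group>/memory.zswap.writeback  # allow writeback
 */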
8499
f4840ccf
JW
8500static struct cftype zswap_files[] = {
8501 {
8502 .name = "zswap.current",
8503 .flags = CFTYPE_NOT_ON_ROOT,
8504 .read_u64 = zswap_current_read,
8505 },
8506 {
8507 .name = "zswap.max",
8508 .flags = CFTYPE_NOT_ON_ROOT,
8509 .seq_show = zswap_max_show,
8510 .write = zswap_max_write,
8511 },
501a06fe
NP
8512 {
8513 .name = "zswap.writeback",
8514 .seq_show = zswap_writeback_show,
8515 .write = zswap_writeback_write,
8516 },
f4840ccf
JW
8517 { } /* terminate */
8518};
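/*
 * These files complement the swap interface above: memory.zswap.current
 * reports the compressed bytes charged to the cgroup, memory.zswap.max
 * caps them ("max" removes the limit, 0 disables zswap for the subtree),
 * and memory.zswap.writeback controls whether pages may still be evicted
 * to the backing swap device.  For example:
 *
 *     echo 256M > /sys/fs/cgroup/<group>/memory.zswap.max
 *     cat /sys/fs/cgroup/<group>/memory.zswap.current
 */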
8519#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8520
21afa38e
JW
8521static int __init mem_cgroup_swap_init(void)
8522{
2d1c4980 8523 if (mem_cgroup_disabled())
eccb52e7
JW
8524 return 0;
8525
8526 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8527 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
f4840ccf
JW
8528#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8529 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8530#endif
21afa38e
JW
8531 return 0;
8532}
b25806dc 8533subsys_initcall(mem_cgroup_swap_init);
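/*
 * mem_cgroup_swap_init() runs as a subsys_initcall and only registers the
 * cftype arrays: swap_files on the default (v2) hierarchy, memsw_files on
 * the legacy (v1) hierarchy, and zswap_files on v2 when both
 * CONFIG_MEMCG_KMEM and CONFIG_ZSWAP are enabled.  Nothing is registered
 * if the memory controller is disabled (e.g. cgroup_disable=memory).
 */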
21afa38e 8534
e55b9f96 8535#endif /* CONFIG_SWAP */