]> git.ipfire.org Git - thirdparty/kernel/stable.git/blame - mm/memcontrol.c
memcg: rename mem_cgroup_flush_stats_"delayed" to "ratelimited"
[thirdparty/kernel/stable.git] / mm / memcontrol.c
CommitLineData
c942fddf 1// SPDX-License-Identifier: GPL-2.0-or-later
8cdea7c0
BS
2/* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
78fb7466
PE
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
2e72b634
KS
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
7ae1e1d0
GC
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
1575e68b
JW
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
6168d0da
AS
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
8cdea7c0
BS
26 */
27
3e32cb2e 28#include <linux/page_counter.h>
8cdea7c0
BS
29#include <linux/memcontrol.h>
30#include <linux/cgroup.h>
a520110e 31#include <linux/pagewalk.h>
6e84f315 32#include <linux/sched/mm.h>
3a4f8a0b 33#include <linux/shmem_fs.h>
4ffef5fe 34#include <linux/hugetlb.h>
d13d1443 35#include <linux/pagemap.h>
1ff9e6e1 36#include <linux/vm_event_item.h>
d52aa412 37#include <linux/smp.h>
8a9f3ccd 38#include <linux/page-flags.h>
66e1707b 39#include <linux/backing-dev.h>
8a9f3ccd
BS
40#include <linux/bit_spinlock.h>
41#include <linux/rcupdate.h>
e222432b 42#include <linux/limits.h>
b9e15baf 43#include <linux/export.h>
8c7c6e34 44#include <linux/mutex.h>
bb4cc1a8 45#include <linux/rbtree.h>
b6ac57d5 46#include <linux/slab.h>
66e1707b 47#include <linux/swap.h>
02491447 48#include <linux/swapops.h>
66e1707b 49#include <linux/spinlock.h>
2e72b634 50#include <linux/eventfd.h>
79bd9814 51#include <linux/poll.h>
2e72b634 52#include <linux/sort.h>
66e1707b 53#include <linux/fs.h>
d2ceb9b7 54#include <linux/seq_file.h>
70ddf637 55#include <linux/vmpressure.h>
dc90f084 56#include <linux/memremap.h>
b69408e8 57#include <linux/mm_inline.h>
5d1ea48b 58#include <linux/swap_cgroup.h>
cdec2e42 59#include <linux/cpu.h>
158e0a2d 60#include <linux/oom.h>
0056f4e6 61#include <linux/lockdep.h>
79bd9814 62#include <linux/file.h>
03248add 63#include <linux/resume_user_mode.h>
0e4b01df 64#include <linux/psi.h>
c8713d0b 65#include <linux/seq_buf.h>
6a792697 66#include <linux/sched/isolation.h>
08e552c6 67#include "internal.h"
d1a4c0b3 68#include <net/sock.h>
4bd2c1ee 69#include <net/ip.h>
f35c3a8e 70#include "slab.h"
014bb1de 71#include "swap.h"
8cdea7c0 72
7c0f6ba6 73#include <linux/uaccess.h>
8697d331 74
cc8e970c
KM
75#include <trace/events/vmscan.h>
76
073219e9
TH
77struct cgroup_subsys memory_cgrp_subsys __read_mostly;
78EXPORT_SYMBOL(memory_cgrp_subsys);
68ae564b 79
7d828602
JW
80struct mem_cgroup *root_mem_cgroup __read_mostly;
81
37d5985c
RG
82/* Active memory cgroup to use from an interrupt context */
83DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
c74d40e8 84EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
37d5985c 85
f7e1cb6e 86/* Socket memory accounting disabled? */
0f0cace3 87static bool cgroup_memory_nosocket __ro_after_init;
f7e1cb6e 88
04823c83 89/* Kernel memory accounting disabled? */
17c17367 90static bool cgroup_memory_nokmem __ro_after_init;
04823c83 91
b6c1a8af
YS
92/* BPF memory accounting disabled? */
93static bool cgroup_memory_nobpf __ro_after_init;
94
97b27821
TH
95#ifdef CONFIG_CGROUP_WRITEBACK
96static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
97#endif
98
7941d214
JW
99/* Whether legacy memory+swap accounting is active */
100static bool do_memsw_account(void)
101{
b25806dc 102 return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
7941d214
JW
103}
104
a0db00fc
KS
105#define THRESHOLDS_EVENTS_TARGET 128
106#define SOFTLIMIT_EVENTS_TARGET 1024
e9f8974f 107
bb4cc1a8
AM
108/*
109 * Cgroups above their limits are maintained in a RB-Tree, independent of
110 * their hierarchy representation
111 */
112
ef8f2327 113struct mem_cgroup_tree_per_node {
bb4cc1a8 114 struct rb_root rb_root;
fa90b2fd 115 struct rb_node *rb_rightmost;
bb4cc1a8
AM
116 spinlock_t lock;
117};
118
bb4cc1a8
AM
119struct mem_cgroup_tree {
120 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
121};
122
123static struct mem_cgroup_tree soft_limit_tree __read_mostly;
124
9490ff27
KH
125/* for OOM */
126struct mem_cgroup_eventfd_list {
127 struct list_head list;
128 struct eventfd_ctx *eventfd;
129};
2e72b634 130
79bd9814
TH
131/*
132 * cgroup_event represents events which userspace want to receive.
133 */
3bc942f3 134struct mem_cgroup_event {
79bd9814 135 /*
59b6f873 136 * memcg which the event belongs to.
79bd9814 137 */
59b6f873 138 struct mem_cgroup *memcg;
79bd9814
TH
139 /*
140 * eventfd to signal userspace about the event.
141 */
142 struct eventfd_ctx *eventfd;
143 /*
144 * Each of these stored in a list by the cgroup.
145 */
146 struct list_head list;
fba94807
TH
147 /*
148 * register_event() callback will be used to add new userspace
149 * waiter for changes related to this event. Use eventfd_signal()
150 * on eventfd to send notification to userspace.
151 */
59b6f873 152 int (*register_event)(struct mem_cgroup *memcg,
347c4a87 153 struct eventfd_ctx *eventfd, const char *args);
fba94807
TH
154 /*
155 * unregister_event() callback will be called when userspace closes
156 * the eventfd or on cgroup removing. This callback must be set,
157 * if you want provide notification functionality.
158 */
59b6f873 159 void (*unregister_event)(struct mem_cgroup *memcg,
fba94807 160 struct eventfd_ctx *eventfd);
79bd9814
TH
161 /*
162 * All fields below needed to unregister event when
163 * userspace closes eventfd.
164 */
165 poll_table pt;
166 wait_queue_head_t *wqh;
ac6424b9 167 wait_queue_entry_t wait;
79bd9814
TH
168 struct work_struct remove;
169};
170
c0ff4b85
R
171static void mem_cgroup_threshold(struct mem_cgroup *memcg);
172static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
2e72b634 173
7dc74be0
DN
174/* Stuffs for move charges at task migration. */
175/*
1dfab5ab 176 * Types of charges to be moved.
7dc74be0 177 */
1dfab5ab
JW
178#define MOVE_ANON 0x1U
179#define MOVE_FILE 0x2U
180#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
7dc74be0 181
4ffef5fe
DN
182/* "mc" and its members are protected by cgroup_mutex */
183static struct move_charge_struct {
b1dd693e 184 spinlock_t lock; /* for from, to */
264a0ae1 185 struct mm_struct *mm;
4ffef5fe
DN
186 struct mem_cgroup *from;
187 struct mem_cgroup *to;
1dfab5ab 188 unsigned long flags;
4ffef5fe 189 unsigned long precharge;
854ffa8d 190 unsigned long moved_charge;
483c30b5 191 unsigned long moved_swap;
8033b97c
DN
192 struct task_struct *moving_task; /* a task moving charges */
193 wait_queue_head_t waitq; /* a waitq for other context */
194} mc = {
2bd9bb20 195 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
8033b97c
DN
196 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
197};
4ffef5fe 198
4e416953
BS
199/*
200 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
201 * limit reclaim to prevent infinite loops, if they ever occur.
202 */
a0db00fc 203#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
bb4cc1a8 204#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
4e416953 205
8c7c6e34 206/* for encoding cft->private value on file */
86ae53e1
GC
207enum res_type {
208 _MEM,
209 _MEMSWAP,
510fc4e1 210 _KMEM,
d55f90bf 211 _TCP,
86ae53e1
GC
212};
213
a0db00fc
KS
214#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
215#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
8c7c6e34
KH
216#define MEMFILE_ATTR(val) ((val) & 0xffff)
217
b05706f1
KT
218/*
219 * Iteration constructs for visiting all cgroups (under a tree). If
220 * loops are exited prematurely (break), mem_cgroup_iter_break() must
221 * be used for reference counting.
222 */
223#define for_each_mem_cgroup_tree(iter, root) \
224 for (iter = mem_cgroup_iter(root, NULL, NULL); \
225 iter != NULL; \
226 iter = mem_cgroup_iter(root, iter, NULL))
227
228#define for_each_mem_cgroup(iter) \
229 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
230 iter != NULL; \
231 iter = mem_cgroup_iter(NULL, iter, NULL))
232
a4ebf1b6 233static inline bool task_is_dying(void)
7775face
TH
234{
235 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
236 (current->flags & PF_EXITING);
237}
238
70ddf637
AV
239/* Some nice accessors for the vmpressure. */
240struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
241{
242 if (!memcg)
243 memcg = root_mem_cgroup;
244 return &memcg->vmpressure;
245}
246
9647875b 247struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
70ddf637 248{
9647875b 249 return container_of(vmpr, struct mem_cgroup, vmpressure);
70ddf637
AV
250}
251
84c07d11 252#ifdef CONFIG_MEMCG_KMEM
0764db9b 253static DEFINE_SPINLOCK(objcg_lock);
bf4f0599 254
4d5c8aed
RG
255bool mem_cgroup_kmem_disabled(void)
256{
257 return cgroup_memory_nokmem;
258}
259
f1286fae
MS
260static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
261 unsigned int nr_pages);
c1a660de 262
bf4f0599
RG
263static void obj_cgroup_release(struct percpu_ref *ref)
264{
265 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
bf4f0599
RG
266 unsigned int nr_bytes;
267 unsigned int nr_pages;
268 unsigned long flags;
269
270 /*
271 * At this point all allocated objects are freed, and
272 * objcg->nr_charged_bytes can't have an arbitrary byte value.
273 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
274 *
275 * The following sequence can lead to it:
276 * 1) CPU0: objcg == stock->cached_objcg
277 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
278 * PAGE_SIZE bytes are charged
279 * 3) CPU1: a process from another memcg is allocating something,
280 * the stock if flushed,
281 * objcg->nr_charged_bytes = PAGE_SIZE - 92
282 * 5) CPU0: we do release this object,
283 * 92 bytes are added to stock->nr_bytes
284 * 6) CPU0: stock is flushed,
285 * 92 bytes are added to objcg->nr_charged_bytes
286 *
287 * In the result, nr_charged_bytes == PAGE_SIZE.
288 * This page will be uncharged in obj_cgroup_release().
289 */
290 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
291 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
292 nr_pages = nr_bytes >> PAGE_SHIFT;
293
bf4f0599 294 if (nr_pages)
f1286fae 295 obj_cgroup_uncharge_pages(objcg, nr_pages);
271dd6b1 296
0764db9b 297 spin_lock_irqsave(&objcg_lock, flags);
bf4f0599 298 list_del(&objcg->list);
0764db9b 299 spin_unlock_irqrestore(&objcg_lock, flags);
bf4f0599
RG
300
301 percpu_ref_exit(ref);
302 kfree_rcu(objcg, rcu);
303}
304
305static struct obj_cgroup *obj_cgroup_alloc(void)
306{
307 struct obj_cgroup *objcg;
308 int ret;
309
310 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
311 if (!objcg)
312 return NULL;
313
314 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
315 GFP_KERNEL);
316 if (ret) {
317 kfree(objcg);
318 return NULL;
319 }
320 INIT_LIST_HEAD(&objcg->list);
321 return objcg;
322}
323
324static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
325 struct mem_cgroup *parent)
326{
327 struct obj_cgroup *objcg, *iter;
328
329 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
330
0764db9b 331 spin_lock_irq(&objcg_lock);
bf4f0599 332
9838354e
MS
333 /* 1) Ready to reparent active objcg. */
334 list_add(&objcg->list, &memcg->objcg_list);
335 /* 2) Reparent active objcg and already reparented objcgs to parent. */
336 list_for_each_entry(iter, &memcg->objcg_list, list)
337 WRITE_ONCE(iter->memcg, parent);
338 /* 3) Move already reparented objcgs to the parent's list */
bf4f0599
RG
339 list_splice(&memcg->objcg_list, &parent->objcg_list);
340
0764db9b 341 spin_unlock_irq(&objcg_lock);
bf4f0599
RG
342
343 percpu_ref_kill(&objcg->refcnt);
344}
345
d7f25f8a
GC
346/*
347 * A lot of the calls to the cache allocation functions are expected to be
272911a4 348 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
d7f25f8a
GC
349 * conditional to this static branch, we'll have to allow modules that does
350 * kmem_cache_alloc and the such to see this symbol as well
351 */
f7a449f7
RG
352DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
353EXPORT_SYMBOL(memcg_kmem_online_key);
b6c1a8af
YS
354
355DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
356EXPORT_SYMBOL(memcg_bpf_enabled_key);
0a432dcb 357#endif
17cc4dfe 358
ad7fa852 359/**
75376c6f
MWO
360 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
361 * @folio: folio of interest
ad7fa852
TH
362 *
363 * If memcg is bound to the default hierarchy, css of the memcg associated
75376c6f 364 * with @folio is returned. The returned css remains associated with @folio
ad7fa852
TH
365 * until it is released.
366 *
367 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
368 * is returned.
ad7fa852 369 */
75376c6f 370struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
ad7fa852 371{
75376c6f 372 struct mem_cgroup *memcg = folio_memcg(folio);
ad7fa852 373
9e10a130 374 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
ad7fa852
TH
375 memcg = root_mem_cgroup;
376
ad7fa852
TH
377 return &memcg->css;
378}
379
2fc04524
VD
380/**
381 * page_cgroup_ino - return inode number of the memcg a page is charged to
382 * @page: the page
383 *
384 * Look up the closest online ancestor of the memory cgroup @page is charged to
385 * and return its inode number or 0 if @page is not charged to any cgroup. It
386 * is safe to call this function without holding a reference to @page.
387 *
388 * Note, this function is inherently racy, because there is nothing to prevent
389 * the cgroup inode from getting torn down and potentially reallocated a moment
390 * after page_cgroup_ino() returns, so it only should be used by callers that
391 * do not care (such as procfs interfaces).
392 */
393ino_t page_cgroup_ino(struct page *page)
394{
395 struct mem_cgroup *memcg;
396 unsigned long ino = 0;
397
398 rcu_read_lock();
bcfe06bf 399 memcg = page_memcg_check(page);
286e04b8 400
2fc04524
VD
401 while (memcg && !(memcg->css.flags & CSS_ONLINE))
402 memcg = parent_mem_cgroup(memcg);
403 if (memcg)
404 ino = cgroup_ino(memcg->css.cgroup);
405 rcu_read_unlock();
406 return ino;
407}
408
ef8f2327
MG
409static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
410 struct mem_cgroup_tree_per_node *mctz,
3e32cb2e 411 unsigned long new_usage_in_excess)
bb4cc1a8
AM
412{
413 struct rb_node **p = &mctz->rb_root.rb_node;
414 struct rb_node *parent = NULL;
ef8f2327 415 struct mem_cgroup_per_node *mz_node;
fa90b2fd 416 bool rightmost = true;
bb4cc1a8
AM
417
418 if (mz->on_tree)
419 return;
420
421 mz->usage_in_excess = new_usage_in_excess;
422 if (!mz->usage_in_excess)
423 return;
424 while (*p) {
425 parent = *p;
ef8f2327 426 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
bb4cc1a8 427 tree_node);
fa90b2fd 428 if (mz->usage_in_excess < mz_node->usage_in_excess) {
bb4cc1a8 429 p = &(*p)->rb_left;
fa90b2fd 430 rightmost = false;
378876b0 431 } else {
bb4cc1a8 432 p = &(*p)->rb_right;
378876b0 433 }
bb4cc1a8 434 }
fa90b2fd
DB
435
436 if (rightmost)
437 mctz->rb_rightmost = &mz->tree_node;
438
bb4cc1a8
AM
439 rb_link_node(&mz->tree_node, parent, p);
440 rb_insert_color(&mz->tree_node, &mctz->rb_root);
441 mz->on_tree = true;
442}
443
ef8f2327
MG
444static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
445 struct mem_cgroup_tree_per_node *mctz)
bb4cc1a8
AM
446{
447 if (!mz->on_tree)
448 return;
fa90b2fd
DB
449
450 if (&mz->tree_node == mctz->rb_rightmost)
451 mctz->rb_rightmost = rb_prev(&mz->tree_node);
452
bb4cc1a8
AM
453 rb_erase(&mz->tree_node, &mctz->rb_root);
454 mz->on_tree = false;
455}
456
ef8f2327
MG
457static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
458 struct mem_cgroup_tree_per_node *mctz)
bb4cc1a8 459{
0a31bc97
JW
460 unsigned long flags;
461
462 spin_lock_irqsave(&mctz->lock, flags);
cf2c8127 463 __mem_cgroup_remove_exceeded(mz, mctz);
0a31bc97 464 spin_unlock_irqrestore(&mctz->lock, flags);
bb4cc1a8
AM
465}
466
3e32cb2e
JW
467static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
468{
469 unsigned long nr_pages = page_counter_read(&memcg->memory);
4db0c3c2 470 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
3e32cb2e
JW
471 unsigned long excess = 0;
472
473 if (nr_pages > soft_limit)
474 excess = nr_pages - soft_limit;
475
476 return excess;
477}
bb4cc1a8 478
658b69c9 479static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
bb4cc1a8 480{
3e32cb2e 481 unsigned long excess;
ef8f2327
MG
482 struct mem_cgroup_per_node *mz;
483 struct mem_cgroup_tree_per_node *mctz;
bb4cc1a8 484
e4dde56c 485 if (lru_gen_enabled()) {
36c7b4db
A
486 if (soft_limit_excess(memcg))
487 lru_gen_soft_reclaim(&memcg->nodeinfo[nid]->lruvec);
e4dde56c
YZ
488 return;
489 }
490
2ab082ba 491 mctz = soft_limit_tree.rb_tree_per_node[nid];
bfc7228b
LD
492 if (!mctz)
493 return;
bb4cc1a8
AM
494 /*
495 * Necessary to update all ancestors when hierarchy is used.
496 * because their event counter is not touched.
497 */
498 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
658b69c9 499 mz = memcg->nodeinfo[nid];
3e32cb2e 500 excess = soft_limit_excess(memcg);
bb4cc1a8
AM
501 /*
502 * We have to update the tree if mz is on RB-tree or
503 * mem is over its softlimit.
504 */
505 if (excess || mz->on_tree) {
0a31bc97
JW
506 unsigned long flags;
507
508 spin_lock_irqsave(&mctz->lock, flags);
bb4cc1a8
AM
509 /* if on-tree, remove it */
510 if (mz->on_tree)
cf2c8127 511 __mem_cgroup_remove_exceeded(mz, mctz);
bb4cc1a8
AM
512 /*
513 * Insert again. mz->usage_in_excess will be updated.
514 * If excess is 0, no tree ops.
515 */
cf2c8127 516 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 517 spin_unlock_irqrestore(&mctz->lock, flags);
bb4cc1a8
AM
518 }
519 }
520}
521
522static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
523{
ef8f2327
MG
524 struct mem_cgroup_tree_per_node *mctz;
525 struct mem_cgroup_per_node *mz;
526 int nid;
bb4cc1a8 527
e231875b 528 for_each_node(nid) {
a3747b53 529 mz = memcg->nodeinfo[nid];
2ab082ba 530 mctz = soft_limit_tree.rb_tree_per_node[nid];
bfc7228b
LD
531 if (mctz)
532 mem_cgroup_remove_exceeded(mz, mctz);
bb4cc1a8
AM
533 }
534}
535
ef8f2327
MG
536static struct mem_cgroup_per_node *
537__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
bb4cc1a8 538{
ef8f2327 539 struct mem_cgroup_per_node *mz;
bb4cc1a8
AM
540
541retry:
542 mz = NULL;
fa90b2fd 543 if (!mctz->rb_rightmost)
bb4cc1a8
AM
544 goto done; /* Nothing to reclaim from */
545
fa90b2fd
DB
546 mz = rb_entry(mctz->rb_rightmost,
547 struct mem_cgroup_per_node, tree_node);
bb4cc1a8
AM
548 /*
549 * Remove the node now but someone else can add it back,
550 * we will to add it back at the end of reclaim to its correct
551 * position in the tree.
552 */
cf2c8127 553 __mem_cgroup_remove_exceeded(mz, mctz);
3e32cb2e 554 if (!soft_limit_excess(mz->memcg) ||
8965aa28 555 !css_tryget(&mz->memcg->css))
bb4cc1a8
AM
556 goto retry;
557done:
558 return mz;
559}
560
ef8f2327
MG
561static struct mem_cgroup_per_node *
562mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
bb4cc1a8 563{
ef8f2327 564 struct mem_cgroup_per_node *mz;
bb4cc1a8 565
0a31bc97 566 spin_lock_irq(&mctz->lock);
bb4cc1a8 567 mz = __mem_cgroup_largest_soft_limit_node(mctz);
0a31bc97 568 spin_unlock_irq(&mctz->lock);
bb4cc1a8
AM
569 return mz;
570}
571
11192d9c
SB
572/*
573 * memcg and lruvec stats flushing
574 *
575 * Many codepaths leading to stats update or read are performance sensitive and
576 * adding stats flushing in such codepaths is not desirable. So, to optimize the
577 * flushing the kernel does:
578 *
579 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
580 * rstat update tree grow unbounded.
581 *
582 * 2) Flush the stats synchronously on reader side only when there are more than
583 * (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
584 * will let stats be out of sync by atmost (MEMCG_CHARGE_BATCH * nr_cpus) but
585 * only for 2 seconds due to (1).
586 */
587static void flush_memcg_stats_dwork(struct work_struct *w);
588static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
589static DEFINE_SPINLOCK(stats_flush_lock);
590static DEFINE_PER_CPU(unsigned int, stats_updates);
591static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
9b301615
SB
592static u64 flush_next_time;
593
594#define FLUSH_TIME (2UL*HZ)
11192d9c 595
be3e67b5
SAS
596/*
597 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
598 * not rely on this as part of an acquired spinlock_t lock. These functions are
599 * never used in hardirq context on PREEMPT_RT and therefore disabling preemtion
600 * is sufficient.
601 */
602static void memcg_stats_lock(void)
603{
e575d401
TG
604 preempt_disable_nested();
605 VM_WARN_ON_IRQS_ENABLED();
be3e67b5
SAS
606}
607
608static void __memcg_stats_lock(void)
609{
e575d401 610 preempt_disable_nested();
be3e67b5
SAS
611}
612
613static void memcg_stats_unlock(void)
614{
e575d401 615 preempt_enable_nested();
be3e67b5
SAS
616}
617
5b3be698 618static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
11192d9c 619{
5b3be698
SB
620 unsigned int x;
621
11192d9c 622 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
5b3be698
SB
623
624 x = __this_cpu_add_return(stats_updates, abs(val));
625 if (x > MEMCG_CHARGE_BATCH) {
873f64b7
JS
626 /*
627 * If stats_flush_threshold exceeds the threshold
628 * (>num_online_cpus()), cgroup stats update will be triggered
629 * in __mem_cgroup_flush_stats(). Increasing this var further
630 * is redundant and simply adds overhead in atomic update.
631 */
632 if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
633 atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
5b3be698
SB
634 __this_cpu_write(stats_updates, 0);
635 }
11192d9c
SB
636}
637
638static void __mem_cgroup_flush_stats(void)
639{
fd25a9e0
SB
640 unsigned long flag;
641
642 if (!spin_trylock_irqsave(&stats_flush_lock, flag))
11192d9c
SB
643 return;
644
9b301615 645 flush_next_time = jiffies_64 + 2*FLUSH_TIME;
8bff9a04 646 cgroup_rstat_flush_atomic(root_mem_cgroup->css.cgroup);
11192d9c 647 atomic_set(&stats_flush_threshold, 0);
fd25a9e0 648 spin_unlock_irqrestore(&stats_flush_lock, flag);
11192d9c
SB
649}
650
651void mem_cgroup_flush_stats(void)
652{
653 if (atomic_read(&stats_flush_threshold) > num_online_cpus())
654 __mem_cgroup_flush_stats();
655}
656
92fbbc72 657void mem_cgroup_flush_stats_ratelimited(void)
9b301615
SB
658{
659 if (time_after64(jiffies_64, flush_next_time))
660 mem_cgroup_flush_stats();
661}
662
11192d9c
SB
663static void flush_memcg_stats_dwork(struct work_struct *w)
664{
5b3be698 665 __mem_cgroup_flush_stats();
9b301615 666 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
11192d9c
SB
667}
668
d396def5
SB
669/* Subset of vm_event_item to report for memcg event stats */
670static const unsigned int memcg_vm_event_stat[] = {
8278f1c7
SB
671 PGPGIN,
672 PGPGOUT,
d396def5
SB
673 PGSCAN_KSWAPD,
674 PGSCAN_DIRECT,
57e9cc50 675 PGSCAN_KHUGEPAGED,
d396def5
SB
676 PGSTEAL_KSWAPD,
677 PGSTEAL_DIRECT,
57e9cc50 678 PGSTEAL_KHUGEPAGED,
d396def5
SB
679 PGFAULT,
680 PGMAJFAULT,
681 PGREFILL,
682 PGACTIVATE,
683 PGDEACTIVATE,
684 PGLAZYFREE,
685 PGLAZYFREED,
686#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
687 ZSWPIN,
688 ZSWPOUT,
689#endif
690#ifdef CONFIG_TRANSPARENT_HUGEPAGE
691 THP_FAULT_ALLOC,
692 THP_COLLAPSE_ALLOC,
693#endif
694};
695
8278f1c7
SB
696#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
697static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
698
699static void init_memcg_events(void)
700{
701 int i;
702
703 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
704 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
705}
706
707static inline int memcg_events_index(enum vm_event_item idx)
708{
709 return mem_cgroup_events_index[idx] - 1;
710}
711
410f8e82
SB
712struct memcg_vmstats_percpu {
713 /* Local (CPU and cgroup) page state & events */
714 long state[MEMCG_NR_STAT];
8278f1c7 715 unsigned long events[NR_MEMCG_EVENTS];
410f8e82
SB
716
717 /* Delta calculation for lockless upward propagation */
718 long state_prev[MEMCG_NR_STAT];
8278f1c7 719 unsigned long events_prev[NR_MEMCG_EVENTS];
410f8e82
SB
720
721 /* Cgroup1: threshold notifications & softlimit tree updates */
722 unsigned long nr_page_events;
723 unsigned long targets[MEM_CGROUP_NTARGETS];
724};
725
726struct memcg_vmstats {
727 /* Aggregated (CPU and subtree) page state & events */
728 long state[MEMCG_NR_STAT];
8278f1c7 729 unsigned long events[NR_MEMCG_EVENTS];
410f8e82
SB
730
731 /* Pending child counts during tree propagation */
732 long state_pending[MEMCG_NR_STAT];
8278f1c7 733 unsigned long events_pending[NR_MEMCG_EVENTS];
410f8e82
SB
734};
735
736unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
737{
738 long x = READ_ONCE(memcg->vmstats->state[idx]);
739#ifdef CONFIG_SMP
740 if (x < 0)
741 x = 0;
742#endif
743 return x;
744}
745
db9adbcb
JW
746/**
747 * __mod_memcg_state - update cgroup memory statistics
748 * @memcg: the memory cgroup
749 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
750 * @val: delta to add to the counter, can be negative
751 */
752void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
753{
db9adbcb
JW
754 if (mem_cgroup_disabled())
755 return;
756
2d146aa3 757 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
5b3be698 758 memcg_rstat_updated(memcg, val);
db9adbcb
JW
759}
760
2d146aa3 761/* idx can be of type enum memcg_stat_item or node_stat_item. */
a18e6e6e
JW
762static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
763{
764 long x = 0;
765 int cpu;
766
767 for_each_possible_cpu(cpu)
2d146aa3 768 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
a18e6e6e
JW
769#ifdef CONFIG_SMP
770 if (x < 0)
771 x = 0;
772#endif
773 return x;
774}
775
eedc4e5a
RG
776void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
777 int val)
db9adbcb
JW
778{
779 struct mem_cgroup_per_node *pn;
42a30035 780 struct mem_cgroup *memcg;
db9adbcb 781
db9adbcb 782 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
42a30035 783 memcg = pn->memcg;
db9adbcb 784
be3e67b5
SAS
785 /*
786 * The caller from rmap relay on disabled preemption becase they never
787 * update their counter from in-interrupt context. For these two
788 * counters we check that the update is never performed from an
789 * interrupt context while other caller need to have disabled interrupt.
790 */
791 __memcg_stats_lock();
e575d401 792 if (IS_ENABLED(CONFIG_DEBUG_VM)) {
be3e67b5
SAS
793 switch (idx) {
794 case NR_ANON_MAPPED:
795 case NR_FILE_MAPPED:
796 case NR_ANON_THPS:
797 case NR_SHMEM_PMDMAPPED:
798 case NR_FILE_PMDMAPPED:
799 WARN_ON_ONCE(!in_task());
800 break;
801 default:
e575d401 802 VM_WARN_ON_IRQS_ENABLED();
be3e67b5
SAS
803 }
804 }
805
db9adbcb 806 /* Update memcg */
11192d9c 807 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
db9adbcb 808
b4c46484 809 /* Update lruvec */
7e1c0d6f 810 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
11192d9c 811
5b3be698 812 memcg_rstat_updated(memcg, val);
be3e67b5 813 memcg_stats_unlock();
db9adbcb
JW
814}
815
eedc4e5a
RG
816/**
817 * __mod_lruvec_state - update lruvec memory statistics
818 * @lruvec: the lruvec
819 * @idx: the stat item
820 * @val: delta to add to the counter, can be negative
821 *
822 * The lruvec is the intersection of the NUMA node and a cgroup. This
823 * function updates the all three counters that are affected by a
824 * change of state at this level: per-node, per-cgroup, per-lruvec.
825 */
826void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
827 int val)
828{
829 /* Update node */
830 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
831
832 /* Update memcg and lruvec */
833 if (!mem_cgroup_disabled())
834 __mod_memcg_lruvec_state(lruvec, idx, val);
835}
836
c47d5032
SB
837void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
838 int val)
839{
840 struct page *head = compound_head(page); /* rmap on tail pages */
b4e0b68f 841 struct mem_cgroup *memcg;
c47d5032
SB
842 pg_data_t *pgdat = page_pgdat(page);
843 struct lruvec *lruvec;
844
b4e0b68f
MS
845 rcu_read_lock();
846 memcg = page_memcg(head);
c47d5032 847 /* Untracked pages have no memcg, no lruvec. Update only the node */
d635a69d 848 if (!memcg) {
b4e0b68f 849 rcu_read_unlock();
c47d5032
SB
850 __mod_node_page_state(pgdat, idx, val);
851 return;
852 }
853
d635a69d 854 lruvec = mem_cgroup_lruvec(memcg, pgdat);
c47d5032 855 __mod_lruvec_state(lruvec, idx, val);
b4e0b68f 856 rcu_read_unlock();
c47d5032 857}
f0c0c115 858EXPORT_SYMBOL(__mod_lruvec_page_state);
c47d5032 859
da3ceeff 860void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
ec9f0238 861{
4f103c63 862 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
ec9f0238
RG
863 struct mem_cgroup *memcg;
864 struct lruvec *lruvec;
865
866 rcu_read_lock();
fc4db90f 867 memcg = mem_cgroup_from_slab_obj(p);
ec9f0238 868
8faeb1ff
MS
869 /*
870 * Untracked pages have no memcg, no lruvec. Update only the
871 * node. If we reparent the slab objects to the root memcg,
872 * when we free the slab object, we need to update the per-memcg
873 * vmstats to keep it correct for the root memcg.
874 */
875 if (!memcg) {
ec9f0238
RG
876 __mod_node_page_state(pgdat, idx, val);
877 } else {
867e5e1d 878 lruvec = mem_cgroup_lruvec(memcg, pgdat);
ec9f0238
RG
879 __mod_lruvec_state(lruvec, idx, val);
880 }
881 rcu_read_unlock();
882}
883
db9adbcb
JW
884/**
885 * __count_memcg_events - account VM events in a cgroup
886 * @memcg: the memory cgroup
887 * @idx: the event item
f0953a1b 888 * @count: the number of events that occurred
db9adbcb
JW
889 */
890void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
891 unsigned long count)
892{
8278f1c7
SB
893 int index = memcg_events_index(idx);
894
895 if (mem_cgroup_disabled() || index < 0)
db9adbcb
JW
896 return;
897
be3e67b5 898 memcg_stats_lock();
8278f1c7 899 __this_cpu_add(memcg->vmstats_percpu->events[index], count);
5b3be698 900 memcg_rstat_updated(memcg, count);
be3e67b5 901 memcg_stats_unlock();
db9adbcb
JW
902}
903
42a30035 904static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
e9f8974f 905{
8278f1c7
SB
906 int index = memcg_events_index(event);
907
908 if (index < 0)
909 return 0;
910 return READ_ONCE(memcg->vmstats->events[index]);
e9f8974f
JW
911}
912
42a30035
JW
913static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
914{
815744d7
JW
915 long x = 0;
916 int cpu;
8278f1c7
SB
917 int index = memcg_events_index(event);
918
919 if (index < 0)
920 return 0;
815744d7
JW
921
922 for_each_possible_cpu(cpu)
8278f1c7 923 x += per_cpu(memcg->vmstats_percpu->events[index], cpu);
815744d7 924 return x;
42a30035
JW
925}
926
c0ff4b85 927static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
3fba69a5 928 int nr_pages)
d52aa412 929{
e401f176
KH
930 /* pagein of a big page is an event. So, ignore page size */
931 if (nr_pages > 0)
c9019e9b 932 __count_memcg_events(memcg, PGPGIN, 1);
3751d604 933 else {
c9019e9b 934 __count_memcg_events(memcg, PGPGOUT, 1);
3751d604
KH
935 nr_pages = -nr_pages; /* for event */
936 }
e401f176 937
871789d4 938 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
6d12e2d8
KH
939}
940
f53d7ce3
JW
941static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
942 enum mem_cgroup_events_target target)
7a159cc9
JW
943{
944 unsigned long val, next;
945
871789d4
CD
946 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
947 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
7a159cc9 948 /* from time_after() in jiffies.h */
6a1a8b80 949 if ((long)(next - val) < 0) {
f53d7ce3
JW
950 switch (target) {
951 case MEM_CGROUP_TARGET_THRESH:
952 next = val + THRESHOLDS_EVENTS_TARGET;
953 break;
bb4cc1a8
AM
954 case MEM_CGROUP_TARGET_SOFTLIMIT:
955 next = val + SOFTLIMIT_EVENTS_TARGET;
956 break;
f53d7ce3
JW
957 default:
958 break;
959 }
871789d4 960 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
f53d7ce3 961 return true;
7a159cc9 962 }
f53d7ce3 963 return false;
d2265e6f
KH
964}
965
966/*
967 * Check events in order.
968 *
969 */
8e88bd2d 970static void memcg_check_events(struct mem_cgroup *memcg, int nid)
d2265e6f 971{
2343e88d
SAS
972 if (IS_ENABLED(CONFIG_PREEMPT_RT))
973 return;
974
d2265e6f 975 /* threshold event is triggered in finer grain than soft limit */
f53d7ce3
JW
976 if (unlikely(mem_cgroup_event_ratelimit(memcg,
977 MEM_CGROUP_TARGET_THRESH))) {
bb4cc1a8 978 bool do_softlimit;
f53d7ce3 979
bb4cc1a8
AM
980 do_softlimit = mem_cgroup_event_ratelimit(memcg,
981 MEM_CGROUP_TARGET_SOFTLIMIT);
c0ff4b85 982 mem_cgroup_threshold(memcg);
bb4cc1a8 983 if (unlikely(do_softlimit))
8e88bd2d 984 mem_cgroup_update_tree(memcg, nid);
0a31bc97 985 }
d2265e6f
KH
986}
987
cf475ad2 988struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
78fb7466 989{
31a78f23
BS
990 /*
991 * mm_update_next_owner() may clear mm->owner to NULL
992 * if it races with swapoff, page migration, etc.
993 * So this can be called with p == NULL.
994 */
995 if (unlikely(!p))
996 return NULL;
997
073219e9 998 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
78fb7466 999}
33398cf2 1000EXPORT_SYMBOL(mem_cgroup_from_task);
78fb7466 1001
04f94e3f
DS
1002static __always_inline struct mem_cgroup *active_memcg(void)
1003{
55a68c82 1004 if (!in_task())
04f94e3f
DS
1005 return this_cpu_read(int_active_memcg);
1006 else
1007 return current->active_memcg;
1008}
1009
d46eb14b
SB
1010/**
1011 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1012 * @mm: mm from which memcg should be extracted. It can be NULL.
1013 *
04f94e3f
DS
1014 * Obtain a reference on mm->memcg and returns it if successful. If mm
1015 * is NULL, then the memcg is chosen as follows:
1016 * 1) The active memcg, if set.
1017 * 2) current->mm->memcg, if available
1018 * 3) root memcg
1019 * If mem_cgroup is disabled, NULL is returned.
d46eb14b
SB
1020 */
1021struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
54595fe2 1022{
d46eb14b
SB
1023 struct mem_cgroup *memcg;
1024
1025 if (mem_cgroup_disabled())
1026 return NULL;
0b7f569e 1027
2884b6b7
MS
1028 /*
1029 * Page cache insertions can happen without an
1030 * actual mm context, e.g. during disk probing
1031 * on boot, loopback IO, acct() writes etc.
1032 *
1033 * No need to css_get on root memcg as the reference
1034 * counting is disabled on the root level in the
1035 * cgroup core. See CSS_NO_REF.
1036 */
04f94e3f
DS
1037 if (unlikely(!mm)) {
1038 memcg = active_memcg();
1039 if (unlikely(memcg)) {
1040 /* remote memcg must hold a ref */
1041 css_get(&memcg->css);
1042 return memcg;
1043 }
1044 mm = current->mm;
1045 if (unlikely(!mm))
1046 return root_mem_cgroup;
1047 }
2884b6b7 1048
54595fe2
KH
1049 rcu_read_lock();
1050 do {
2884b6b7
MS
1051 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1052 if (unlikely(!memcg))
df381975 1053 memcg = root_mem_cgroup;
00d484f3 1054 } while (!css_tryget(&memcg->css));
54595fe2 1055 rcu_read_unlock();
c0ff4b85 1056 return memcg;
54595fe2 1057}
d46eb14b
SB
1058EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1059
4127c650
RG
1060static __always_inline bool memcg_kmem_bypass(void)
1061{
1062 /* Allow remote memcg charging from any context. */
1063 if (unlikely(active_memcg()))
1064 return false;
1065
1066 /* Memcg to charge can't be determined. */
6126891c 1067 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
4127c650
RG
1068 return true;
1069
1070 return false;
1071}
1072
5660048c
JW
1073/**
1074 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1075 * @root: hierarchy root
1076 * @prev: previously returned memcg, NULL on first invocation
1077 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1078 *
1079 * Returns references to children of the hierarchy below @root, or
1080 * @root itself, or %NULL after a full round-trip.
1081 *
1082 * Caller must pass the return value in @prev on subsequent
1083 * invocations for reference counting, or use mem_cgroup_iter_break()
1084 * to cancel a hierarchy walk before the round-trip is complete.
1085 *
05bdc520
ML
1086 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1087 * in the hierarchy among all concurrent reclaimers operating on the
1088 * same node.
5660048c 1089 */
694fbc0f 1090struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
5660048c 1091 struct mem_cgroup *prev,
694fbc0f 1092 struct mem_cgroup_reclaim_cookie *reclaim)
14067bb3 1093{
3f649ab7 1094 struct mem_cgroup_reclaim_iter *iter;
5ac8fb31 1095 struct cgroup_subsys_state *css = NULL;
9f3a0d09 1096 struct mem_cgroup *memcg = NULL;
5ac8fb31 1097 struct mem_cgroup *pos = NULL;
711d3d2c 1098
694fbc0f
AM
1099 if (mem_cgroup_disabled())
1100 return NULL;
5660048c 1101
9f3a0d09
JW
1102 if (!root)
1103 root = root_mem_cgroup;
7d74b06f 1104
542f85f9 1105 rcu_read_lock();
5f578161 1106
5ac8fb31 1107 if (reclaim) {
ef8f2327 1108 struct mem_cgroup_per_node *mz;
5ac8fb31 1109
a3747b53 1110 mz = root->nodeinfo[reclaim->pgdat->node_id];
9da83f3f 1111 iter = &mz->iter;
5ac8fb31 1112
a9320aae
WY
1113 /*
1114 * On start, join the current reclaim iteration cycle.
1115 * Exit when a concurrent walker completes it.
1116 */
1117 if (!prev)
1118 reclaim->generation = iter->generation;
1119 else if (reclaim->generation != iter->generation)
5ac8fb31
JW
1120 goto out_unlock;
1121
6df38689 1122 while (1) {
4db0c3c2 1123 pos = READ_ONCE(iter->position);
6df38689
VD
1124 if (!pos || css_tryget(&pos->css))
1125 break;
5ac8fb31 1126 /*
6df38689
VD
1127 * css reference reached zero, so iter->position will
1128 * be cleared by ->css_released. However, we should not
1129 * rely on this happening soon, because ->css_released
1130 * is called from a work queue, and by busy-waiting we
1131 * might block it. So we clear iter->position right
1132 * away.
5ac8fb31 1133 */
6df38689
VD
1134 (void)cmpxchg(&iter->position, pos, NULL);
1135 }
89d8330c
WY
1136 } else if (prev) {
1137 pos = prev;
5ac8fb31
JW
1138 }
1139
1140 if (pos)
1141 css = &pos->css;
1142
1143 for (;;) {
1144 css = css_next_descendant_pre(css, &root->css);
1145 if (!css) {
1146 /*
1147 * Reclaimers share the hierarchy walk, and a
1148 * new one might jump in right at the end of
1149 * the hierarchy - make sure they see at least
1150 * one group and restart from the beginning.
1151 */
1152 if (!prev)
1153 continue;
1154 break;
527a5ec9 1155 }
7d74b06f 1156
5ac8fb31
JW
1157 /*
1158 * Verify the css and acquire a reference. The root
1159 * is provided by the caller, so we know it's alive
1160 * and kicking, and don't take an extra reference.
1161 */
41555dad
WY
1162 if (css == &root->css || css_tryget(css)) {
1163 memcg = mem_cgroup_from_css(css);
0b8f73e1 1164 break;
41555dad 1165 }
9f3a0d09 1166 }
5ac8fb31
JW
1167
1168 if (reclaim) {
5ac8fb31 1169 /*
6df38689
VD
1170 * The position could have already been updated by a competing
1171 * thread, so check that the value hasn't changed since we read
1172 * it to avoid reclaiming from the same cgroup twice.
5ac8fb31 1173 */
6df38689
VD
1174 (void)cmpxchg(&iter->position, pos, memcg);
1175
5ac8fb31
JW
1176 if (pos)
1177 css_put(&pos->css);
1178
1179 if (!memcg)
1180 iter->generation++;
9f3a0d09 1181 }
5ac8fb31 1182
542f85f9
MH
1183out_unlock:
1184 rcu_read_unlock();
c40046f3
MH
1185 if (prev && prev != root)
1186 css_put(&prev->css);
1187
9f3a0d09 1188 return memcg;
14067bb3 1189}
7d74b06f 1190
5660048c
JW
1191/**
1192 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1193 * @root: hierarchy root
1194 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1195 */
1196void mem_cgroup_iter_break(struct mem_cgroup *root,
1197 struct mem_cgroup *prev)
9f3a0d09
JW
1198{
1199 if (!root)
1200 root = root_mem_cgroup;
1201 if (prev && prev != root)
1202 css_put(&prev->css);
1203}
7d74b06f 1204
54a83d6b
MC
1205static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1206 struct mem_cgroup *dead_memcg)
6df38689 1207{
6df38689 1208 struct mem_cgroup_reclaim_iter *iter;
ef8f2327
MG
1209 struct mem_cgroup_per_node *mz;
1210 int nid;
6df38689 1211
54a83d6b 1212 for_each_node(nid) {
a3747b53 1213 mz = from->nodeinfo[nid];
9da83f3f
YS
1214 iter = &mz->iter;
1215 cmpxchg(&iter->position, dead_memcg, NULL);
6df38689
VD
1216 }
1217}
1218
54a83d6b
MC
1219static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1220{
1221 struct mem_cgroup *memcg = dead_memcg;
1222 struct mem_cgroup *last;
1223
1224 do {
1225 __invalidate_reclaim_iterators(memcg, dead_memcg);
1226 last = memcg;
1227 } while ((memcg = parent_mem_cgroup(memcg)));
1228
1229 /*
b8dd3ee9 1230 * When cgroup1 non-hierarchy mode is used,
54a83d6b
MC
1231 * parent_mem_cgroup() does not walk all the way up to the
1232 * cgroup root (root_mem_cgroup). So we have to handle
1233 * dead_memcg from cgroup root separately.
1234 */
7848ed62 1235 if (!mem_cgroup_is_root(last))
54a83d6b
MC
1236 __invalidate_reclaim_iterators(root_mem_cgroup,
1237 dead_memcg);
1238}
1239
7c5f64f8
VD
1240/**
1241 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1242 * @memcg: hierarchy root
1243 * @fn: function to call for each task
1244 * @arg: argument passed to @fn
1245 *
1246 * This function iterates over tasks attached to @memcg or to any of its
1247 * descendants and calls @fn for each task. If @fn returns a non-zero
1248 * value, the function breaks the iteration loop and returns the value.
1249 * Otherwise, it will iterate over all tasks and return 0.
1250 *
1251 * This function must not be called for the root memory cgroup.
1252 */
1253int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1254 int (*fn)(struct task_struct *, void *), void *arg)
1255{
1256 struct mem_cgroup *iter;
1257 int ret = 0;
1258
7848ed62 1259 BUG_ON(mem_cgroup_is_root(memcg));
7c5f64f8
VD
1260
1261 for_each_mem_cgroup_tree(iter, memcg) {
1262 struct css_task_iter it;
1263 struct task_struct *task;
1264
f168a9a5 1265 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
7c5f64f8
VD
1266 while (!ret && (task = css_task_iter_next(&it)))
1267 ret = fn(task, arg);
1268 css_task_iter_end(&it);
1269 if (ret) {
1270 mem_cgroup_iter_break(memcg, iter);
1271 break;
1272 }
1273 }
1274 return ret;
1275}
1276
6168d0da 1277#ifdef CONFIG_DEBUG_VM
e809c3fe 1278void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
6168d0da
AS
1279{
1280 struct mem_cgroup *memcg;
1281
1282 if (mem_cgroup_disabled())
1283 return;
1284
e809c3fe 1285 memcg = folio_memcg(folio);
6168d0da
AS
1286
1287 if (!memcg)
7848ed62 1288 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
6168d0da 1289 else
e809c3fe 1290 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
6168d0da
AS
1291}
1292#endif
1293
6168d0da 1294/**
e809c3fe
MWO
1295 * folio_lruvec_lock - Lock the lruvec for a folio.
1296 * @folio: Pointer to the folio.
6168d0da 1297 *
d7e3aba5 1298 * These functions are safe to use under any of the following conditions:
e809c3fe
MWO
1299 * - folio locked
1300 * - folio_test_lru false
1301 * - folio_memcg_lock()
1302 * - folio frozen (refcount of 0)
1303 *
1304 * Return: The lruvec this folio is on with its lock held.
6168d0da 1305 */
e809c3fe 1306struct lruvec *folio_lruvec_lock(struct folio *folio)
6168d0da 1307{
e809c3fe 1308 struct lruvec *lruvec = folio_lruvec(folio);
6168d0da 1309
6168d0da 1310 spin_lock(&lruvec->lru_lock);
e809c3fe 1311 lruvec_memcg_debug(lruvec, folio);
6168d0da
AS
1312
1313 return lruvec;
1314}
1315
e809c3fe
MWO
1316/**
1317 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1318 * @folio: Pointer to the folio.
1319 *
1320 * These functions are safe to use under any of the following conditions:
1321 * - folio locked
1322 * - folio_test_lru false
1323 * - folio_memcg_lock()
1324 * - folio frozen (refcount of 0)
1325 *
1326 * Return: The lruvec this folio is on with its lock held and interrupts
1327 * disabled.
1328 */
1329struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
6168d0da 1330{
e809c3fe 1331 struct lruvec *lruvec = folio_lruvec(folio);
6168d0da 1332
6168d0da 1333 spin_lock_irq(&lruvec->lru_lock);
e809c3fe 1334 lruvec_memcg_debug(lruvec, folio);
6168d0da
AS
1335
1336 return lruvec;
1337}
1338
e809c3fe
MWO
1339/**
1340 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1341 * @folio: Pointer to the folio.
1342 * @flags: Pointer to irqsave flags.
1343 *
1344 * These functions are safe to use under any of the following conditions:
1345 * - folio locked
1346 * - folio_test_lru false
1347 * - folio_memcg_lock()
1348 * - folio frozen (refcount of 0)
1349 *
1350 * Return: The lruvec this folio is on with its lock held and interrupts
1351 * disabled.
1352 */
1353struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1354 unsigned long *flags)
6168d0da 1355{
e809c3fe 1356 struct lruvec *lruvec = folio_lruvec(folio);
6168d0da 1357
6168d0da 1358 spin_lock_irqsave(&lruvec->lru_lock, *flags);
e809c3fe 1359 lruvec_memcg_debug(lruvec, folio);
6168d0da
AS
1360
1361 return lruvec;
1362}
1363
925b7673 1364/**
fa9add64
HD
1365 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1366 * @lruvec: mem_cgroup per zone lru vector
1367 * @lru: index of lru list the page is sitting on
b4536f0c 1368 * @zid: zone id of the accounted pages
fa9add64 1369 * @nr_pages: positive when adding or negative when removing
925b7673 1370 *
ca707239 1371 * This function must be called under lru_lock, just before a page is added
07ca7606 1372 * to or just after a page is removed from an lru list.
3f58a829 1373 */
fa9add64 1374void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
b4536f0c 1375 int zid, int nr_pages)
3f58a829 1376{
ef8f2327 1377 struct mem_cgroup_per_node *mz;
fa9add64 1378 unsigned long *lru_size;
ca707239 1379 long size;
3f58a829
MK
1380
1381 if (mem_cgroup_disabled())
1382 return;
1383
ef8f2327 1384 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
b4536f0c 1385 lru_size = &mz->lru_zone_size[zid][lru];
ca707239
HD
1386
1387 if (nr_pages < 0)
1388 *lru_size += nr_pages;
1389
1390 size = *lru_size;
b4536f0c
MH
1391 if (WARN_ONCE(size < 0,
1392 "%s(%p, %d, %d): lru_size %ld\n",
1393 __func__, lruvec, lru, nr_pages, size)) {
ca707239
HD
1394 VM_BUG_ON(1);
1395 *lru_size = 0;
1396 }
1397
1398 if (nr_pages > 0)
1399 *lru_size += nr_pages;
08e552c6 1400}
544122e5 1401
19942822 1402/**
9d11ea9f 1403 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
dad7557e 1404 * @memcg: the memory cgroup
19942822 1405 *
9d11ea9f 1406 * Returns the maximum amount of memory @mem can be charged with, in
7ec99d62 1407 * pages.
19942822 1408 */
c0ff4b85 1409static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
19942822 1410{
3e32cb2e
JW
1411 unsigned long margin = 0;
1412 unsigned long count;
1413 unsigned long limit;
9d11ea9f 1414
3e32cb2e 1415 count = page_counter_read(&memcg->memory);
bbec2e15 1416 limit = READ_ONCE(memcg->memory.max);
3e32cb2e
JW
1417 if (count < limit)
1418 margin = limit - count;
1419
7941d214 1420 if (do_memsw_account()) {
3e32cb2e 1421 count = page_counter_read(&memcg->memsw);
bbec2e15 1422 limit = READ_ONCE(memcg->memsw.max);
1c4448ed 1423 if (count < limit)
3e32cb2e 1424 margin = min(margin, limit - count);
cbedbac3
LR
1425 else
1426 margin = 0;
3e32cb2e
JW
1427 }
1428
1429 return margin;
19942822
JW
1430}
1431
32047e2a 1432/*
bdcbb659 1433 * A routine for checking "mem" is under move_account() or not.
32047e2a 1434 *
bdcbb659
QH
1435 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1436 * moving cgroups. This is for waiting at high-memory pressure
1437 * caused by "move".
32047e2a 1438 */
c0ff4b85 1439static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
4b534334 1440{
2bd9bb20
KH
1441 struct mem_cgroup *from;
1442 struct mem_cgroup *to;
4b534334 1443 bool ret = false;
2bd9bb20
KH
1444 /*
1445 * Unlike task_move routines, we access mc.to, mc.from not under
1446 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1447 */
1448 spin_lock(&mc.lock);
1449 from = mc.from;
1450 to = mc.to;
1451 if (!from)
1452 goto unlock;
3e92041d 1453
2314b42d
JW
1454 ret = mem_cgroup_is_descendant(from, memcg) ||
1455 mem_cgroup_is_descendant(to, memcg);
2bd9bb20
KH
1456unlock:
1457 spin_unlock(&mc.lock);
4b534334
KH
1458 return ret;
1459}
1460
c0ff4b85 1461static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
4b534334
KH
1462{
1463 if (mc.moving_task && current != mc.moving_task) {
c0ff4b85 1464 if (mem_cgroup_under_move(memcg)) {
4b534334
KH
1465 DEFINE_WAIT(wait);
1466 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1467 /* moving charge context might have finished. */
1468 if (mc.moving_task)
1469 schedule();
1470 finish_wait(&mc.waitq, &wait);
1471 return true;
1472 }
1473 }
1474 return false;
1475}
1476
5f9a4f4a
MS
1477struct memory_stat {
1478 const char *name;
5f9a4f4a
MS
1479 unsigned int idx;
1480};
1481
57b2847d 1482static const struct memory_stat memory_stats[] = {
fff66b79
MS
1483 { "anon", NR_ANON_MAPPED },
1484 { "file", NR_FILE_PAGES },
a8c49af3 1485 { "kernel", MEMCG_KMEM },
fff66b79
MS
1486 { "kernel_stack", NR_KERNEL_STACK_KB },
1487 { "pagetables", NR_PAGETABLE },
ebc97a52 1488 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
fff66b79
MS
1489 { "percpu", MEMCG_PERCPU_B },
1490 { "sock", MEMCG_SOCK },
4e5aa1f4 1491 { "vmalloc", MEMCG_VMALLOC },
fff66b79 1492 { "shmem", NR_SHMEM },
f4840ccf
JW
1493#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1494 { "zswap", MEMCG_ZSWAP_B },
1495 { "zswapped", MEMCG_ZSWAPPED },
1496#endif
fff66b79
MS
1497 { "file_mapped", NR_FILE_MAPPED },
1498 { "file_dirty", NR_FILE_DIRTY },
1499 { "file_writeback", NR_WRITEBACK },
b6038942
SB
1500#ifdef CONFIG_SWAP
1501 { "swapcached", NR_SWAPCACHE },
1502#endif
5f9a4f4a 1503#ifdef CONFIG_TRANSPARENT_HUGEPAGE
fff66b79
MS
1504 { "anon_thp", NR_ANON_THPS },
1505 { "file_thp", NR_FILE_THPS },
1506 { "shmem_thp", NR_SHMEM_THPS },
5f9a4f4a 1507#endif
fff66b79
MS
1508 { "inactive_anon", NR_INACTIVE_ANON },
1509 { "active_anon", NR_ACTIVE_ANON },
1510 { "inactive_file", NR_INACTIVE_FILE },
1511 { "active_file", NR_ACTIVE_FILE },
1512 { "unevictable", NR_UNEVICTABLE },
1513 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1514 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
5f9a4f4a
MS
1515
1516 /* The memory events */
fff66b79
MS
1517 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1518 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1519 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1520 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1521 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1522 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1523 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
5f9a4f4a
MS
1524};
1525
fff66b79
MS
1526/* Translate stat items to the correct unit for memory.stat output */
1527static int memcg_page_state_unit(int item)
1528{
1529 switch (item) {
1530 case MEMCG_PERCPU_B:
f4840ccf 1531 case MEMCG_ZSWAP_B:
fff66b79
MS
1532 case NR_SLAB_RECLAIMABLE_B:
1533 case NR_SLAB_UNRECLAIMABLE_B:
1534 case WORKINGSET_REFAULT_ANON:
1535 case WORKINGSET_REFAULT_FILE:
1536 case WORKINGSET_ACTIVATE_ANON:
1537 case WORKINGSET_ACTIVATE_FILE:
1538 case WORKINGSET_RESTORE_ANON:
1539 case WORKINGSET_RESTORE_FILE:
1540 case WORKINGSET_NODERECLAIM:
1541 return 1;
1542 case NR_KERNEL_STACK_KB:
1543 return SZ_1K;
1544 default:
1545 return PAGE_SIZE;
1546 }
1547}
1548
1549static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1550 int item)
1551{
1552 return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1553}
1554
68aaee14 1555static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
c8713d0b
JW
1556{
1557 struct seq_buf s;
1558 int i;
71cd3113 1559
68aaee14 1560 seq_buf_init(&s, buf, bufsize);
c8713d0b
JW
1561
1562 /*
1563 * Provide statistics on the state of the memory subsystem as
1564 * well as cumulative event counters that show past behavior.
1565 *
1566 * This list is ordered following a combination of these gradients:
1567 * 1) generic big picture -> specifics and details
1568 * 2) reflecting userspace activity -> reflecting kernel heuristics
1569 *
1570 * Current memory state:
1571 */
fd25a9e0 1572 mem_cgroup_flush_stats();
c8713d0b 1573
5f9a4f4a
MS
1574 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1575 u64 size;
c8713d0b 1576
fff66b79 1577 size = memcg_page_state_output(memcg, memory_stats[i].idx);
5f9a4f4a 1578 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
c8713d0b 1579
5f9a4f4a 1580 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
fff66b79
MS
1581 size += memcg_page_state_output(memcg,
1582 NR_SLAB_RECLAIMABLE_B);
5f9a4f4a
MS
1583 seq_buf_printf(&s, "slab %llu\n", size);
1584 }
1585 }
c8713d0b
JW
1586
1587 /* Accumulated memory events */
c8713d0b
JW
1588 seq_buf_printf(&s, "pgscan %lu\n",
1589 memcg_events(memcg, PGSCAN_KSWAPD) +
57e9cc50
JW
1590 memcg_events(memcg, PGSCAN_DIRECT) +
1591 memcg_events(memcg, PGSCAN_KHUGEPAGED));
c8713d0b
JW
1592 seq_buf_printf(&s, "pgsteal %lu\n",
1593 memcg_events(memcg, PGSTEAL_KSWAPD) +
57e9cc50
JW
1594 memcg_events(memcg, PGSTEAL_DIRECT) +
1595 memcg_events(memcg, PGSTEAL_KHUGEPAGED));
c8713d0b 1596
8278f1c7
SB
1597 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1598 if (memcg_vm_event_stat[i] == PGPGIN ||
1599 memcg_vm_event_stat[i] == PGPGOUT)
1600 continue;
1601
673520f8
QZ
1602 seq_buf_printf(&s, "%s %lu\n",
1603 vm_event_name(memcg_vm_event_stat[i]),
1604 memcg_events(memcg, memcg_vm_event_stat[i]));
8278f1c7 1605 }
c8713d0b
JW
1606
1607 /* The above should easily fit into one page */
1608 WARN_ON_ONCE(seq_buf_has_overflowed(&s));
c8713d0b 1609}
71cd3113 1610
58cf188e 1611#define K(x) ((x) << (PAGE_SHIFT-10))
e222432b 1612/**
f0c867d9 1613 * mem_cgroup_print_oom_context: Print OOM information relevant to
1614 * memory controller.
e222432b
BS
1615 * @memcg: The memory cgroup that went over limit
1616 * @p: Task that is going to be killed
1617 *
1618 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1619 * enabled
1620 */
f0c867d9 1621void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
e222432b 1622{
e222432b
BS
1623 rcu_read_lock();
1624
f0c867d9 1625 if (memcg) {
1626 pr_cont(",oom_memcg=");
1627 pr_cont_cgroup_path(memcg->css.cgroup);
1628 } else
1629 pr_cont(",global_oom");
2415b9f5 1630 if (p) {
f0c867d9 1631 pr_cont(",task_memcg=");
2415b9f5 1632 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
2415b9f5 1633 }
e222432b 1634 rcu_read_unlock();
f0c867d9 1635}
1636
1637/**
1638 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1639 * memory controller.
1640 * @memcg: The memory cgroup that went over limit
1641 */
1642void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1643{
68aaee14
TH
1644 /* Use static buffer, for the caller is holding oom_lock. */
1645 static char buf[PAGE_SIZE];
1646
1647 lockdep_assert_held(&oom_lock);
e222432b 1648
3e32cb2e
JW
1649 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1650 K((u64)page_counter_read(&memcg->memory)),
15b42562 1651 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
c8713d0b
JW
1652 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1653 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1654 K((u64)page_counter_read(&memcg->swap)),
32d087cd 1655 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
c8713d0b
JW
1656 else {
1657 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1658 K((u64)page_counter_read(&memcg->memsw)),
1659 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1660 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1661 K((u64)page_counter_read(&memcg->kmem)),
1662 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
58cf188e 1663 }
c8713d0b
JW
1664
1665 pr_info("Memory cgroup stats for ");
1666 pr_cont_cgroup_path(memcg->css.cgroup);
1667 pr_cont(":");
68aaee14 1668 memory_stat_format(memcg, buf, sizeof(buf));
c8713d0b 1669 pr_info("%s", buf);
e222432b
BS
1670}
1671
a63d83f4
DR
1672/*
1673 * Return the memory (and swap, if configured) limit for a memcg.
1674 */
bbec2e15 1675unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
a63d83f4 1676{
8d387a5f
WL
1677 unsigned long max = READ_ONCE(memcg->memory.max);
1678
b94c4e94 1679 if (do_memsw_account()) {
8d387a5f
WL
1680 if (mem_cgroup_swappiness(memcg)) {
1681 /* Calculate swap excess capacity from memsw limit */
1682 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1683
1684 max += min(swap, (unsigned long)total_swap_pages);
1685 }
b94c4e94
JW
1686 } else {
1687 if (mem_cgroup_swappiness(memcg))
1688 max += min(READ_ONCE(memcg->swap.max),
1689 (unsigned long)total_swap_pages);
9a5a8f19 1690 }
bbec2e15 1691 return max;
a63d83f4
DR
1692}
1693
9783aa99
CD
1694unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1695{
1696 return page_counter_read(&memcg->memory);
1697}
1698
b6e6edcf 1699static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
19965460 1700 int order)
9cbb78bb 1701{
6e0fc46d
DR
1702 struct oom_control oc = {
1703 .zonelist = NULL,
1704 .nodemask = NULL,
2a966b77 1705 .memcg = memcg,
6e0fc46d
DR
1706 .gfp_mask = gfp_mask,
1707 .order = order,
6e0fc46d 1708 };
1378b37d 1709 bool ret = true;
9cbb78bb 1710
7775face
TH
1711 if (mutex_lock_killable(&oom_lock))
1712 return true;
1378b37d
YS
1713
1714 if (mem_cgroup_margin(memcg) >= (1 << order))
1715 goto unlock;
1716
7775face
TH
1717 /*
1718 * A few threads which were not waiting at mutex_lock_killable() can
1719 * fail to bail out. Therefore, check again after holding oom_lock.
1720 */
a4ebf1b6 1721 ret = task_is_dying() || out_of_memory(&oc);
1378b37d
YS
1722
1723unlock:
dc56401f 1724 mutex_unlock(&oom_lock);
7c5f64f8 1725 return ret;
9cbb78bb
DR
1726}
1727
0608f43d 1728static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
ef8f2327 1729 pg_data_t *pgdat,
0608f43d
AM
1730 gfp_t gfp_mask,
1731 unsigned long *total_scanned)
1732{
1733 struct mem_cgroup *victim = NULL;
1734 int total = 0;
1735 int loop = 0;
1736 unsigned long excess;
1737 unsigned long nr_scanned;
1738 struct mem_cgroup_reclaim_cookie reclaim = {
ef8f2327 1739 .pgdat = pgdat,
0608f43d
AM
1740 };
1741
3e32cb2e 1742 excess = soft_limit_excess(root_memcg);
0608f43d
AM
1743
1744 while (1) {
1745 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1746 if (!victim) {
1747 loop++;
1748 if (loop >= 2) {
1749 /*
1750 * If we have not been able to reclaim
1751 * anything, it might because there are
1752 * no reclaimable pages under this hierarchy
1753 */
1754 if (!total)
1755 break;
1756 /*
1757 * We want to do more targeted reclaim.
1758 * excess >> 2 is not to excessive so as to
1759 * reclaim too much, nor too less that we keep
1760 * coming back to reclaim from this cgroup
1761 */
1762 if (total >= (excess >> 2) ||
1763 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1764 break;
1765 }
1766 continue;
1767 }
a9dd0a83 1768 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
ef8f2327 1769 pgdat, &nr_scanned);
0608f43d 1770 *total_scanned += nr_scanned;
3e32cb2e 1771 if (!soft_limit_excess(root_memcg))
0608f43d 1772 break;
6d61ef40 1773 }
0608f43d
AM
1774 mem_cgroup_iter_break(root_memcg, victim);
1775 return total;
6d61ef40
BS
1776}
1777
0056f4e6
JW
1778#ifdef CONFIG_LOCKDEP
1779static struct lockdep_map memcg_oom_lock_dep_map = {
1780 .name = "memcg_oom_lock",
1781};
1782#endif
1783
fb2a6fc5
JW
1784static DEFINE_SPINLOCK(memcg_oom_lock);
1785
867578cb
KH
1786/*
1787 * Check OOM-Killer is already running under our hierarchy.
1788 * If someone is running, return false.
1789 */
fb2a6fc5 1790static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
867578cb 1791{
79dfdacc 1792 struct mem_cgroup *iter, *failed = NULL;
a636b327 1793
fb2a6fc5
JW
1794 spin_lock(&memcg_oom_lock);
1795
9f3a0d09 1796 for_each_mem_cgroup_tree(iter, memcg) {
23751be0 1797 if (iter->oom_lock) {
79dfdacc
MH
1798 /*
1799 * this subtree of our hierarchy is already locked
1800 * so we cannot give a lock.
1801 */
79dfdacc 1802 failed = iter;
9f3a0d09
JW
1803 mem_cgroup_iter_break(memcg, iter);
1804 break;
23751be0
JW
1805 } else
1806 iter->oom_lock = true;
7d74b06f 1807 }
867578cb 1808
fb2a6fc5
JW
1809 if (failed) {
1810 /*
1811 * OK, we failed to lock the whole subtree so we have
1812 * to clean up what we set up to the failing subtree
1813 */
1814 for_each_mem_cgroup_tree(iter, memcg) {
1815 if (iter == failed) {
1816 mem_cgroup_iter_break(memcg, iter);
1817 break;
1818 }
1819 iter->oom_lock = false;
79dfdacc 1820 }
0056f4e6
JW
1821 } else
1822 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
fb2a6fc5
JW
1823
1824 spin_unlock(&memcg_oom_lock);
1825
1826 return !failed;
a636b327 1827}
0b7f569e 1828
fb2a6fc5 1829static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
0b7f569e 1830{
7d74b06f
KH
1831 struct mem_cgroup *iter;
1832
fb2a6fc5 1833 spin_lock(&memcg_oom_lock);
5facae4f 1834 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
c0ff4b85 1835 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc 1836 iter->oom_lock = false;
fb2a6fc5 1837 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
1838}
1839
c0ff4b85 1840static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1841{
1842 struct mem_cgroup *iter;
1843
c2b42d3c 1844 spin_lock(&memcg_oom_lock);
c0ff4b85 1845 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
1846 iter->under_oom++;
1847 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
1848}
1849
c0ff4b85 1850static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1851{
1852 struct mem_cgroup *iter;
1853
867578cb 1854 /*
f0953a1b 1855 * Be careful about under_oom underflows because a child memcg
7a52d4d8 1856 * could have been added after mem_cgroup_mark_under_oom.
867578cb 1857 */
c2b42d3c 1858 spin_lock(&memcg_oom_lock);
c0ff4b85 1859 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
1860 if (iter->under_oom > 0)
1861 iter->under_oom--;
1862 spin_unlock(&memcg_oom_lock);
0b7f569e
KH
1863}
1864
867578cb
KH
1865static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1866
dc98df5a 1867struct oom_wait_info {
d79154bb 1868 struct mem_cgroup *memcg;
ac6424b9 1869 wait_queue_entry_t wait;
dc98df5a
KH
1870};
1871
ac6424b9 1872static int memcg_oom_wake_function(wait_queue_entry_t *wait,
dc98df5a
KH
1873 unsigned mode, int sync, void *arg)
1874{
d79154bb
HD
1875 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1876 struct mem_cgroup *oom_wait_memcg;
dc98df5a
KH
1877 struct oom_wait_info *oom_wait_info;
1878
1879 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
d79154bb 1880 oom_wait_memcg = oom_wait_info->memcg;
dc98df5a 1881
2314b42d
JW
1882 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1883 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
dc98df5a 1884 return 0;
dc98df5a
KH
1885 return autoremove_wake_function(wait, mode, sync, arg);
1886}
1887
c0ff4b85 1888static void memcg_oom_recover(struct mem_cgroup *memcg)
3c11ecf4 1889{
c2b42d3c
TH
1890 /*
1891 * For the following lockless ->under_oom test, the only required
1892 * guarantee is that it must see the state asserted by an OOM when
1893 * this function is called as a result of userland actions
1894 * triggered by the notification of the OOM. This is trivially
1895 * achieved by invoking mem_cgroup_mark_under_oom() before
1896 * triggering notification.
1897 */
1898 if (memcg && memcg->under_oom)
f4b90b70 1899 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
3c11ecf4
KH
1900}
1901
becdf89d
SB
1902/*
1903 * Returns true if successfully killed one or more processes. Though in some
1904 * corner cases it can return true even without killing any process.
1905 */
1906static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
0b7f569e 1907{
becdf89d 1908 bool locked, ret;
7056d3a3 1909
29ef680a 1910 if (order > PAGE_ALLOC_COSTLY_ORDER)
becdf89d 1911 return false;
29ef680a 1912
7a1adfdd
RG
1913 memcg_memory_event(memcg, MEMCG_OOM);
1914
867578cb 1915 /*
49426420
JW
1916 * We are in the middle of the charge context here, so we
1917 * don't want to block when potentially sitting on a callstack
1918 * that holds all kinds of filesystem and mm locks.
1919 *
29ef680a
MH
1920 * cgroup1 allows disabling the OOM killer and waiting for outside
1921 * handling until the charge can succeed; remember the context and put
1922 * the task to sleep at the end of the page fault when all locks are
1923 * released.
49426420 1924 *
29ef680a
MH
1925 * On the other hand, in-kernel OOM killer allows for an async victim
1926 * memory reclaim (oom_reaper) and that means that we are not solely
1927 * relying on the oom victim to make a forward progress and we can
1928 * invoke the oom killer here.
1929 *
1930 * Please note that mem_cgroup_out_of_memory might fail to find a
1931 * victim and then we have to bail out from the charge path.
867578cb 1932 */
17c56de6 1933 if (READ_ONCE(memcg->oom_kill_disable)) {
becdf89d
SB
1934 if (current->in_user_fault) {
1935 css_get(&memcg->css);
1936 current->memcg_in_oom = memcg;
1937 current->memcg_oom_gfp_mask = mask;
1938 current->memcg_oom_order = order;
1939 }
1940 return false;
29ef680a
MH
1941 }
1942
7056d3a3
MH
1943 mem_cgroup_mark_under_oom(memcg);
1944
1945 locked = mem_cgroup_oom_trylock(memcg);
1946
1947 if (locked)
1948 mem_cgroup_oom_notify(memcg);
1949
1950 mem_cgroup_unmark_under_oom(memcg);
becdf89d 1951 ret = mem_cgroup_out_of_memory(memcg, mask, order);
7056d3a3
MH
1952
1953 if (locked)
1954 mem_cgroup_oom_unlock(memcg);
29ef680a 1955
7056d3a3 1956 return ret;
3812c8c8
JW
1957}
1958
1959/**
1960 * mem_cgroup_oom_synchronize - complete memcg OOM handling
49426420 1961 * @handle: actually kill/wait or just clean up the OOM state
3812c8c8 1962 *
49426420
JW
1963 * This has to be called at the end of a page fault if the memcg OOM
1964 * handler was enabled.
3812c8c8 1965 *
49426420 1966 * Memcg supports userspace OOM handling where failed allocations must
3812c8c8
JW
1967 * sleep on a waitqueue until the userspace task resolves the
1968 * situation. Sleeping directly in the charge context with all kinds
1969 * of locks held is not a good idea, instead we remember an OOM state
1970 * in the task and mem_cgroup_oom_synchronize() has to be called at
49426420 1971 * the end of the page fault to complete the OOM handling.
3812c8c8
JW
1972 *
1973 * Returns %true if an ongoing memcg OOM situation was detected and
49426420 1974 * completed, %false otherwise.
3812c8c8 1975 */
49426420 1976bool mem_cgroup_oom_synchronize(bool handle)
3812c8c8 1977{
626ebc41 1978 struct mem_cgroup *memcg = current->memcg_in_oom;
3812c8c8 1979 struct oom_wait_info owait;
49426420 1980 bool locked;
3812c8c8
JW
1981
1982 /* OOM is global, do not handle */
3812c8c8 1983 if (!memcg)
49426420 1984 return false;
3812c8c8 1985
7c5f64f8 1986 if (!handle)
49426420 1987 goto cleanup;
3812c8c8
JW
1988
1989 owait.memcg = memcg;
1990 owait.wait.flags = 0;
1991 owait.wait.func = memcg_oom_wake_function;
1992 owait.wait.private = current;
2055da97 1993 INIT_LIST_HEAD(&owait.wait.entry);
867578cb 1994
3812c8c8 1995 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
49426420
JW
1996 mem_cgroup_mark_under_oom(memcg);
1997
1998 locked = mem_cgroup_oom_trylock(memcg);
1999
2000 if (locked)
2001 mem_cgroup_oom_notify(memcg);
2002
17c56de6 2003 if (locked && !READ_ONCE(memcg->oom_kill_disable)) {
49426420
JW
2004 mem_cgroup_unmark_under_oom(memcg);
2005 finish_wait(&memcg_oom_waitq, &owait.wait);
626ebc41
TH
2006 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
2007 current->memcg_oom_order);
49426420 2008 } else {
3812c8c8 2009 schedule();
49426420
JW
2010 mem_cgroup_unmark_under_oom(memcg);
2011 finish_wait(&memcg_oom_waitq, &owait.wait);
2012 }
2013
2014 if (locked) {
fb2a6fc5
JW
2015 mem_cgroup_oom_unlock(memcg);
2016 /*
2017 * There is no guarantee that an OOM-lock contender
2018 * sees the wakeups triggered by the OOM kill
f0953a1b 2019 * uncharges. Wake any sleepers explicitly.
fb2a6fc5
JW
2020 */
2021 memcg_oom_recover(memcg);
2022 }
49426420 2023cleanup:
626ebc41 2024 current->memcg_in_oom = NULL;
3812c8c8 2025 css_put(&memcg->css);
867578cb 2026 return true;
0b7f569e
KH
2027}
2028
3d8b38eb
RG
2029/**
2030 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2031 * @victim: task to be killed by the OOM killer
2032 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2033 *
2034 * Returns a pointer to a memory cgroup, which has to be cleaned up
2035 * by killing all belonging OOM-killable tasks.
2036 *
2037 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2038 */
2039struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2040 struct mem_cgroup *oom_domain)
2041{
2042 struct mem_cgroup *oom_group = NULL;
2043 struct mem_cgroup *memcg;
2044
2045 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2046 return NULL;
2047
2048 if (!oom_domain)
2049 oom_domain = root_mem_cgroup;
2050
2051 rcu_read_lock();
2052
2053 memcg = mem_cgroup_from_task(victim);
7848ed62 2054 if (mem_cgroup_is_root(memcg))
3d8b38eb
RG
2055 goto out;
2056
48fe267c
RG
2057 /*
2058 * If the victim task has been asynchronously moved to a different
2059 * memory cgroup, we might end up killing tasks outside oom_domain.
2060 * In this case it's better to ignore memory.group.oom.
2061 */
2062 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2063 goto out;
2064
3d8b38eb
RG
2065 /*
2066 * Traverse the memory cgroup hierarchy from the victim task's
2067 * cgroup up to the OOMing cgroup (or root) to find the
2068 * highest-level memory cgroup with oom.group set.
2069 */
2070 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
eaf7b66b 2071 if (READ_ONCE(memcg->oom_group))
3d8b38eb
RG
2072 oom_group = memcg;
2073
2074 if (memcg == oom_domain)
2075 break;
2076 }
2077
2078 if (oom_group)
2079 css_get(&oom_group->css);
2080out:
2081 rcu_read_unlock();
2082
2083 return oom_group;
2084}
2085
2086void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2087{
2088 pr_info("Tasks in ");
2089 pr_cont_cgroup_path(memcg->css.cgroup);
2090 pr_cont(" are going to be killed due to memory.oom.group set\n");
2091}
2092
d7365e78 2093/**
f70ad448
MWO
2094 * folio_memcg_lock - Bind a folio to its memcg.
2095 * @folio: The folio.
32047e2a 2096 *
f70ad448 2097 * This function prevents unlocked LRU folios from being moved to
739f79fc
JW
2098 * another cgroup.
2099 *
f70ad448
MWO
2100 * It ensures lifetime of the bound memcg. The caller is responsible
2101 * for the lifetime of the folio.
d69b042f 2102 */
f70ad448 2103void folio_memcg_lock(struct folio *folio)
89c06bd5
KH
2104{
2105 struct mem_cgroup *memcg;
6de22619 2106 unsigned long flags;
89c06bd5 2107
6de22619
JW
2108 /*
2109 * The RCU lock is held throughout the transaction. The fast
2110 * path can get away without acquiring the memcg->move_lock
2111 * because page moving starts with an RCU grace period.
739f79fc 2112 */
d7365e78
JW
2113 rcu_read_lock();
2114
2115 if (mem_cgroup_disabled())
1c824a68 2116 return;
89c06bd5 2117again:
f70ad448 2118 memcg = folio_memcg(folio);
29833315 2119 if (unlikely(!memcg))
1c824a68 2120 return;
d7365e78 2121
20ad50d6
AS
2122#ifdef CONFIG_PROVE_LOCKING
2123 local_irq_save(flags);
2124 might_lock(&memcg->move_lock);
2125 local_irq_restore(flags);
2126#endif
2127
bdcbb659 2128 if (atomic_read(&memcg->moving_account) <= 0)
1c824a68 2129 return;
89c06bd5 2130
6de22619 2131 spin_lock_irqsave(&memcg->move_lock, flags);
f70ad448 2132 if (memcg != folio_memcg(folio)) {
6de22619 2133 spin_unlock_irqrestore(&memcg->move_lock, flags);
89c06bd5
KH
2134 goto again;
2135 }
6de22619
JW
2136
2137 /*
1c824a68
JW
2138 * When charge migration first begins, we can have multiple
2139 * critical sections holding the fast-path RCU lock and one
2140 * holding the slowpath move_lock. Track the task who has the
2141 * move_lock for unlock_page_memcg().
6de22619
JW
2142 */
2143 memcg->move_lock_task = current;
2144 memcg->move_lock_flags = flags;
89c06bd5 2145}
f70ad448
MWO
2146
2147void lock_page_memcg(struct page *page)
2148{
2149 folio_memcg_lock(page_folio(page));
2150}
89c06bd5 2151
f70ad448 2152static void __folio_memcg_unlock(struct mem_cgroup *memcg)
89c06bd5 2153{
6de22619
JW
2154 if (memcg && memcg->move_lock_task == current) {
2155 unsigned long flags = memcg->move_lock_flags;
2156
2157 memcg->move_lock_task = NULL;
2158 memcg->move_lock_flags = 0;
2159
2160 spin_unlock_irqrestore(&memcg->move_lock, flags);
2161 }
89c06bd5 2162
d7365e78 2163 rcu_read_unlock();
89c06bd5 2164}
739f79fc
JW
2165
2166/**
f70ad448
MWO
2167 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2168 * @folio: The folio.
2169 *
2170 * This releases the binding created by folio_memcg_lock(). This does
2171 * not change the accounting of this folio to its memcg, but it does
2172 * permit others to change it.
739f79fc 2173 */
f70ad448 2174void folio_memcg_unlock(struct folio *folio)
739f79fc 2175{
f70ad448
MWO
2176 __folio_memcg_unlock(folio_memcg(folio));
2177}
9da7b521 2178
f70ad448
MWO
2179void unlock_page_memcg(struct page *page)
2180{
2181 folio_memcg_unlock(page_folio(page));
739f79fc 2182}
89c06bd5 2183
fead2b86 2184struct memcg_stock_pcp {
56751146 2185 local_lock_t stock_lock;
fead2b86
MH
2186 struct mem_cgroup *cached; /* this never be root cgroup */
2187 unsigned int nr_pages;
2188
bf4f0599
RG
2189#ifdef CONFIG_MEMCG_KMEM
2190 struct obj_cgroup *cached_objcg;
68ac5b3c 2191 struct pglist_data *cached_pgdat;
bf4f0599 2192 unsigned int nr_bytes;
68ac5b3c
WL
2193 int nr_slab_reclaimable_b;
2194 int nr_slab_unreclaimable_b;
bf4f0599
RG
2195#endif
2196
cdec2e42 2197 struct work_struct work;
26fe6168 2198 unsigned long flags;
a0db00fc 2199#define FLUSHING_CACHED_CHARGE 0
cdec2e42 2200};
56751146
SAS
2201static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2202 .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2203};
9f50fad6 2204static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42 2205
bf4f0599 2206#ifdef CONFIG_MEMCG_KMEM
56751146 2207static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
bf4f0599
RG
2208static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2209 struct mem_cgroup *root_memcg);
a8c49af3 2210static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
bf4f0599
RG
2211
2212#else
56751146 2213static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
bf4f0599 2214{
56751146 2215 return NULL;
bf4f0599
RG
2216}
2217static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2218 struct mem_cgroup *root_memcg)
2219{
2220 return false;
2221}
a8c49af3
YA
2222static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2223{
2224}
bf4f0599
RG
2225#endif
2226
a0956d54
SS
2227/**
2228 * consume_stock: Try to consume stocked charge on this cpu.
2229 * @memcg: memcg to consume from.
2230 * @nr_pages: how many pages to charge.
2231 *
2232 * The charges will only happen if @memcg matches the current cpu's memcg
2233 * stock, and at least @nr_pages are available in that stock. Failure to
2234 * service an allocation will refill the stock.
2235 *
2236 * returns true if successful, false otherwise.
cdec2e42 2237 */
a0956d54 2238static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2239{
2240 struct memcg_stock_pcp *stock;
db2ba40c 2241 unsigned long flags;
3e32cb2e 2242 bool ret = false;
cdec2e42 2243
a983b5eb 2244 if (nr_pages > MEMCG_CHARGE_BATCH)
3e32cb2e 2245 return ret;
a0956d54 2246
56751146 2247 local_lock_irqsave(&memcg_stock.stock_lock, flags);
db2ba40c
JW
2248
2249 stock = this_cpu_ptr(&memcg_stock);
3e32cb2e 2250 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
a0956d54 2251 stock->nr_pages -= nr_pages;
3e32cb2e
JW
2252 ret = true;
2253 }
db2ba40c 2254
56751146 2255 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
db2ba40c 2256
cdec2e42
KH
2257 return ret;
2258}
2259
2260/*
3e32cb2e 2261 * Returns stocks cached in percpu and reset cached information.
cdec2e42
KH
2262 */
2263static void drain_stock(struct memcg_stock_pcp *stock)
2264{
2265 struct mem_cgroup *old = stock->cached;
2266
1a3e1f40
JW
2267 if (!old)
2268 return;
2269
11c9ea4e 2270 if (stock->nr_pages) {
3e32cb2e 2271 page_counter_uncharge(&old->memory, stock->nr_pages);
7941d214 2272 if (do_memsw_account())
3e32cb2e 2273 page_counter_uncharge(&old->memsw, stock->nr_pages);
11c9ea4e 2274 stock->nr_pages = 0;
cdec2e42 2275 }
1a3e1f40
JW
2276
2277 css_put(&old->css);
cdec2e42 2278 stock->cached = NULL;
cdec2e42
KH
2279}
2280
cdec2e42
KH
2281static void drain_local_stock(struct work_struct *dummy)
2282{
db2ba40c 2283 struct memcg_stock_pcp *stock;
56751146 2284 struct obj_cgroup *old = NULL;
db2ba40c
JW
2285 unsigned long flags;
2286
72f0184c 2287 /*
5c49cf9a
MH
2288 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2289 * drain_stock races is that we always operate on local CPU stock
2290 * here with IRQ disabled
72f0184c 2291 */
56751146 2292 local_lock_irqsave(&memcg_stock.stock_lock, flags);
db2ba40c
JW
2293
2294 stock = this_cpu_ptr(&memcg_stock);
56751146 2295 old = drain_obj_stock(stock);
cdec2e42 2296 drain_stock(stock);
26fe6168 2297 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
db2ba40c 2298
56751146
SAS
2299 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2300 if (old)
2301 obj_cgroup_put(old);
cdec2e42
KH
2302}
2303
2304/*
3e32cb2e 2305 * Cache charges(val) to local per_cpu area.
320cc51d 2306 * This will be consumed by consume_stock() function, later.
cdec2e42 2307 */
af9a3b69 2308static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42 2309{
db2ba40c 2310 struct memcg_stock_pcp *stock;
cdec2e42 2311
db2ba40c 2312 stock = this_cpu_ptr(&memcg_stock);
c0ff4b85 2313 if (stock->cached != memcg) { /* reset if necessary */
cdec2e42 2314 drain_stock(stock);
1a3e1f40 2315 css_get(&memcg->css);
c0ff4b85 2316 stock->cached = memcg;
cdec2e42 2317 }
11c9ea4e 2318 stock->nr_pages += nr_pages;
db2ba40c 2319
a983b5eb 2320 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
475d0487 2321 drain_stock(stock);
af9a3b69
JW
2322}
2323
2324static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2325{
2326 unsigned long flags;
475d0487 2327
56751146 2328 local_lock_irqsave(&memcg_stock.stock_lock, flags);
af9a3b69 2329 __refill_stock(memcg, nr_pages);
56751146 2330 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
cdec2e42
KH
2331}
2332
2333/*
c0ff4b85 2334 * Drains all per-CPU charge caches for given root_memcg resp. subtree
6d3d6aa2 2335 * of the hierarchy under it.
cdec2e42 2336 */
6d3d6aa2 2337static void drain_all_stock(struct mem_cgroup *root_memcg)
cdec2e42 2338{
26fe6168 2339 int cpu, curcpu;
d38144b7 2340
6d3d6aa2
JW
2341 /* If someone's already draining, avoid adding running more workers. */
2342 if (!mutex_trylock(&percpu_charge_mutex))
2343 return;
72f0184c
MH
2344 /*
2345 * Notify other cpus that system-wide "drain" is running
2346 * We do not care about races with the cpu hotplug because cpu down
2347 * as well as workers from this path always operate on the local
2348 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2349 */
0790ed62
SAS
2350 migrate_disable();
2351 curcpu = smp_processor_id();
cdec2e42
KH
2352 for_each_online_cpu(cpu) {
2353 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
c0ff4b85 2354 struct mem_cgroup *memcg;
e1a366be 2355 bool flush = false;
26fe6168 2356
e1a366be 2357 rcu_read_lock();
c0ff4b85 2358 memcg = stock->cached;
e1a366be
RG
2359 if (memcg && stock->nr_pages &&
2360 mem_cgroup_is_descendant(memcg, root_memcg))
2361 flush = true;
27fb0956 2362 else if (obj_stock_flush_required(stock, root_memcg))
bf4f0599 2363 flush = true;
e1a366be
RG
2364 rcu_read_unlock();
2365
2366 if (flush &&
2367 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
d1a05b69
MH
2368 if (cpu == curcpu)
2369 drain_local_stock(&stock->work);
6a792697 2370 else if (!cpu_is_isolated(cpu))
d1a05b69
MH
2371 schedule_work_on(cpu, &stock->work);
2372 }
cdec2e42 2373 }
0790ed62 2374 migrate_enable();
9f50fad6 2375 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2376}
2377
2cd21c89
JW
2378static int memcg_hotplug_cpu_dead(unsigned int cpu)
2379{
2380 struct memcg_stock_pcp *stock;
a3d4c05a 2381
2cd21c89
JW
2382 stock = &per_cpu(memcg_stock, cpu);
2383 drain_stock(stock);
a3d4c05a 2384
308167fc 2385 return 0;
cdec2e42
KH
2386}
2387
b3ff9291
CD
2388static unsigned long reclaim_high(struct mem_cgroup *memcg,
2389 unsigned int nr_pages,
2390 gfp_t gfp_mask)
f7e1cb6e 2391{
b3ff9291
CD
2392 unsigned long nr_reclaimed = 0;
2393
f7e1cb6e 2394 do {
e22c6ed9
JW
2395 unsigned long pflags;
2396
d1663a90
JK
2397 if (page_counter_read(&memcg->memory) <=
2398 READ_ONCE(memcg->memory.high))
f7e1cb6e 2399 continue;
e22c6ed9 2400
e27be240 2401 memcg_memory_event(memcg, MEMCG_HIGH);
e22c6ed9
JW
2402
2403 psi_memstall_enter(&pflags);
b3ff9291 2404 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
73b73bac 2405 gfp_mask,
55ab834a 2406 MEMCG_RECLAIM_MAY_SWAP);
e22c6ed9 2407 psi_memstall_leave(&pflags);
4bf17307
CD
2408 } while ((memcg = parent_mem_cgroup(memcg)) &&
2409 !mem_cgroup_is_root(memcg));
b3ff9291
CD
2410
2411 return nr_reclaimed;
f7e1cb6e
JW
2412}
2413
2414static void high_work_func(struct work_struct *work)
2415{
2416 struct mem_cgroup *memcg;
2417
2418 memcg = container_of(work, struct mem_cgroup, high_work);
a983b5eb 2419 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
f7e1cb6e
JW
2420}
2421
0e4b01df
CD
2422/*
2423 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2424 * enough to still cause a significant slowdown in most cases, while still
2425 * allowing diagnostics and tracing to proceed without becoming stuck.
2426 */
2427#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2428
2429/*
2430 * When calculating the delay, we use these either side of the exponentiation to
2431 * maintain precision and scale to a reasonable number of jiffies (see the table
2432 * below.
2433 *
2434 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2435 * overage ratio to a delay.
ac5ddd0f 2436 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
0e4b01df
CD
2437 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2438 * to produce a reasonable delay curve.
2439 *
2440 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2441 * reasonable delay curve compared to precision-adjusted overage, not
2442 * penalising heavily at first, but still making sure that growth beyond the
2443 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2444 * example, with a high of 100 megabytes:
2445 *
2446 * +-------+------------------------+
2447 * | usage | time to allocate in ms |
2448 * +-------+------------------------+
2449 * | 100M | 0 |
2450 * | 101M | 6 |
2451 * | 102M | 25 |
2452 * | 103M | 57 |
2453 * | 104M | 102 |
2454 * | 105M | 159 |
2455 * | 106M | 230 |
2456 * | 107M | 313 |
2457 * | 108M | 409 |
2458 * | 109M | 518 |
2459 * | 110M | 639 |
2460 * | 111M | 774 |
2461 * | 112M | 921 |
2462 * | 113M | 1081 |
2463 * | 114M | 1254 |
2464 * | 115M | 1439 |
2465 * | 116M | 1638 |
2466 * | 117M | 1849 |
2467 * | 118M | 2000 |
2468 * | 119M | 2000 |
2469 * | 120M | 2000 |
2470 * +-------+------------------------+
2471 */
2472 #define MEMCG_DELAY_PRECISION_SHIFT 20
2473 #define MEMCG_DELAY_SCALING_SHIFT 14
2474
8a5dbc65 2475static u64 calculate_overage(unsigned long usage, unsigned long high)
b23afb93 2476{
8a5dbc65 2477 u64 overage;
b23afb93 2478
8a5dbc65
JK
2479 if (usage <= high)
2480 return 0;
e26733e0 2481
8a5dbc65
JK
2482 /*
2483 * Prevent division by 0 in overage calculation by acting as if
2484 * it was a threshold of 1 page
2485 */
2486 high = max(high, 1UL);
9b8b1754 2487
8a5dbc65
JK
2488 overage = usage - high;
2489 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2490 return div64_u64(overage, high);
2491}
e26733e0 2492
8a5dbc65
JK
2493static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2494{
2495 u64 overage, max_overage = 0;
e26733e0 2496
8a5dbc65
JK
2497 do {
2498 overage = calculate_overage(page_counter_read(&memcg->memory),
d1663a90 2499 READ_ONCE(memcg->memory.high));
8a5dbc65 2500 max_overage = max(overage, max_overage);
e26733e0
CD
2501 } while ((memcg = parent_mem_cgroup(memcg)) &&
2502 !mem_cgroup_is_root(memcg));
2503
8a5dbc65
JK
2504 return max_overage;
2505}
2506
4b82ab4f
JK
2507static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2508{
2509 u64 overage, max_overage = 0;
2510
2511 do {
2512 overage = calculate_overage(page_counter_read(&memcg->swap),
2513 READ_ONCE(memcg->swap.high));
2514 if (overage)
2515 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2516 max_overage = max(overage, max_overage);
2517 } while ((memcg = parent_mem_cgroup(memcg)) &&
2518 !mem_cgroup_is_root(memcg));
2519
2520 return max_overage;
2521}
2522
8a5dbc65
JK
2523/*
2524 * Get the number of jiffies that we should penalise a mischievous cgroup which
2525 * is exceeding its memory.high by checking both it and its ancestors.
2526 */
2527static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2528 unsigned int nr_pages,
2529 u64 max_overage)
2530{
2531 unsigned long penalty_jiffies;
2532
e26733e0
CD
2533 if (!max_overage)
2534 return 0;
0e4b01df
CD
2535
2536 /*
0e4b01df
CD
2537 * We use overage compared to memory.high to calculate the number of
2538 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2539 * fairly lenient on small overages, and increasingly harsh when the
2540 * memcg in question makes it clear that it has no intention of stopping
2541 * its crazy behaviour, so we exponentially increase the delay based on
2542 * overage amount.
2543 */
e26733e0
CD
2544 penalty_jiffies = max_overage * max_overage * HZ;
2545 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2546 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
0e4b01df
CD
2547
2548 /*
2549 * Factor in the task's own contribution to the overage, such that four
2550 * N-sized allocations are throttled approximately the same as one
2551 * 4N-sized allocation.
2552 *
2553 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2554 * larger the current charge patch is than that.
2555 */
ff144e69 2556 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
e26733e0
CD
2557}
2558
2559/*
2560 * Scheduled by try_charge() to be executed from the userland return path
2561 * and reclaims memory over the high limit.
2562 */
2563void mem_cgroup_handle_over_high(void)
2564{
2565 unsigned long penalty_jiffies;
2566 unsigned long pflags;
b3ff9291 2567 unsigned long nr_reclaimed;
e26733e0 2568 unsigned int nr_pages = current->memcg_nr_pages_over_high;
d977aa93 2569 int nr_retries = MAX_RECLAIM_RETRIES;
e26733e0 2570 struct mem_cgroup *memcg;
b3ff9291 2571 bool in_retry = false;
e26733e0
CD
2572
2573 if (likely(!nr_pages))
2574 return;
2575
2576 memcg = get_mem_cgroup_from_mm(current->mm);
e26733e0
CD
2577 current->memcg_nr_pages_over_high = 0;
2578
b3ff9291
CD
2579retry_reclaim:
2580 /*
2581 * The allocating task should reclaim at least the batch size, but for
2582 * subsequent retries we only want to do what's necessary to prevent oom
2583 * or breaching resource isolation.
2584 *
2585 * This is distinct from memory.max or page allocator behaviour because
2586 * memory.high is currently batched, whereas memory.max and the page
2587 * allocator run every time an allocation is made.
2588 */
2589 nr_reclaimed = reclaim_high(memcg,
2590 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2591 GFP_KERNEL);
2592
e26733e0
CD
2593 /*
2594 * memory.high is breached and reclaim is unable to keep up. Throttle
2595 * allocators proactively to slow down excessive growth.
2596 */
8a5dbc65
JK
2597 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2598 mem_find_max_overage(memcg));
0e4b01df 2599
4b82ab4f
JK
2600 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2601 swap_find_max_overage(memcg));
2602
ff144e69
JK
2603 /*
2604 * Clamp the max delay per usermode return so as to still keep the
2605 * application moving forwards and also permit diagnostics, albeit
2606 * extremely slowly.
2607 */
2608 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2609
0e4b01df
CD
2610 /*
2611 * Don't sleep if the amount of jiffies this memcg owes us is so low
2612 * that it's not even worth doing, in an attempt to be nice to those who
2613 * go only a small amount over their memory.high value and maybe haven't
2614 * been aggressively reclaimed enough yet.
2615 */
2616 if (penalty_jiffies <= HZ / 100)
2617 goto out;
2618
b3ff9291
CD
2619 /*
2620 * If reclaim is making forward progress but we're still over
2621 * memory.high, we want to encourage that rather than doing allocator
2622 * throttling.
2623 */
2624 if (nr_reclaimed || nr_retries--) {
2625 in_retry = true;
2626 goto retry_reclaim;
2627 }
2628
0e4b01df
CD
2629 /*
2630 * If we exit early, we're guaranteed to die (since
2631 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2632 * need to account for any ill-begotten jiffies to pay them off later.
2633 */
2634 psi_memstall_enter(&pflags);
2635 schedule_timeout_killable(penalty_jiffies);
2636 psi_memstall_leave(&pflags);
2637
2638out:
2639 css_put(&memcg->css);
b23afb93
TH
2640}
2641
c5c8b16b
MS
2642static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2643 unsigned int nr_pages)
8a9f3ccd 2644{
a983b5eb 2645 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
d977aa93 2646 int nr_retries = MAX_RECLAIM_RETRIES;
6539cc05 2647 struct mem_cgroup *mem_over_limit;
3e32cb2e 2648 struct page_counter *counter;
6539cc05 2649 unsigned long nr_reclaimed;
a4ebf1b6 2650 bool passed_oom = false;
73b73bac 2651 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
b70a2a21 2652 bool drained = false;
d6e103a7 2653 bool raised_max_event = false;
e22c6ed9 2654 unsigned long pflags;
a636b327 2655
6539cc05 2656retry:
b6b6cc72 2657 if (consume_stock(memcg, nr_pages))
10d53c74 2658 return 0;
8a9f3ccd 2659
7941d214 2660 if (!do_memsw_account() ||
6071ca52
JW
2661 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2662 if (page_counter_try_charge(&memcg->memory, batch, &counter))
6539cc05 2663 goto done_restock;
7941d214 2664 if (do_memsw_account())
3e32cb2e
JW
2665 page_counter_uncharge(&memcg->memsw, batch);
2666 mem_over_limit = mem_cgroup_from_counter(counter, memory);
3fbe7244 2667 } else {
3e32cb2e 2668 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
73b73bac 2669 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
3fbe7244 2670 }
7a81b88c 2671
6539cc05
JW
2672 if (batch > nr_pages) {
2673 batch = nr_pages;
2674 goto retry;
2675 }
6d61ef40 2676
89a28483
JW
2677 /*
2678 * Prevent unbounded recursion when reclaim operations need to
2679 * allocate memory. This might exceed the limits temporarily,
2680 * but we prefer facilitating memory reclaim and getting back
2681 * under the limit over triggering OOM kills in these cases.
2682 */
2683 if (unlikely(current->flags & PF_MEMALLOC))
2684 goto force;
2685
06b078fc
JW
2686 if (unlikely(task_in_memcg_oom(current)))
2687 goto nomem;
2688
d0164adc 2689 if (!gfpflags_allow_blocking(gfp_mask))
6539cc05 2690 goto nomem;
4b534334 2691
e27be240 2692 memcg_memory_event(mem_over_limit, MEMCG_MAX);
d6e103a7 2693 raised_max_event = true;
241994ed 2694
e22c6ed9 2695 psi_memstall_enter(&pflags);
b70a2a21 2696 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
55ab834a 2697 gfp_mask, reclaim_options);
e22c6ed9 2698 psi_memstall_leave(&pflags);
6539cc05 2699
61e02c74 2700 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
6539cc05 2701 goto retry;
28c34c29 2702
b70a2a21 2703 if (!drained) {
6d3d6aa2 2704 drain_all_stock(mem_over_limit);
b70a2a21
JW
2705 drained = true;
2706 goto retry;
2707 }
2708
28c34c29
JW
2709 if (gfp_mask & __GFP_NORETRY)
2710 goto nomem;
6539cc05
JW
2711 /*
2712 * Even though the limit is exceeded at this point, reclaim
2713 * may have been able to free some pages. Retry the charge
2714 * before killing the task.
2715 *
2716 * Only for regular pages, though: huge pages are rather
2717 * unlikely to succeed so close to the limit, and we fall back
2718 * to regular pages anyway in case of failure.
2719 */
61e02c74 2720 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
6539cc05
JW
2721 goto retry;
2722 /*
2723 * At task move, charge accounts can be doubly counted. So, it's
2724 * better to wait until the end of task_move if something is going on.
2725 */
2726 if (mem_cgroup_wait_acct_move(mem_over_limit))
2727 goto retry;
2728
9b130619
JW
2729 if (nr_retries--)
2730 goto retry;
2731
38d38493 2732 if (gfp_mask & __GFP_RETRY_MAYFAIL)
29ef680a
MH
2733 goto nomem;
2734
a4ebf1b6
VA
2735 /* Avoid endless loop for tasks bypassed by the oom killer */
2736 if (passed_oom && task_is_dying())
2737 goto nomem;
6539cc05 2738
29ef680a
MH
2739 /*
2740 * keep retrying as long as the memcg oom killer is able to make
2741 * a forward progress or bypass the charge if the oom killer
2742 * couldn't make any progress.
2743 */
becdf89d
SB
2744 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2745 get_order(nr_pages * PAGE_SIZE))) {
a4ebf1b6 2746 passed_oom = true;
d977aa93 2747 nr_retries = MAX_RECLAIM_RETRIES;
29ef680a 2748 goto retry;
29ef680a 2749 }
7a81b88c 2750nomem:
1461e8c2
SB
2751 /*
2752 * Memcg doesn't have a dedicated reserve for atomic
2753 * allocations. But like the global atomic pool, we need to
2754 * put the burden of reclaim on regular allocation requests
2755 * and let these go through as privileged allocations.
2756 */
2757 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
3168ecbe 2758 return -ENOMEM;
10d53c74 2759force:
d6e103a7
RG
2760 /*
2761 * If the allocation has to be enforced, don't forget to raise
2762 * a MEMCG_MAX event.
2763 */
2764 if (!raised_max_event)
2765 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2766
10d53c74
TH
2767 /*
2768 * The allocation either can't fail or will lead to more memory
2769 * being freed very soon. Allow memory usage go over the limit
2770 * temporarily by force charging it.
2771 */
2772 page_counter_charge(&memcg->memory, nr_pages);
7941d214 2773 if (do_memsw_account())
10d53c74 2774 page_counter_charge(&memcg->memsw, nr_pages);
10d53c74
TH
2775
2776 return 0;
6539cc05
JW
2777
2778done_restock:
2779 if (batch > nr_pages)
2780 refill_stock(memcg, batch - nr_pages);
b23afb93 2781
241994ed 2782 /*
b23afb93
TH
2783 * If the hierarchy is above the normal consumption range, schedule
2784 * reclaim on returning to userland. We can perform reclaim here
71baba4b 2785 * if __GFP_RECLAIM but let's always punt for simplicity and so that
b23afb93
TH
2786 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2787 * not recorded as it most likely matches current's and won't
2788 * change in the meantime. As high limit is checked again before
2789 * reclaim, the cost of mismatch is negligible.
241994ed
JW
2790 */
2791 do {
4b82ab4f
JK
2792 bool mem_high, swap_high;
2793
2794 mem_high = page_counter_read(&memcg->memory) >
2795 READ_ONCE(memcg->memory.high);
2796 swap_high = page_counter_read(&memcg->swap) >
2797 READ_ONCE(memcg->swap.high);
2798
2799 /* Don't bother a random interrupted task */
086f694a 2800 if (!in_task()) {
4b82ab4f 2801 if (mem_high) {
f7e1cb6e
JW
2802 schedule_work(&memcg->high_work);
2803 break;
2804 }
4b82ab4f
JK
2805 continue;
2806 }
2807
2808 if (mem_high || swap_high) {
2809 /*
2810 * The allocating tasks in this cgroup will need to do
2811 * reclaim or be throttled to prevent further growth
2812 * of the memory or swap footprints.
2813 *
2814 * Target some best-effort fairness between the tasks,
2815 * and distribute reclaim work and delay penalties
2816 * based on how much each task is actually allocating.
2817 */
9516a18a 2818 current->memcg_nr_pages_over_high += batch;
b23afb93
TH
2819 set_notify_resume(current);
2820 break;
2821 }
241994ed 2822 } while ((memcg = parent_mem_cgroup(memcg)));
10d53c74 2823
c9afe31e
SB
2824 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2825 !(current->flags & PF_MEMALLOC) &&
2826 gfpflags_allow_blocking(gfp_mask)) {
2827 mem_cgroup_handle_over_high();
2828 }
10d53c74 2829 return 0;
7a81b88c 2830}
8a9f3ccd 2831
c5c8b16b
MS
2832static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2833 unsigned int nr_pages)
2834{
2835 if (mem_cgroup_is_root(memcg))
2836 return 0;
2837
2838 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2839}
2840
58056f77 2841static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
a3032a2c 2842{
ce00a967
JW
2843 if (mem_cgroup_is_root(memcg))
2844 return;
2845
3e32cb2e 2846 page_counter_uncharge(&memcg->memory, nr_pages);
7941d214 2847 if (do_memsw_account())
3e32cb2e 2848 page_counter_uncharge(&memcg->memsw, nr_pages);
d01dd17f
KH
2849}
2850
118f2875 2851static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
0a31bc97 2852{
118f2875 2853 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
0a31bc97 2854 /*
a5eb011a 2855 * Any of the following ensures page's memcg stability:
0a31bc97 2856 *
a0b5b414
JW
2857 * - the page lock
2858 * - LRU isolation
2859 * - lock_page_memcg()
2860 * - exclusive reference
018ee47f 2861 * - mem_cgroup_trylock_pages()
0a31bc97 2862 */
118f2875 2863 folio->memcg_data = (unsigned long)memcg;
7a81b88c 2864}
66e1707b 2865
84c07d11 2866#ifdef CONFIG_MEMCG_KMEM
41eb5df1
WL
2867/*
2868 * The allocated objcg pointers array is not accounted directly.
2869 * Moreover, it should not come from DMA buffer and is not readily
2870 * reclaimable. So those GFP bits should be masked off.
2871 */
2872#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2873
a7ebf564
WL
2874/*
2875 * mod_objcg_mlstate() may be called with irq enabled, so
2876 * mod_memcg_lruvec_state() should be used.
2877 */
2878static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2879 struct pglist_data *pgdat,
2880 enum node_stat_item idx, int nr)
2881{
2882 struct mem_cgroup *memcg;
2883 struct lruvec *lruvec;
2884
2885 rcu_read_lock();
2886 memcg = obj_cgroup_memcg(objcg);
2887 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2888 mod_memcg_lruvec_state(lruvec, idx, nr);
2889 rcu_read_unlock();
2890}
2891
4b5f8d9a
VB
2892int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2893 gfp_t gfp, bool new_slab)
10befea9 2894{
4b5f8d9a 2895 unsigned int objects = objs_per_slab(s, slab);
2e9bd483 2896 unsigned long memcg_data;
10befea9
RG
2897 void *vec;
2898
41eb5df1 2899 gfp &= ~OBJCGS_CLEAR_MASK;
10befea9 2900 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
4b5f8d9a 2901 slab_nid(slab));
10befea9
RG
2902 if (!vec)
2903 return -ENOMEM;
2904
2e9bd483 2905 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
4b5f8d9a 2906 if (new_slab) {
2e9bd483 2907 /*
4b5f8d9a
VB
2908 * If the slab is brand new and nobody can yet access its
2909 * memcg_data, no synchronization is required and memcg_data can
2910 * be simply assigned.
2e9bd483 2911 */
4b5f8d9a
VB
2912 slab->memcg_data = memcg_data;
2913 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2e9bd483 2914 /*
4b5f8d9a
VB
2915 * If the slab is already in use, somebody can allocate and
2916 * assign obj_cgroups in parallel. In this case the existing
2e9bd483
RG
2917 * objcg vector should be reused.
2918 */
10befea9 2919 kfree(vec);
2e9bd483
RG
2920 return 0;
2921 }
10befea9 2922
2e9bd483 2923 kmemleak_not_leak(vec);
10befea9
RG
2924 return 0;
2925}
2926
fc4db90f
RG
2927static __always_inline
2928struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
8380ce47 2929{
8380ce47 2930 /*
9855609b
RG
2931 * Slab objects are accounted individually, not per-page.
2932 * Memcg membership data for each individual object is saved in
4b5f8d9a 2933 * slab->memcg_data.
8380ce47 2934 */
4b5f8d9a
VB
2935 if (folio_test_slab(folio)) {
2936 struct obj_cgroup **objcgs;
2937 struct slab *slab;
9855609b
RG
2938 unsigned int off;
2939
4b5f8d9a
VB
2940 slab = folio_slab(folio);
2941 objcgs = slab_objcgs(slab);
2942 if (!objcgs)
2943 return NULL;
2944
2945 off = obj_to_index(slab->slab_cache, slab, p);
2946 if (objcgs[off])
2947 return obj_cgroup_memcg(objcgs[off]);
10befea9
RG
2948
2949 return NULL;
9855609b 2950 }
8380ce47 2951
bcfe06bf 2952 /*
becacb04 2953 * folio_memcg_check() is used here, because in theory we can encounter
4b5f8d9a
VB
2954 * a folio where the slab flag has been cleared already, but
2955 * slab->memcg_data has not been freed yet
becacb04 2956 * folio_memcg_check() will guarantee that a proper memory
bcfe06bf
RG
2957 * cgroup pointer or NULL will be returned.
2958 */
becacb04 2959 return folio_memcg_check(folio);
8380ce47
RG
2960}
2961
fc4db90f
RG
2962/*
2963 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2964 *
2965 * A passed kernel object can be a slab object, vmalloc object or a generic
2966 * kernel page, so different mechanisms for getting the memory cgroup pointer
2967 * should be used.
2968 *
2969 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2970 * can not know for sure how the kernel object is implemented.
2971 * mem_cgroup_from_obj() can be safely used in such cases.
2972 *
2973 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2974 * cgroup_mutex, etc.
2975 */
2976struct mem_cgroup *mem_cgroup_from_obj(void *p)
2977{
2978 struct folio *folio;
2979
2980 if (mem_cgroup_disabled())
2981 return NULL;
2982
2983 if (unlikely(is_vmalloc_addr(p)))
2984 folio = page_folio(vmalloc_to_page(p));
2985 else
2986 folio = virt_to_folio(p);
2987
2988 return mem_cgroup_from_obj_folio(folio, p);
2989}
2990
2991/*
2992 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2993 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects,
2994 * allocated using vmalloc().
2995 *
2996 * A passed kernel object must be a slab object or a generic kernel page.
2997 *
2998 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2999 * cgroup_mutex, etc.
3000 */
3001struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3002{
3003 if (mem_cgroup_disabled())
3004 return NULL;
3005
3006 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3007}
3008
f4840ccf
JW
3009static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3010{
3011 struct obj_cgroup *objcg = NULL;
3012
7848ed62 3013 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
f4840ccf
JW
3014 objcg = rcu_dereference(memcg->objcg);
3015 if (objcg && obj_cgroup_tryget(objcg))
3016 break;
3017 objcg = NULL;
3018 }
3019 return objcg;
3020}
3021
bf4f0599
RG
3022__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3023{
3024 struct obj_cgroup *objcg = NULL;
3025 struct mem_cgroup *memcg;
3026
279c3393
RG
3027 if (memcg_kmem_bypass())
3028 return NULL;
3029
bf4f0599 3030 rcu_read_lock();
37d5985c
RG
3031 if (unlikely(active_memcg()))
3032 memcg = active_memcg();
bf4f0599
RG
3033 else
3034 memcg = mem_cgroup_from_task(current);
f4840ccf 3035 objcg = __get_obj_cgroup_from_memcg(memcg);
bf4f0599 3036 rcu_read_unlock();
f4840ccf
JW
3037 return objcg;
3038}
3039
3040struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
3041{
3042 struct obj_cgroup *objcg;
3043
f7a449f7 3044 if (!memcg_kmem_online())
f4840ccf
JW
3045 return NULL;
3046
3047 if (PageMemcgKmem(page)) {
3048 objcg = __folio_objcg(page_folio(page));
3049 obj_cgroup_get(objcg);
3050 } else {
3051 struct mem_cgroup *memcg;
bf4f0599 3052
f4840ccf
JW
3053 rcu_read_lock();
3054 memcg = __folio_memcg(page_folio(page));
3055 if (memcg)
3056 objcg = __get_obj_cgroup_from_memcg(memcg);
3057 else
3058 objcg = NULL;
3059 rcu_read_unlock();
3060 }
bf4f0599
RG
3061 return objcg;
3062}
3063
a8c49af3
YA
3064static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3065{
3066 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3067 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3068 if (nr_pages > 0)
3069 page_counter_charge(&memcg->kmem, nr_pages);
3070 else
3071 page_counter_uncharge(&memcg->kmem, -nr_pages);
3072 }
3073}
3074
3075
f1286fae
MS
3076/*
3077 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
3078 * @objcg: object cgroup to uncharge
3079 * @nr_pages: number of pages to uncharge
3080 */
e74d2259
MS
3081static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3082 unsigned int nr_pages)
3083{
3084 struct mem_cgroup *memcg;
3085
3086 memcg = get_mem_cgroup_from_objcg(objcg);
e74d2259 3087
a8c49af3 3088 memcg_account_kmem(memcg, -nr_pages);
f1286fae 3089 refill_stock(memcg, nr_pages);
e74d2259 3090
e74d2259 3091 css_put(&memcg->css);
e74d2259
MS
3092}
3093
f1286fae
MS
3094/*
3095 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
3096 * @objcg: object cgroup to charge
45264778 3097 * @gfp: reclaim mode
92d0510c 3098 * @nr_pages: number of pages to charge
45264778
VD
3099 *
3100 * Returns 0 on success, an error code on failure.
3101 */
f1286fae
MS
3102static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3103 unsigned int nr_pages)
7ae1e1d0 3104{
f1286fae 3105 struct mem_cgroup *memcg;
7ae1e1d0
GC
3106 int ret;
3107
f1286fae
MS
3108 memcg = get_mem_cgroup_from_objcg(objcg);
3109
c5c8b16b 3110 ret = try_charge_memcg(memcg, gfp, nr_pages);
52c29b04 3111 if (ret)
f1286fae 3112 goto out;
52c29b04 3113
a8c49af3 3114 memcg_account_kmem(memcg, nr_pages);
f1286fae
MS
3115out:
3116 css_put(&memcg->css);
4b13f64d 3117
f1286fae 3118 return ret;
4b13f64d
RG
3119}
3120
45264778 3121/**
f4b00eab 3122 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
45264778
VD
3123 * @page: page to charge
3124 * @gfp: reclaim mode
3125 * @order: allocation order
3126 *
3127 * Returns 0 on success, an error code on failure.
3128 */
f4b00eab 3129int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
7ae1e1d0 3130{
b4e0b68f 3131 struct obj_cgroup *objcg;
fcff7d7e 3132 int ret = 0;
7ae1e1d0 3133
b4e0b68f
MS
3134 objcg = get_obj_cgroup_from_current();
3135 if (objcg) {
3136 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
4d96ba35 3137 if (!ret) {
b4e0b68f 3138 page->memcg_data = (unsigned long)objcg |
18b2db3b 3139 MEMCG_DATA_KMEM;
1a3e1f40 3140 return 0;
4d96ba35 3141 }
b4e0b68f 3142 obj_cgroup_put(objcg);
c4159a75 3143 }
d05e83a6 3144 return ret;
7ae1e1d0 3145}
49a18eae 3146
45264778 3147/**
f4b00eab 3148 * __memcg_kmem_uncharge_page: uncharge a kmem page
45264778
VD
3149 * @page: page to uncharge
3150 * @order: allocation order
3151 */
f4b00eab 3152void __memcg_kmem_uncharge_page(struct page *page, int order)
7ae1e1d0 3153{
1b7e4464 3154 struct folio *folio = page_folio(page);
b4e0b68f 3155 struct obj_cgroup *objcg;
f3ccb2c4 3156 unsigned int nr_pages = 1 << order;
7ae1e1d0 3157
1b7e4464 3158 if (!folio_memcg_kmem(folio))
7ae1e1d0
GC
3159 return;
3160
1b7e4464 3161 objcg = __folio_objcg(folio);
b4e0b68f 3162 obj_cgroup_uncharge_pages(objcg, nr_pages);
1b7e4464 3163 folio->memcg_data = 0;
b4e0b68f 3164 obj_cgroup_put(objcg);
60d3fd32 3165}
bf4f0599 3166
68ac5b3c
WL
3167void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3168 enum node_stat_item idx, int nr)
3169{
fead2b86 3170 struct memcg_stock_pcp *stock;
56751146 3171 struct obj_cgroup *old = NULL;
68ac5b3c
WL
3172 unsigned long flags;
3173 int *bytes;
3174
56751146 3175 local_lock_irqsave(&memcg_stock.stock_lock, flags);
fead2b86
MH
3176 stock = this_cpu_ptr(&memcg_stock);
3177
68ac5b3c
WL
3178 /*
3179 * Save vmstat data in stock and skip vmstat array update unless
3180 * accumulating over a page of vmstat data or when pgdat or idx
3181 * changes.
3182 */
3183 if (stock->cached_objcg != objcg) {
56751146 3184 old = drain_obj_stock(stock);
68ac5b3c
WL
3185 obj_cgroup_get(objcg);
3186 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3187 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3188 stock->cached_objcg = objcg;
3189 stock->cached_pgdat = pgdat;
3190 } else if (stock->cached_pgdat != pgdat) {
3191 /* Flush the existing cached vmstat data */
7fa0dacb
WL
3192 struct pglist_data *oldpg = stock->cached_pgdat;
3193
68ac5b3c 3194 if (stock->nr_slab_reclaimable_b) {
7fa0dacb 3195 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
68ac5b3c
WL
3196 stock->nr_slab_reclaimable_b);
3197 stock->nr_slab_reclaimable_b = 0;
3198 }
3199 if (stock->nr_slab_unreclaimable_b) {
7fa0dacb 3200 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
68ac5b3c
WL
3201 stock->nr_slab_unreclaimable_b);
3202 stock->nr_slab_unreclaimable_b = 0;
3203 }
3204 stock->cached_pgdat = pgdat;
3205 }
3206
3207 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3208 : &stock->nr_slab_unreclaimable_b;
3209 /*
3210 * Even for large object >= PAGE_SIZE, the vmstat data will still be
3211 * cached locally at least once before pushing it out.
3212 */
3213 if (!*bytes) {
3214 *bytes = nr;
3215 nr = 0;
3216 } else {
3217 *bytes += nr;
3218 if (abs(*bytes) > PAGE_SIZE) {
3219 nr = *bytes;
3220 *bytes = 0;
3221 } else {
3222 nr = 0;
3223 }
3224 }
3225 if (nr)
3226 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3227
56751146
SAS
3228 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3229 if (old)
3230 obj_cgroup_put(old);
68ac5b3c
WL
3231}
3232
bf4f0599
RG
3233static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3234{
fead2b86 3235 struct memcg_stock_pcp *stock;
bf4f0599
RG
3236 unsigned long flags;
3237 bool ret = false;
3238
56751146 3239 local_lock_irqsave(&memcg_stock.stock_lock, flags);
fead2b86
MH
3240
3241 stock = this_cpu_ptr(&memcg_stock);
bf4f0599
RG
3242 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3243 stock->nr_bytes -= nr_bytes;
3244 ret = true;
3245 }
3246
56751146 3247 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
bf4f0599
RG
3248
3249 return ret;
3250}
3251
56751146 3252static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
bf4f0599
RG
3253{
3254 struct obj_cgroup *old = stock->cached_objcg;
3255
3256 if (!old)
56751146 3257 return NULL;
bf4f0599
RG
3258
3259 if (stock->nr_bytes) {
3260 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3261 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3262
af9a3b69
JW
3263 if (nr_pages) {
3264 struct mem_cgroup *memcg;
3265
3266 memcg = get_mem_cgroup_from_objcg(old);
3267
3268 memcg_account_kmem(memcg, -nr_pages);
3269 __refill_stock(memcg, nr_pages);
3270
3271 css_put(&memcg->css);
3272 }
bf4f0599
RG
3273
3274 /*
3275 * The leftover is flushed to the centralized per-memcg value.
3276 * On the next attempt to refill obj stock it will be moved
3277 * to a per-cpu stock (probably, on an other CPU), see
3278 * refill_obj_stock().
3279 *
3280 * How often it's flushed is a trade-off between the memory
3281 * limit enforcement accuracy and potential CPU contention,
3282 * so it might be changed in the future.
3283 */
3284 atomic_add(nr_bytes, &old->nr_charged_bytes);
3285 stock->nr_bytes = 0;
3286 }
3287
68ac5b3c
WL
3288 /*
3289 * Flush the vmstat data in current stock
3290 */
3291 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3292 if (stock->nr_slab_reclaimable_b) {
3293 mod_objcg_mlstate(old, stock->cached_pgdat,
3294 NR_SLAB_RECLAIMABLE_B,
3295 stock->nr_slab_reclaimable_b);
3296 stock->nr_slab_reclaimable_b = 0;
3297 }
3298 if (stock->nr_slab_unreclaimable_b) {
3299 mod_objcg_mlstate(old, stock->cached_pgdat,
3300 NR_SLAB_UNRECLAIMABLE_B,
3301 stock->nr_slab_unreclaimable_b);
3302 stock->nr_slab_unreclaimable_b = 0;
3303 }
3304 stock->cached_pgdat = NULL;
3305 }
3306
bf4f0599 3307 stock->cached_objcg = NULL;
56751146
SAS
3308 /*
3309 * The `old' objects needs to be released by the caller via
3310 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3311 */
3312 return old;
bf4f0599
RG
3313}
3314
3315static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3316 struct mem_cgroup *root_memcg)
3317{
3318 struct mem_cgroup *memcg;
3319
fead2b86
MH
3320 if (stock->cached_objcg) {
3321 memcg = obj_cgroup_memcg(stock->cached_objcg);
bf4f0599
RG
3322 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3323 return true;
3324 }
3325
3326 return false;
3327}
3328
5387c904
WL
3329static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3330 bool allow_uncharge)
bf4f0599 3331{
fead2b86 3332 struct memcg_stock_pcp *stock;
56751146 3333 struct obj_cgroup *old = NULL;
bf4f0599 3334 unsigned long flags;
5387c904 3335 unsigned int nr_pages = 0;
bf4f0599 3336
56751146 3337 local_lock_irqsave(&memcg_stock.stock_lock, flags);
fead2b86
MH
3338
3339 stock = this_cpu_ptr(&memcg_stock);
bf4f0599 3340 if (stock->cached_objcg != objcg) { /* reset if necessary */
56751146 3341 old = drain_obj_stock(stock);
bf4f0599
RG
3342 obj_cgroup_get(objcg);
3343 stock->cached_objcg = objcg;
5387c904
WL
3344 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3345 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3346 allow_uncharge = true; /* Allow uncharge when objcg changes */
bf4f0599
RG
3347 }
3348 stock->nr_bytes += nr_bytes;
3349
5387c904
WL
3350 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3351 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3352 stock->nr_bytes &= (PAGE_SIZE - 1);
3353 }
bf4f0599 3354
56751146
SAS
3355 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3356 if (old)
3357 obj_cgroup_put(old);
5387c904
WL
3358
3359 if (nr_pages)
3360 obj_cgroup_uncharge_pages(objcg, nr_pages);
bf4f0599
RG
3361}
3362
3363int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3364{
bf4f0599
RG
3365 unsigned int nr_pages, nr_bytes;
3366 int ret;
3367
3368 if (consume_obj_stock(objcg, size))
3369 return 0;
3370
3371 /*
5387c904 3372 * In theory, objcg->nr_charged_bytes can have enough
bf4f0599 3373 * pre-charged bytes to satisfy the allocation. However,
5387c904
WL
3374 * flushing objcg->nr_charged_bytes requires two atomic
3375 * operations, and objcg->nr_charged_bytes can't be big.
3376 * The shared objcg->nr_charged_bytes can also become a
3377 * performance bottleneck if all tasks of the same memcg are
3378 * trying to update it. So it's better to ignore it and try
3379 * grab some new pages. The stock's nr_bytes will be flushed to
3380 * objcg->nr_charged_bytes later on when objcg changes.
3381 *
3382 * The stock's nr_bytes may contain enough pre-charged bytes
3383 * to allow one less page from being charged, but we can't rely
3384 * on the pre-charged bytes not being changed outside of
3385 * consume_obj_stock() or refill_obj_stock(). So ignore those
3386 * pre-charged bytes as well when charging pages. To avoid a
3387 * page uncharge right after a page charge, we set the
3388 * allow_uncharge flag to false when calling refill_obj_stock()
3389 * to temporarily allow the pre-charged bytes to exceed the page
3390 * size limit. The maximum reachable value of the pre-charged
3391 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3392 * race.
bf4f0599 3393 */
bf4f0599
RG
3394 nr_pages = size >> PAGE_SHIFT;
3395 nr_bytes = size & (PAGE_SIZE - 1);
3396
3397 if (nr_bytes)
3398 nr_pages += 1;
3399
e74d2259 3400 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
bf4f0599 3401 if (!ret && nr_bytes)
5387c904 3402 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
bf4f0599 3403
bf4f0599
RG
3404 return ret;
3405}
3406
3407void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3408{
5387c904 3409 refill_obj_stock(objcg, size, true);
bf4f0599
RG
3410}
3411
84c07d11 3412#endif /* CONFIG_MEMCG_KMEM */
7ae1e1d0 3413
ca3e0214 3414/*
be6c8982 3415 * Because page_memcg(head) is not set on tails, set it now.
ca3e0214 3416 */
be6c8982 3417void split_page_memcg(struct page *head, unsigned int nr)
ca3e0214 3418{
1b7e4464
MWO
3419 struct folio *folio = page_folio(head);
3420 struct mem_cgroup *memcg = folio_memcg(folio);
e94c8a9c 3421 int i;
ca3e0214 3422
be6c8982 3423 if (mem_cgroup_disabled() || !memcg)
3d37c4a9 3424 return;
b070e65c 3425
be6c8982 3426 for (i = 1; i < nr; i++)
1b7e4464 3427 folio_page(folio, i)->memcg_data = folio->memcg_data;
b4e0b68f 3428
1b7e4464
MWO
3429 if (folio_memcg_kmem(folio))
3430 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
b4e0b68f
MS
3431 else
3432 css_get_many(&memcg->css, nr - 1);
ca3e0214 3433}
ca3e0214 3434
e55b9f96 3435#ifdef CONFIG_SWAP
02491447
DN
3436/**
3437 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3438 * @entry: swap entry to be moved
3439 * @from: mem_cgroup which the entry is moved from
3440 * @to: mem_cgroup which the entry is moved to
3441 *
3442 * It succeeds only when the swap_cgroup's record for this entry is the same
3443 * as the mem_cgroup's id of @from.
3444 *
3445 * Returns 0 on success, -EINVAL on failure.
3446 *
3e32cb2e 3447 * The caller must have charged to @to, IOW, called page_counter_charge() about
02491447
DN
3448 * both res and memsw, and called css_get().
3449 */
3450static int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3451 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3452{
3453 unsigned short old_id, new_id;
3454
34c00c31
LZ
3455 old_id = mem_cgroup_id(from);
3456 new_id = mem_cgroup_id(to);
02491447
DN
3457
3458 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
c9019e9b
JW
3459 mod_memcg_state(from, MEMCG_SWAP, -1);
3460 mod_memcg_state(to, MEMCG_SWAP, 1);
02491447
DN
3461 return 0;
3462 }
3463 return -EINVAL;
3464}
3465#else
3466static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 3467 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
3468{
3469 return -EINVAL;
3470}
8c7c6e34 3471#endif
d13d1443 3472
bbec2e15 3473static DEFINE_MUTEX(memcg_max_mutex);
f212ad7c 3474
bbec2e15
RG
3475static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3476 unsigned long max, bool memsw)
628f4235 3477{
3e32cb2e 3478 bool enlarge = false;
bb4a7ea2 3479 bool drained = false;
3e32cb2e 3480 int ret;
c054a78c
YZ
3481 bool limits_invariant;
3482 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
81d39c20 3483
3e32cb2e 3484 do {
628f4235
KH
3485 if (signal_pending(current)) {
3486 ret = -EINTR;
3487 break;
3488 }
3e32cb2e 3489
bbec2e15 3490 mutex_lock(&memcg_max_mutex);
c054a78c
YZ
3491 /*
3492 * Make sure that the new limit (memsw or memory limit) doesn't
bbec2e15 3493 * break our basic invariant rule memory.max <= memsw.max.
c054a78c 3494 */
15b42562 3495 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
bbec2e15 3496 max <= memcg->memsw.max;
c054a78c 3497 if (!limits_invariant) {
bbec2e15 3498 mutex_unlock(&memcg_max_mutex);
8c7c6e34 3499 ret = -EINVAL;
8c7c6e34
KH
3500 break;
3501 }
bbec2e15 3502 if (max > counter->max)
3e32cb2e 3503 enlarge = true;
bbec2e15
RG
3504 ret = page_counter_set_max(counter, max);
3505 mutex_unlock(&memcg_max_mutex);
8c7c6e34
KH
3506
3507 if (!ret)
3508 break;
3509
bb4a7ea2
SB
3510 if (!drained) {
3511 drain_all_stock(memcg);
3512 drained = true;
3513 continue;
3514 }
3515
73b73bac 3516 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
55ab834a 3517 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
1ab5c056
AR
3518 ret = -EBUSY;
3519 break;
3520 }
3521 } while (true);
3e32cb2e 3522
3c11ecf4
KH
3523 if (!ret && enlarge)
3524 memcg_oom_recover(memcg);
3e32cb2e 3525
628f4235
KH
3526 return ret;
3527}
3528
ef8f2327 3529unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
0608f43d
AM
3530 gfp_t gfp_mask,
3531 unsigned long *total_scanned)
3532{
3533 unsigned long nr_reclaimed = 0;
ef8f2327 3534 struct mem_cgroup_per_node *mz, *next_mz = NULL;
0608f43d
AM
3535 unsigned long reclaimed;
3536 int loop = 0;
ef8f2327 3537 struct mem_cgroup_tree_per_node *mctz;
3e32cb2e 3538 unsigned long excess;
0608f43d 3539
e4dde56c
YZ
3540 if (lru_gen_enabled())
3541 return 0;
3542
0608f43d
AM
3543 if (order > 0)
3544 return 0;
3545
2ab082ba 3546 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
d6507ff5
MH
3547
3548 /*
3549 * Do not even bother to check the largest node if the root
3550 * is empty. Do it lockless to prevent lock bouncing. Races
3551 * are acceptable as soft limit is best effort anyway.
3552 */
bfc7228b 3553 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
d6507ff5
MH
3554 return 0;
3555
0608f43d
AM
3556 /*
3557 * This loop can run a while, specially if mem_cgroup's continuously
3558 * keep exceeding their soft limit and putting the system under
3559 * pressure
3560 */
3561 do {
3562 if (next_mz)
3563 mz = next_mz;
3564 else
3565 mz = mem_cgroup_largest_soft_limit_node(mctz);
3566 if (!mz)
3567 break;
3568
ef8f2327 3569 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
d8f65338 3570 gfp_mask, total_scanned);
0608f43d 3571 nr_reclaimed += reclaimed;
0a31bc97 3572 spin_lock_irq(&mctz->lock);
0608f43d
AM
3573
3574 /*
3575 * If we failed to reclaim anything from this memory cgroup
3576 * it is time to move on to the next cgroup
3577 */
3578 next_mz = NULL;
bc2f2e7f
VD
3579 if (!reclaimed)
3580 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3581
3e32cb2e 3582 excess = soft_limit_excess(mz->memcg);
0608f43d
AM
3583 /*
3584 * One school of thought says that we should not add
3585 * back the node to the tree if reclaim returns 0.
3586 * But our reclaim could return 0, simply because due
3587 * to priority we are exposing a smaller subset of
3588 * memory to reclaim from. Consider this as a longer
3589 * term TODO.
3590 */
3591 /* If excess == 0, no tree ops */
cf2c8127 3592 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 3593 spin_unlock_irq(&mctz->lock);
0608f43d
AM
3594 css_put(&mz->memcg->css);
3595 loop++;
3596 /*
3597 * Could not reclaim anything and there are no more
3598 * mem cgroups to try or we seem to be looping without
3599 * reclaiming anything.
3600 */
3601 if (!nr_reclaimed &&
3602 (next_mz == NULL ||
3603 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3604 break;
3605 } while (!nr_reclaimed);
3606 if (next_mz)
3607 css_put(&next_mz->memcg->css);
3608 return nr_reclaimed;
3609}
3610
c26251f9 3611/*
51038171 3612 * Reclaims as many pages from the given memcg as possible.
c26251f9
MH
3613 *
3614 * Caller is responsible for holding css reference for memcg.
3615 */
3616static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3617{
d977aa93 3618 int nr_retries = MAX_RECLAIM_RETRIES;
c26251f9 3619
c1e862c1
KH
3620 /* we call try-to-free pages for make this cgroup empty */
3621 lru_add_drain_all();
d12c60f6
JS
3622
3623 drain_all_stock(memcg);
3624
f817ed48 3625 /* try to free all pages in this cgroup */
3e32cb2e 3626 while (nr_retries && page_counter_read(&memcg->memory)) {
c26251f9
MH
3627 if (signal_pending(current))
3628 return -EINTR;
3629
73b73bac 3630 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
55ab834a 3631 MEMCG_RECLAIM_MAY_SWAP))
f817ed48 3632 nr_retries--;
f817ed48 3633 }
ab5196c2
MH
3634
3635 return 0;
cc847582
KH
3636}
3637
6770c64e
TH
3638static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3639 char *buf, size_t nbytes,
3640 loff_t off)
c1e862c1 3641{
6770c64e 3642 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
c26251f9 3643
d8423011
MH
3644 if (mem_cgroup_is_root(memcg))
3645 return -EINVAL;
6770c64e 3646 return mem_cgroup_force_empty(memcg) ?: nbytes;
c1e862c1
KH
3647}
3648
182446d0
TH
3649static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3650 struct cftype *cft)
18f59ea7 3651{
bef8620c 3652 return 1;
18f59ea7
BS
3653}
3654
182446d0
TH
3655static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3656 struct cftype *cft, u64 val)
18f59ea7 3657{
bef8620c 3658 if (val == 1)
0b8f73e1 3659 return 0;
567fb435 3660
bef8620c
RG
3661 pr_warn_once("Non-hierarchical mode is deprecated. "
3662 "Please report your usecase to linux-mm@kvack.org if you "
3663 "depend on this functionality.\n");
567fb435 3664
bef8620c 3665 return -EINVAL;
18f59ea7
BS
3666}
3667
6f646156 3668static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
ce00a967 3669{
42a30035 3670 unsigned long val;
ce00a967 3671
3e32cb2e 3672 if (mem_cgroup_is_root(memcg)) {
fd25a9e0 3673 mem_cgroup_flush_stats();
0d1c2072 3674 val = memcg_page_state(memcg, NR_FILE_PAGES) +
be5d0a74 3675 memcg_page_state(memcg, NR_ANON_MAPPED);
42a30035
JW
3676 if (swap)
3677 val += memcg_page_state(memcg, MEMCG_SWAP);
3e32cb2e 3678 } else {
ce00a967 3679 if (!swap)
3e32cb2e 3680 val = page_counter_read(&memcg->memory);
ce00a967 3681 else
3e32cb2e 3682 val = page_counter_read(&memcg->memsw);
ce00a967 3683 }
c12176d3 3684 return val;
ce00a967
JW
3685}
3686
3e32cb2e
JW
3687enum {
3688 RES_USAGE,
3689 RES_LIMIT,
3690 RES_MAX_USAGE,
3691 RES_FAILCNT,
3692 RES_SOFT_LIMIT,
3693};
ce00a967 3694
791badbd 3695static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
05b84301 3696 struct cftype *cft)
8cdea7c0 3697{
182446d0 3698 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3e32cb2e 3699 struct page_counter *counter;
af36f906 3700
3e32cb2e 3701 switch (MEMFILE_TYPE(cft->private)) {
8c7c6e34 3702 case _MEM:
3e32cb2e
JW
3703 counter = &memcg->memory;
3704 break;
8c7c6e34 3705 case _MEMSWAP:
3e32cb2e
JW
3706 counter = &memcg->memsw;
3707 break;
510fc4e1 3708 case _KMEM:
3e32cb2e 3709 counter = &memcg->kmem;
510fc4e1 3710 break;
d55f90bf 3711 case _TCP:
0db15298 3712 counter = &memcg->tcpmem;
d55f90bf 3713 break;
8c7c6e34
KH
3714 default:
3715 BUG();
8c7c6e34 3716 }
3e32cb2e
JW
3717
3718 switch (MEMFILE_ATTR(cft->private)) {
3719 case RES_USAGE:
3720 if (counter == &memcg->memory)
c12176d3 3721 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3e32cb2e 3722 if (counter == &memcg->memsw)
c12176d3 3723 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3e32cb2e
JW
3724 return (u64)page_counter_read(counter) * PAGE_SIZE;
3725 case RES_LIMIT:
bbec2e15 3726 return (u64)counter->max * PAGE_SIZE;
3e32cb2e
JW
3727 case RES_MAX_USAGE:
3728 return (u64)counter->watermark * PAGE_SIZE;
3729 case RES_FAILCNT:
3730 return counter->failcnt;
3731 case RES_SOFT_LIMIT:
2178e20c 3732 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3e32cb2e
JW
3733 default:
3734 BUG();
3735 }
8cdea7c0 3736}
510fc4e1 3737
84c07d11 3738#ifdef CONFIG_MEMCG_KMEM
567e9ab2 3739static int memcg_online_kmem(struct mem_cgroup *memcg)
d6441637 3740{
bf4f0599 3741 struct obj_cgroup *objcg;
d6441637 3742
9c94bef9 3743 if (mem_cgroup_kmem_disabled())
b313aeee
VD
3744 return 0;
3745
da0efe30
MS
3746 if (unlikely(mem_cgroup_is_root(memcg)))
3747 return 0;
d6441637 3748
bf4f0599 3749 objcg = obj_cgroup_alloc();
f9c69d63 3750 if (!objcg)
bf4f0599 3751 return -ENOMEM;
f9c69d63 3752
bf4f0599
RG
3753 objcg->memcg = memcg;
3754 rcu_assign_pointer(memcg->objcg, objcg);
3755
f7a449f7 3756 static_branch_enable(&memcg_kmem_online_key);
d648bcc7 3757
f9c69d63 3758 memcg->kmemcg_id = memcg->id.id;
0b8f73e1
JW
3759
3760 return 0;
d6441637
VD
3761}
3762
8e0a8912
JW
3763static void memcg_offline_kmem(struct mem_cgroup *memcg)
3764{
64268868 3765 struct mem_cgroup *parent;
8e0a8912 3766
9c94bef9 3767 if (mem_cgroup_kmem_disabled())
da0efe30
MS
3768 return;
3769
3770 if (unlikely(mem_cgroup_is_root(memcg)))
8e0a8912 3771 return;
9855609b 3772
8e0a8912
JW
3773 parent = parent_mem_cgroup(memcg);
3774 if (!parent)
3775 parent = root_mem_cgroup;
3776
bf4f0599 3777 memcg_reparent_objcgs(memcg, parent);
fb2f2b0a 3778
8e0a8912 3779 /*
64268868
MS
3780 * After we have finished memcg_reparent_objcgs(), all list_lrus
3781 * corresponding to this cgroup are guaranteed to remain empty.
3782 * The ordering is imposed by list_lru_node->lock taken by
1f391eb2 3783 * memcg_reparent_list_lrus().
8e0a8912 3784 */
1f391eb2 3785 memcg_reparent_list_lrus(memcg, parent);
8e0a8912 3786}
d6441637 3787#else
0b8f73e1 3788static int memcg_online_kmem(struct mem_cgroup *memcg)
127424c8
JW
3789{
3790 return 0;
3791}
3792static void memcg_offline_kmem(struct mem_cgroup *memcg)
3793{
3794}
84c07d11 3795#endif /* CONFIG_MEMCG_KMEM */
127424c8 3796
bbec2e15 3797static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
d55f90bf
VD
3798{
3799 int ret;
3800
bbec2e15 3801 mutex_lock(&memcg_max_mutex);
d55f90bf 3802
bbec2e15 3803 ret = page_counter_set_max(&memcg->tcpmem, max);
d55f90bf
VD
3804 if (ret)
3805 goto out;
3806
0db15298 3807 if (!memcg->tcpmem_active) {
d55f90bf
VD
3808 /*
3809 * The active flag needs to be written after the static_key
3810 * update. This is what guarantees that the socket activation
2d758073
JW
3811 * function is the last one to run. See mem_cgroup_sk_alloc()
3812 * for details, and note that we don't mark any socket as
3813 * belonging to this memcg until that flag is up.
d55f90bf
VD
3814 *
3815 * We need to do this, because static_keys will span multiple
3816 * sites, but we can't control their order. If we mark a socket
3817 * as accounted, but the accounting functions are not patched in
3818 * yet, we'll lose accounting.
3819 *
2d758073 3820 * We never race with the readers in mem_cgroup_sk_alloc(),
d55f90bf
VD
3821 * because when this value change, the code to process it is not
3822 * patched in yet.
3823 */
3824 static_branch_inc(&memcg_sockets_enabled_key);
0db15298 3825 memcg->tcpmem_active = true;
d55f90bf
VD
3826 }
3827out:
bbec2e15 3828 mutex_unlock(&memcg_max_mutex);
d55f90bf
VD
3829 return ret;
3830}
d55f90bf 3831
628f4235
KH
3832/*
3833 * The user of this function is...
3834 * RES_LIMIT.
3835 */
451af504
TH
3836static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3837 char *buf, size_t nbytes, loff_t off)
8cdea7c0 3838{
451af504 3839 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 3840 unsigned long nr_pages;
628f4235
KH
3841 int ret;
3842
451af504 3843 buf = strstrip(buf);
650c5e56 3844 ret = page_counter_memparse(buf, "-1", &nr_pages);
3e32cb2e
JW
3845 if (ret)
3846 return ret;
af36f906 3847
3e32cb2e 3848 switch (MEMFILE_ATTR(of_cft(of)->private)) {
628f4235 3849 case RES_LIMIT:
4b3bde4c
BS
3850 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3851 ret = -EINVAL;
3852 break;
3853 }
3e32cb2e
JW
3854 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3855 case _MEM:
bbec2e15 3856 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
8c7c6e34 3857 break;
3e32cb2e 3858 case _MEMSWAP:
bbec2e15 3859 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
296c81d8 3860 break;
3e32cb2e 3861 case _KMEM:
58056f77
SB
3862 /* kmem.limit_in_bytes is deprecated. */
3863 ret = -EOPNOTSUPP;
3e32cb2e 3864 break;
d55f90bf 3865 case _TCP:
bbec2e15 3866 ret = memcg_update_tcp_max(memcg, nr_pages);
d55f90bf 3867 break;
3e32cb2e 3868 }
296c81d8 3869 break;
3e32cb2e 3870 case RES_SOFT_LIMIT:
2343e88d
SAS
3871 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3872 ret = -EOPNOTSUPP;
3873 } else {
2178e20c 3874 WRITE_ONCE(memcg->soft_limit, nr_pages);
2343e88d
SAS
3875 ret = 0;
3876 }
628f4235
KH
3877 break;
3878 }
451af504 3879 return ret ?: nbytes;
8cdea7c0
BS
3880}
3881
6770c64e
TH
3882static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3883 size_t nbytes, loff_t off)
c84872e1 3884{
6770c64e 3885 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 3886 struct page_counter *counter;
c84872e1 3887
3e32cb2e
JW
3888 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3889 case _MEM:
3890 counter = &memcg->memory;
3891 break;
3892 case _MEMSWAP:
3893 counter = &memcg->memsw;
3894 break;
3895 case _KMEM:
3896 counter = &memcg->kmem;
3897 break;
d55f90bf 3898 case _TCP:
0db15298 3899 counter = &memcg->tcpmem;
d55f90bf 3900 break;
3e32cb2e
JW
3901 default:
3902 BUG();
3903 }
af36f906 3904
3e32cb2e 3905 switch (MEMFILE_ATTR(of_cft(of)->private)) {
29f2a4da 3906 case RES_MAX_USAGE:
3e32cb2e 3907 page_counter_reset_watermark(counter);
29f2a4da
PE
3908 break;
3909 case RES_FAILCNT:
3e32cb2e 3910 counter->failcnt = 0;
29f2a4da 3911 break;
3e32cb2e
JW
3912 default:
3913 BUG();
29f2a4da 3914 }
f64c3f54 3915
6770c64e 3916 return nbytes;
c84872e1
PE
3917}
3918
182446d0 3919static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
7dc74be0
DN
3920 struct cftype *cft)
3921{
182446d0 3922 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
7dc74be0
DN
3923}
3924
02491447 3925#ifdef CONFIG_MMU
182446d0 3926static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
7dc74be0
DN
3927 struct cftype *cft, u64 val)
3928{
182446d0 3929 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7dc74be0 3930
da34a848
JW
3931 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
3932 "Please report your usecase to linux-mm@kvack.org if you "
3933 "depend on this functionality.\n");
3934
1dfab5ab 3935 if (val & ~MOVE_MASK)
7dc74be0 3936 return -EINVAL;
ee5e8472 3937
7dc74be0 3938 /*
ee5e8472
GC
3939 * No kind of locking is needed in here, because ->can_attach() will
3940 * check this value once in the beginning of the process, and then carry
3941 * on with stale data. This means that changes to this value will only
3942 * affect task migrations starting after the change.
7dc74be0 3943 */
c0ff4b85 3944 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
3945 return 0;
3946}
02491447 3947#else
182446d0 3948static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
02491447
DN
3949 struct cftype *cft, u64 val)
3950{
3951 return -ENOSYS;
3952}
3953#endif
7dc74be0 3954
406eb0c9 3955#ifdef CONFIG_NUMA
113b7dfd
JW
3956
3957#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3958#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3959#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
3960
3961static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6 3962 int nid, unsigned int lru_mask, bool tree)
113b7dfd 3963{
867e5e1d 3964 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
113b7dfd
JW
3965 unsigned long nr = 0;
3966 enum lru_list lru;
3967
3968 VM_BUG_ON((unsigned)nid >= nr_node_ids);
3969
3970 for_each_lru(lru) {
3971 if (!(BIT(lru) & lru_mask))
3972 continue;
dd8657b6
SB
3973 if (tree)
3974 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3975 else
3976 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
113b7dfd
JW
3977 }
3978 return nr;
3979}
3980
3981static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6
SB
3982 unsigned int lru_mask,
3983 bool tree)
113b7dfd
JW
3984{
3985 unsigned long nr = 0;
3986 enum lru_list lru;
3987
3988 for_each_lru(lru) {
3989 if (!(BIT(lru) & lru_mask))
3990 continue;
dd8657b6
SB
3991 if (tree)
3992 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3993 else
3994 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
113b7dfd
JW
3995 }
3996 return nr;
3997}
3998
2da8ca82 3999static int memcg_numa_stat_show(struct seq_file *m, void *v)
406eb0c9 4000{
25485de6
GT
4001 struct numa_stat {
4002 const char *name;
4003 unsigned int lru_mask;
4004 };
4005
4006 static const struct numa_stat stats[] = {
4007 { "total", LRU_ALL },
4008 { "file", LRU_ALL_FILE },
4009 { "anon", LRU_ALL_ANON },
4010 { "unevictable", BIT(LRU_UNEVICTABLE) },
4011 };
4012 const struct numa_stat *stat;
406eb0c9 4013 int nid;
aa9694bb 4014 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
406eb0c9 4015
fd25a9e0 4016 mem_cgroup_flush_stats();
2d146aa3 4017
25485de6 4018 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4019 seq_printf(m, "%s=%lu", stat->name,
4020 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4021 false));
4022 for_each_node_state(nid, N_MEMORY)
4023 seq_printf(m, " N%d=%lu", nid,
4024 mem_cgroup_node_nr_lru_pages(memcg, nid,
4025 stat->lru_mask, false));
25485de6 4026 seq_putc(m, '\n');
406eb0c9 4027 }
406eb0c9 4028
071aee13 4029 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4030
4031 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4032 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4033 true));
4034 for_each_node_state(nid, N_MEMORY)
4035 seq_printf(m, " N%d=%lu", nid,
4036 mem_cgroup_node_nr_lru_pages(memcg, nid,
4037 stat->lru_mask, true));
071aee13 4038 seq_putc(m, '\n');
406eb0c9 4039 }
406eb0c9 4040
406eb0c9
YH
4041 return 0;
4042}
4043#endif /* CONFIG_NUMA */
4044
c8713d0b 4045static const unsigned int memcg1_stats[] = {
0d1c2072 4046 NR_FILE_PAGES,
be5d0a74 4047 NR_ANON_MAPPED,
468c3982
JW
4048#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4049 NR_ANON_THPS,
4050#endif
c8713d0b
JW
4051 NR_SHMEM,
4052 NR_FILE_MAPPED,
4053 NR_FILE_DIRTY,
4054 NR_WRITEBACK,
e09b0b61
YS
4055 WORKINGSET_REFAULT_ANON,
4056 WORKINGSET_REFAULT_FILE,
c8713d0b
JW
4057 MEMCG_SWAP,
4058};
4059
4060static const char *const memcg1_stat_names[] = {
4061 "cache",
4062 "rss",
468c3982 4063#ifdef CONFIG_TRANSPARENT_HUGEPAGE
c8713d0b 4064 "rss_huge",
468c3982 4065#endif
c8713d0b
JW
4066 "shmem",
4067 "mapped_file",
4068 "dirty",
4069 "writeback",
e09b0b61
YS
4070 "workingset_refault_anon",
4071 "workingset_refault_file",
c8713d0b
JW
4072 "swap",
4073};
4074
df0e53d0 4075/* Universal VM events cgroup1 shows, original sort order */
8dd53fd3 4076static const unsigned int memcg1_events[] = {
df0e53d0
JW
4077 PGPGIN,
4078 PGPGOUT,
4079 PGFAULT,
4080 PGMAJFAULT,
4081};
4082
2da8ca82 4083static int memcg_stat_show(struct seq_file *m, void *v)
d2ceb9b7 4084{
aa9694bb 4085 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3e32cb2e 4086 unsigned long memory, memsw;
af7c4b0e
JW
4087 struct mem_cgroup *mi;
4088 unsigned int i;
406eb0c9 4089
71cd3113 4090 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
70bc068c 4091
fd25a9e0 4092 mem_cgroup_flush_stats();
2d146aa3 4093
71cd3113 4094 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
468c3982
JW
4095 unsigned long nr;
4096
71cd3113 4097 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
1dd3a273 4098 continue;
468c3982 4099 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
e09b0b61
YS
4100 seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
4101 nr * memcg_page_state_unit(memcg1_stats[i]));
1dd3a273 4102 }
7b854121 4103
df0e53d0 4104 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
ebc5d83d 4105 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
205b20cc 4106 memcg_events_local(memcg, memcg1_events[i]));
af7c4b0e
JW
4107
4108 for (i = 0; i < NR_LRU_LISTS; i++)
ebc5d83d 4109 seq_printf(m, "%s %lu\n", lru_list_name(i),
205b20cc 4110 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
21d89d15 4111 PAGE_SIZE);
af7c4b0e 4112
14067bb3 4113 /* Hierarchical information */
3e32cb2e
JW
4114 memory = memsw = PAGE_COUNTER_MAX;
4115 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
15b42562
CD
4116 memory = min(memory, READ_ONCE(mi->memory.max));
4117 memsw = min(memsw, READ_ONCE(mi->memsw.max));
fee7b548 4118 }
3e32cb2e
JW
4119 seq_printf(m, "hierarchical_memory_limit %llu\n",
4120 (u64)memory * PAGE_SIZE);
7941d214 4121 if (do_memsw_account())
3e32cb2e
JW
4122 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4123 (u64)memsw * PAGE_SIZE);
7f016ee8 4124
8de7ecc6 4125 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
7de2e9f1 4126 unsigned long nr;
4127
71cd3113 4128 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
1dd3a273 4129 continue;
7de2e9f1 4130 nr = memcg_page_state(memcg, memcg1_stats[i]);
8de7ecc6 4131 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
e09b0b61 4132 (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
af7c4b0e
JW
4133 }
4134
8de7ecc6 4135 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
ebc5d83d
KK
4136 seq_printf(m, "total_%s %llu\n",
4137 vm_event_name(memcg1_events[i]),
dd923990 4138 (u64)memcg_events(memcg, memcg1_events[i]));
af7c4b0e 4139
8de7ecc6 4140 for (i = 0; i < NR_LRU_LISTS; i++)
ebc5d83d 4141 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
42a30035
JW
4142 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4143 PAGE_SIZE);
14067bb3 4144
7f016ee8 4145#ifdef CONFIG_DEBUG_VM
7f016ee8 4146 {
ef8f2327
MG
4147 pg_data_t *pgdat;
4148 struct mem_cgroup_per_node *mz;
1431d4d1
JW
4149 unsigned long anon_cost = 0;
4150 unsigned long file_cost = 0;
7f016ee8 4151
ef8f2327 4152 for_each_online_pgdat(pgdat) {
a3747b53 4153 mz = memcg->nodeinfo[pgdat->node_id];
7f016ee8 4154
1431d4d1
JW
4155 anon_cost += mz->lruvec.anon_cost;
4156 file_cost += mz->lruvec.file_cost;
ef8f2327 4157 }
1431d4d1
JW
4158 seq_printf(m, "anon_cost %lu\n", anon_cost);
4159 seq_printf(m, "file_cost %lu\n", file_cost);
7f016ee8
KM
4160 }
4161#endif
4162
d2ceb9b7
KH
4163 return 0;
4164}
4165
182446d0
TH
4166static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4167 struct cftype *cft)
a7885eb8 4168{
182446d0 4169 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4170
1f4c025b 4171 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
4172}
4173
182446d0
TH
4174static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4175 struct cftype *cft, u64 val)
a7885eb8 4176{
182446d0 4177 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4178
37bc3cb9 4179 if (val > 200)
a7885eb8
KM
4180 return -EINVAL;
4181
a4792030 4182 if (!mem_cgroup_is_root(memcg))
82b3aa26 4183 WRITE_ONCE(memcg->swappiness, val);
3dae7fec 4184 else
82b3aa26 4185 WRITE_ONCE(vm_swappiness, val);
068b38c1 4186
a7885eb8
KM
4187 return 0;
4188}
4189
2e72b634
KS
4190static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4191{
4192 struct mem_cgroup_threshold_ary *t;
3e32cb2e 4193 unsigned long usage;
2e72b634
KS
4194 int i;
4195
4196 rcu_read_lock();
4197 if (!swap)
2c488db2 4198 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 4199 else
2c488db2 4200 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
4201
4202 if (!t)
4203 goto unlock;
4204
ce00a967 4205 usage = mem_cgroup_usage(memcg, swap);
2e72b634
KS
4206
4207 /*
748dad36 4208 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
4209 * If it's not true, a threshold was crossed after last
4210 * call of __mem_cgroup_threshold().
4211 */
5407a562 4212 i = t->current_threshold;
2e72b634
KS
4213
4214 /*
4215 * Iterate backward over array of thresholds starting from
4216 * current_threshold and check if a threshold is crossed.
4217 * If none of thresholds below usage is crossed, we read
4218 * only one element of the array here.
4219 */
4220 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4221 eventfd_signal(t->entries[i].eventfd, 1);
4222
4223 /* i = current_threshold + 1 */
4224 i++;
4225
4226 /*
4227 * Iterate forward over array of thresholds starting from
4228 * current_threshold+1 and check if a threshold is crossed.
4229 * If none of thresholds above usage is crossed, we read
4230 * only one element of the array here.
4231 */
4232 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4233 eventfd_signal(t->entries[i].eventfd, 1);
4234
4235 /* Update current_threshold */
5407a562 4236 t->current_threshold = i - 1;
2e72b634
KS
4237unlock:
4238 rcu_read_unlock();
4239}
4240
4241static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4242{
ad4ca5f4
KS
4243 while (memcg) {
4244 __mem_cgroup_threshold(memcg, false);
7941d214 4245 if (do_memsw_account())
ad4ca5f4
KS
4246 __mem_cgroup_threshold(memcg, true);
4247
4248 memcg = parent_mem_cgroup(memcg);
4249 }
2e72b634
KS
4250}
4251
4252static int compare_thresholds(const void *a, const void *b)
4253{
4254 const struct mem_cgroup_threshold *_a = a;
4255 const struct mem_cgroup_threshold *_b = b;
4256
2bff24a3
GT
4257 if (_a->threshold > _b->threshold)
4258 return 1;
4259
4260 if (_a->threshold < _b->threshold)
4261 return -1;
4262
4263 return 0;
2e72b634
KS
4264}
4265
c0ff4b85 4266static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
4267{
4268 struct mem_cgroup_eventfd_list *ev;
4269
2bcf2e92
MH
4270 spin_lock(&memcg_oom_lock);
4271
c0ff4b85 4272 list_for_each_entry(ev, &memcg->oom_notify, list)
9490ff27 4273 eventfd_signal(ev->eventfd, 1);
2bcf2e92
MH
4274
4275 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4276 return 0;
4277}
4278
c0ff4b85 4279static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 4280{
7d74b06f
KH
4281 struct mem_cgroup *iter;
4282
c0ff4b85 4283 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 4284 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
4285}
4286
59b6f873 4287static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87 4288 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
2e72b634 4289{
2c488db2
KS
4290 struct mem_cgroup_thresholds *thresholds;
4291 struct mem_cgroup_threshold_ary *new;
3e32cb2e
JW
4292 unsigned long threshold;
4293 unsigned long usage;
2c488db2 4294 int i, size, ret;
2e72b634 4295
650c5e56 4296 ret = page_counter_memparse(args, "-1", &threshold);
2e72b634
KS
4297 if (ret)
4298 return ret;
4299
4300 mutex_lock(&memcg->thresholds_lock);
2c488db2 4301
05b84301 4302 if (type == _MEM) {
2c488db2 4303 thresholds = &memcg->thresholds;
ce00a967 4304 usage = mem_cgroup_usage(memcg, false);
05b84301 4305 } else if (type == _MEMSWAP) {
2c488db2 4306 thresholds = &memcg->memsw_thresholds;
ce00a967 4307 usage = mem_cgroup_usage(memcg, true);
05b84301 4308 } else
2e72b634
KS
4309 BUG();
4310
2e72b634 4311 /* Check if a threshold crossed before adding a new one */
2c488db2 4312 if (thresholds->primary)
2e72b634
KS
4313 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4314
2c488db2 4315 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
4316
4317 /* Allocate memory for new array of thresholds */
67b8046f 4318 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
2c488db2 4319 if (!new) {
2e72b634
KS
4320 ret = -ENOMEM;
4321 goto unlock;
4322 }
2c488db2 4323 new->size = size;
2e72b634
KS
4324
4325 /* Copy thresholds (if any) to new array */
e90342e6
GS
4326 if (thresholds->primary)
4327 memcpy(new->entries, thresholds->primary->entries,
4328 flex_array_size(new, entries, size - 1));
2c488db2 4329
2e72b634 4330 /* Add new threshold */
2c488db2
KS
4331 new->entries[size - 1].eventfd = eventfd;
4332 new->entries[size - 1].threshold = threshold;
2e72b634
KS
4333
4334 /* Sort thresholds. Registering of new threshold isn't time-critical */
61e604e6 4335 sort(new->entries, size, sizeof(*new->entries),
2e72b634
KS
4336 compare_thresholds, NULL);
4337
4338 /* Find current threshold */
2c488db2 4339 new->current_threshold = -1;
2e72b634 4340 for (i = 0; i < size; i++) {
748dad36 4341 if (new->entries[i].threshold <= usage) {
2e72b634 4342 /*
2c488db2
KS
4343 * new->current_threshold will not be used until
4344 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
4345 * it here.
4346 */
2c488db2 4347 ++new->current_threshold;
748dad36
SZ
4348 } else
4349 break;
2e72b634
KS
4350 }
4351
2c488db2
KS
4352 /* Free old spare buffer and save old primary buffer as spare */
4353 kfree(thresholds->spare);
4354 thresholds->spare = thresholds->primary;
4355
4356 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4357
907860ed 4358 /* To be sure that nobody uses thresholds */
2e72b634
KS
4359 synchronize_rcu();
4360
2e72b634
KS
4361unlock:
4362 mutex_unlock(&memcg->thresholds_lock);
4363
4364 return ret;
4365}
4366
59b6f873 4367static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4368 struct eventfd_ctx *eventfd, const char *args)
4369{
59b6f873 4370 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
347c4a87
TH
4371}
4372
59b6f873 4373static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4374 struct eventfd_ctx *eventfd, const char *args)
4375{
59b6f873 4376 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
347c4a87
TH
4377}
4378
59b6f873 4379static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87 4380 struct eventfd_ctx *eventfd, enum res_type type)
2e72b634 4381{
2c488db2
KS
4382 struct mem_cgroup_thresholds *thresholds;
4383 struct mem_cgroup_threshold_ary *new;
3e32cb2e 4384 unsigned long usage;
7d36665a 4385 int i, j, size, entries;
2e72b634
KS
4386
4387 mutex_lock(&memcg->thresholds_lock);
05b84301
JW
4388
4389 if (type == _MEM) {
2c488db2 4390 thresholds = &memcg->thresholds;
ce00a967 4391 usage = mem_cgroup_usage(memcg, false);
05b84301 4392 } else if (type == _MEMSWAP) {
2c488db2 4393 thresholds = &memcg->memsw_thresholds;
ce00a967 4394 usage = mem_cgroup_usage(memcg, true);
05b84301 4395 } else
2e72b634
KS
4396 BUG();
4397
371528ca
AV
4398 if (!thresholds->primary)
4399 goto unlock;
4400
2e72b634
KS
4401 /* Check if a threshold crossed before removing */
4402 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4403
4404 /* Calculate new number of threshold */
7d36665a 4405 size = entries = 0;
2c488db2
KS
4406 for (i = 0; i < thresholds->primary->size; i++) {
4407 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634 4408 size++;
7d36665a
CX
4409 else
4410 entries++;
2e72b634
KS
4411 }
4412
2c488db2 4413 new = thresholds->spare;
907860ed 4414
7d36665a
CX
4415 /* If no items related to eventfd have been cleared, nothing to do */
4416 if (!entries)
4417 goto unlock;
4418
2e72b634
KS
4419 /* Set thresholds array to NULL if we don't have thresholds */
4420 if (!size) {
2c488db2
KS
4421 kfree(new);
4422 new = NULL;
907860ed 4423 goto swap_buffers;
2e72b634
KS
4424 }
4425
2c488db2 4426 new->size = size;
2e72b634
KS
4427
4428 /* Copy thresholds and find current threshold */
2c488db2
KS
4429 new->current_threshold = -1;
4430 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4431 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
4432 continue;
4433
2c488db2 4434 new->entries[j] = thresholds->primary->entries[i];
748dad36 4435 if (new->entries[j].threshold <= usage) {
2e72b634 4436 /*
2c488db2 4437 * new->current_threshold will not be used
2e72b634
KS
4438 * until rcu_assign_pointer(), so it's safe to increment
4439 * it here.
4440 */
2c488db2 4441 ++new->current_threshold;
2e72b634
KS
4442 }
4443 j++;
4444 }
4445
907860ed 4446swap_buffers:
2c488db2
KS
4447 /* Swap primary and spare array */
4448 thresholds->spare = thresholds->primary;
8c757763 4449
2c488db2 4450 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4451
907860ed 4452 /* To be sure that nobody uses thresholds */
2e72b634 4453 synchronize_rcu();
6611d8d7
MC
4454
4455 /* If all events are unregistered, free the spare array */
4456 if (!new) {
4457 kfree(thresholds->spare);
4458 thresholds->spare = NULL;
4459 }
371528ca 4460unlock:
2e72b634 4461 mutex_unlock(&memcg->thresholds_lock);
2e72b634 4462}
c1e862c1 4463
59b6f873 4464static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4465 struct eventfd_ctx *eventfd)
4466{
59b6f873 4467 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
347c4a87
TH
4468}
4469
59b6f873 4470static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4471 struct eventfd_ctx *eventfd)
4472{
59b6f873 4473 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
347c4a87
TH
4474}
4475
59b6f873 4476static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
347c4a87 4477 struct eventfd_ctx *eventfd, const char *args)
9490ff27 4478{
9490ff27 4479 struct mem_cgroup_eventfd_list *event;
9490ff27 4480
9490ff27
KH
4481 event = kmalloc(sizeof(*event), GFP_KERNEL);
4482 if (!event)
4483 return -ENOMEM;
4484
1af8efe9 4485 spin_lock(&memcg_oom_lock);
9490ff27
KH
4486
4487 event->eventfd = eventfd;
4488 list_add(&event->list, &memcg->oom_notify);
4489
4490 /* already in OOM ? */
c2b42d3c 4491 if (memcg->under_oom)
9490ff27 4492 eventfd_signal(eventfd, 1);
1af8efe9 4493 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4494
4495 return 0;
4496}
4497
59b6f873 4498static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
347c4a87 4499 struct eventfd_ctx *eventfd)
9490ff27 4500{
9490ff27 4501 struct mem_cgroup_eventfd_list *ev, *tmp;
9490ff27 4502
1af8efe9 4503 spin_lock(&memcg_oom_lock);
9490ff27 4504
c0ff4b85 4505 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
4506 if (ev->eventfd == eventfd) {
4507 list_del(&ev->list);
4508 kfree(ev);
4509 }
4510 }
4511
1af8efe9 4512 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4513}
4514
2da8ca82 4515static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3c11ecf4 4516{
aa9694bb 4517 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
3c11ecf4 4518
17c56de6 4519 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
c2b42d3c 4520 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
fe6bdfc8
RG
4521 seq_printf(sf, "oom_kill %lu\n",
4522 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
3c11ecf4
KH
4523 return 0;
4524}
4525
182446d0 4526static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3c11ecf4
KH
4527 struct cftype *cft, u64 val)
4528{
182446d0 4529 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3c11ecf4
KH
4530
4531 /* cannot set to root cgroup and only 0 and 1 are allowed */
a4792030 4532 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
3c11ecf4
KH
4533 return -EINVAL;
4534
17c56de6 4535 WRITE_ONCE(memcg->oom_kill_disable, val);
4d845ebf 4536 if (!val)
c0ff4b85 4537 memcg_oom_recover(memcg);
3dae7fec 4538
3c11ecf4
KH
4539 return 0;
4540}
4541
52ebea74
TH
4542#ifdef CONFIG_CGROUP_WRITEBACK
4543
3a8e9ac8
TH
4544#include <trace/events/writeback.h>
4545
841710aa
TH
4546static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4547{
4548 return wb_domain_init(&memcg->cgwb_domain, gfp);
4549}
4550
4551static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4552{
4553 wb_domain_exit(&memcg->cgwb_domain);
4554}
4555
2529bb3a
TH
4556static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4557{
4558 wb_domain_size_changed(&memcg->cgwb_domain);
4559}
4560
841710aa
TH
4561struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4562{
4563 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4564
4565 if (!memcg->css.parent)
4566 return NULL;
4567
4568 return &memcg->cgwb_domain;
4569}
4570
c2aa723a
TH
4571/**
4572 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4573 * @wb: bdi_writeback in question
c5edf9cd
TH
4574 * @pfilepages: out parameter for number of file pages
4575 * @pheadroom: out parameter for number of allocatable pages according to memcg
c2aa723a
TH
4576 * @pdirty: out parameter for number of dirty pages
4577 * @pwriteback: out parameter for number of pages under writeback
4578 *
c5edf9cd
TH
4579 * Determine the numbers of file, headroom, dirty, and writeback pages in
4580 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4581 * is a bit more involved.
c2aa723a 4582 *
c5edf9cd
TH
4583 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4584 * headroom is calculated as the lowest headroom of itself and the
4585 * ancestors. Note that this doesn't consider the actual amount of
4586 * available memory in the system. The caller should further cap
4587 * *@pheadroom accordingly.
c2aa723a 4588 */
c5edf9cd
TH
4589void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4590 unsigned long *pheadroom, unsigned long *pdirty,
4591 unsigned long *pwriteback)
c2aa723a
TH
4592{
4593 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4594 struct mem_cgroup *parent;
c2aa723a 4595
fd25a9e0 4596 mem_cgroup_flush_stats();
c2aa723a 4597
2d146aa3
JW
4598 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4599 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4600 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4601 memcg_page_state(memcg, NR_ACTIVE_FILE);
c2aa723a 4602
2d146aa3 4603 *pheadroom = PAGE_COUNTER_MAX;
c2aa723a 4604 while ((parent = parent_mem_cgroup(memcg))) {
15b42562 4605 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
d1663a90 4606 READ_ONCE(memcg->memory.high));
c2aa723a
TH
4607 unsigned long used = page_counter_read(&memcg->memory);
4608
c5edf9cd 4609 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
c2aa723a
TH
4610 memcg = parent;
4611 }
c2aa723a
TH
4612}
4613
97b27821
TH
4614/*
4615 * Foreign dirty flushing
4616 *
4617 * There's an inherent mismatch between memcg and writeback. The former
f0953a1b 4618 * tracks ownership per-page while the latter per-inode. This was a
97b27821
TH
4619 * deliberate design decision because honoring per-page ownership in the
4620 * writeback path is complicated, may lead to higher CPU and IO overheads
4621 * and deemed unnecessary given that write-sharing an inode across
4622 * different cgroups isn't a common use-case.
4623 *
4624 * Combined with inode majority-writer ownership switching, this works well
4625 * enough in most cases but there are some pathological cases. For
4626 * example, let's say there are two cgroups A and B which keep writing to
4627 * different but confined parts of the same inode. B owns the inode and
4628 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4629 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4630 * triggering background writeback. A will be slowed down without a way to
4631 * make writeback of the dirty pages happen.
4632 *
f0953a1b 4633 * Conditions like the above can lead to a cgroup getting repeatedly and
97b27821 4634 * severely throttled after making some progress after each
f0953a1b 4635 * dirty_expire_interval while the underlying IO device is almost
97b27821
TH
4636 * completely idle.
4637 *
4638 * Solving this problem completely requires matching the ownership tracking
4639 * granularities between memcg and writeback in either direction. However,
4640 * the more egregious behaviors can be avoided by simply remembering the
4641 * most recent foreign dirtying events and initiating remote flushes on
4642 * them when local writeback isn't enough to keep the memory clean enough.
4643 *
4644 * The following two functions implement such mechanism. When a foreign
4645 * page - a page whose memcg and writeback ownerships don't match - is
4646 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4647 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4648 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4649 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4650 * foreign bdi_writebacks which haven't expired. Both the numbers of
4651 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4652 * limited to MEMCG_CGWB_FRN_CNT.
4653 *
4654 * The mechanism only remembers IDs and doesn't hold any object references.
4655 * As being wrong occasionally doesn't matter, updates and accesses to the
4656 * records are lockless and racy.
4657 */
9d8053fc 4658void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
97b27821
TH
4659 struct bdi_writeback *wb)
4660{
9d8053fc 4661 struct mem_cgroup *memcg = folio_memcg(folio);
97b27821
TH
4662 struct memcg_cgwb_frn *frn;
4663 u64 now = get_jiffies_64();
4664 u64 oldest_at = now;
4665 int oldest = -1;
4666 int i;
4667
9d8053fc 4668 trace_track_foreign_dirty(folio, wb);
3a8e9ac8 4669
97b27821
TH
4670 /*
4671 * Pick the slot to use. If there is already a slot for @wb, keep
4672 * using it. If not replace the oldest one which isn't being
4673 * written out.
4674 */
4675 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4676 frn = &memcg->cgwb_frn[i];
4677 if (frn->bdi_id == wb->bdi->id &&
4678 frn->memcg_id == wb->memcg_css->id)
4679 break;
4680 if (time_before64(frn->at, oldest_at) &&
4681 atomic_read(&frn->done.cnt) == 1) {
4682 oldest = i;
4683 oldest_at = frn->at;
4684 }
4685 }
4686
4687 if (i < MEMCG_CGWB_FRN_CNT) {
4688 /*
4689 * Re-using an existing one. Update timestamp lazily to
4690 * avoid making the cacheline hot. We want them to be
4691 * reasonably up-to-date and significantly shorter than
4692 * dirty_expire_interval as that's what expires the record.
4693 * Use the shorter of 1s and dirty_expire_interval / 8.
4694 */
4695 unsigned long update_intv =
4696 min_t(unsigned long, HZ,
4697 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4698
4699 if (time_before64(frn->at, now - update_intv))
4700 frn->at = now;
4701 } else if (oldest >= 0) {
4702 /* replace the oldest free one */
4703 frn = &memcg->cgwb_frn[oldest];
4704 frn->bdi_id = wb->bdi->id;
4705 frn->memcg_id = wb->memcg_css->id;
4706 frn->at = now;
4707 }
4708}
4709
4710/* issue foreign writeback flushes for recorded foreign dirtying events */
4711void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4712{
4713 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4714 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4715 u64 now = jiffies_64;
4716 int i;
4717
4718 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4719 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4720
4721 /*
4722 * If the record is older than dirty_expire_interval,
4723 * writeback on it has already started. No need to kick it
4724 * off again. Also, don't start a new one if there's
4725 * already one in flight.
4726 */
4727 if (time_after64(frn->at, now - intv) &&
4728 atomic_read(&frn->done.cnt) == 1) {
4729 frn->at = 0;
3a8e9ac8 4730 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
7490a2d2 4731 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
97b27821
TH
4732 WB_REASON_FOREIGN_FLUSH,
4733 &frn->done);
4734 }
4735 }
4736}
4737
841710aa
TH
4738#else /* CONFIG_CGROUP_WRITEBACK */
4739
4740static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4741{
4742 return 0;
4743}
4744
4745static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4746{
4747}
4748
2529bb3a
TH
4749static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4750{
4751}
4752
52ebea74
TH
4753#endif /* CONFIG_CGROUP_WRITEBACK */
4754
3bc942f3
TH
4755/*
4756 * DO NOT USE IN NEW FILES.
4757 *
4758 * "cgroup.event_control" implementation.
4759 *
4760 * This is way over-engineered. It tries to support fully configurable
4761 * events for each user. Such level of flexibility is completely
4762 * unnecessary especially in the light of the planned unified hierarchy.
4763 *
4764 * Please deprecate this and replace with something simpler if at all
4765 * possible.
4766 */
4767
79bd9814
TH
4768/*
4769 * Unregister event and free resources.
4770 *
4771 * Gets called from workqueue.
4772 */
3bc942f3 4773static void memcg_event_remove(struct work_struct *work)
79bd9814 4774{
3bc942f3
TH
4775 struct mem_cgroup_event *event =
4776 container_of(work, struct mem_cgroup_event, remove);
59b6f873 4777 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
4778
4779 remove_wait_queue(event->wqh, &event->wait);
4780
59b6f873 4781 event->unregister_event(memcg, event->eventfd);
79bd9814
TH
4782
4783 /* Notify userspace the event is going away. */
4784 eventfd_signal(event->eventfd, 1);
4785
4786 eventfd_ctx_put(event->eventfd);
4787 kfree(event);
59b6f873 4788 css_put(&memcg->css);
79bd9814
TH
4789}
4790
4791/*
a9a08845 4792 * Gets called on EPOLLHUP on eventfd when user closes it.
79bd9814
TH
4793 *
4794 * Called with wqh->lock held and interrupts disabled.
4795 */
ac6424b9 4796static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3bc942f3 4797 int sync, void *key)
79bd9814 4798{
3bc942f3
TH
4799 struct mem_cgroup_event *event =
4800 container_of(wait, struct mem_cgroup_event, wait);
59b6f873 4801 struct mem_cgroup *memcg = event->memcg;
3ad6f93e 4802 __poll_t flags = key_to_poll(key);
79bd9814 4803
a9a08845 4804 if (flags & EPOLLHUP) {
79bd9814
TH
4805 /*
4806 * If the event has been detached at cgroup removal, we
4807 * can simply return knowing the other side will cleanup
4808 * for us.
4809 *
4810 * We can't race against event freeing since the other
4811 * side will require wqh->lock via remove_wait_queue(),
4812 * which we hold.
4813 */
fba94807 4814 spin_lock(&memcg->event_list_lock);
79bd9814
TH
4815 if (!list_empty(&event->list)) {
4816 list_del_init(&event->list);
4817 /*
4818 * We are in atomic context, but cgroup_event_remove()
4819 * may sleep, so we have to call it in workqueue.
4820 */
4821 schedule_work(&event->remove);
4822 }
fba94807 4823 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
4824 }
4825
4826 return 0;
4827}
4828
3bc942f3 4829static void memcg_event_ptable_queue_proc(struct file *file,
79bd9814
TH
4830 wait_queue_head_t *wqh, poll_table *pt)
4831{
3bc942f3
TH
4832 struct mem_cgroup_event *event =
4833 container_of(pt, struct mem_cgroup_event, pt);
79bd9814
TH
4834
4835 event->wqh = wqh;
4836 add_wait_queue(wqh, &event->wait);
4837}
4838
4839/*
3bc942f3
TH
4840 * DO NOT USE IN NEW FILES.
4841 *
79bd9814
TH
4842 * Parse input and register new cgroup event handler.
4843 *
4844 * Input must be in format '<event_fd> <control_fd> <args>'.
4845 * Interpretation of args is defined by control file implementation.
4846 */
451af504
TH
4847static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4848 char *buf, size_t nbytes, loff_t off)
79bd9814 4849{
451af504 4850 struct cgroup_subsys_state *css = of_css(of);
fba94807 4851 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 4852 struct mem_cgroup_event *event;
79bd9814
TH
4853 struct cgroup_subsys_state *cfile_css;
4854 unsigned int efd, cfd;
4855 struct fd efile;
4856 struct fd cfile;
4a7ba45b 4857 struct dentry *cdentry;
fba94807 4858 const char *name;
79bd9814
TH
4859 char *endp;
4860 int ret;
4861
2343e88d
SAS
4862 if (IS_ENABLED(CONFIG_PREEMPT_RT))
4863 return -EOPNOTSUPP;
4864
451af504
TH
4865 buf = strstrip(buf);
4866
4867 efd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
4868 if (*endp != ' ')
4869 return -EINVAL;
451af504 4870 buf = endp + 1;
79bd9814 4871
451af504 4872 cfd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
4873 if ((*endp != ' ') && (*endp != '\0'))
4874 return -EINVAL;
451af504 4875 buf = endp + 1;
79bd9814
TH
4876
4877 event = kzalloc(sizeof(*event), GFP_KERNEL);
4878 if (!event)
4879 return -ENOMEM;
4880
59b6f873 4881 event->memcg = memcg;
79bd9814 4882 INIT_LIST_HEAD(&event->list);
3bc942f3
TH
4883 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4884 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4885 INIT_WORK(&event->remove, memcg_event_remove);
79bd9814
TH
4886
4887 efile = fdget(efd);
4888 if (!efile.file) {
4889 ret = -EBADF;
4890 goto out_kfree;
4891 }
4892
4893 event->eventfd = eventfd_ctx_fileget(efile.file);
4894 if (IS_ERR(event->eventfd)) {
4895 ret = PTR_ERR(event->eventfd);
4896 goto out_put_efile;
4897 }
4898
4899 cfile = fdget(cfd);
4900 if (!cfile.file) {
4901 ret = -EBADF;
4902 goto out_put_eventfd;
4903 }
4904
4905 /* the process need read permission on control file */
4906 /* AV: shouldn't we check that it's been opened for read instead? */
02f92b38 4907 ret = file_permission(cfile.file, MAY_READ);
79bd9814
TH
4908 if (ret < 0)
4909 goto out_put_cfile;
4910
4a7ba45b
TH
4911 /*
4912 * The control file must be a regular cgroup1 file. As a regular cgroup
4913 * file can't be renamed, it's safe to access its name afterwards.
4914 */
4915 cdentry = cfile.file->f_path.dentry;
4916 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
4917 ret = -EINVAL;
4918 goto out_put_cfile;
4919 }
4920
fba94807
TH
4921 /*
4922 * Determine the event callbacks and set them in @event. This used
4923 * to be done via struct cftype but cgroup core no longer knows
4924 * about these events. The following is crude but the whole thing
4925 * is for compatibility anyway.
3bc942f3
TH
4926 *
4927 * DO NOT ADD NEW FILES.
fba94807 4928 */
4a7ba45b 4929 name = cdentry->d_name.name;
fba94807
TH
4930
4931 if (!strcmp(name, "memory.usage_in_bytes")) {
4932 event->register_event = mem_cgroup_usage_register_event;
4933 event->unregister_event = mem_cgroup_usage_unregister_event;
4934 } else if (!strcmp(name, "memory.oom_control")) {
4935 event->register_event = mem_cgroup_oom_register_event;
4936 event->unregister_event = mem_cgroup_oom_unregister_event;
4937 } else if (!strcmp(name, "memory.pressure_level")) {
4938 event->register_event = vmpressure_register_event;
4939 event->unregister_event = vmpressure_unregister_event;
4940 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
347c4a87
TH
4941 event->register_event = memsw_cgroup_usage_register_event;
4942 event->unregister_event = memsw_cgroup_usage_unregister_event;
fba94807
TH
4943 } else {
4944 ret = -EINVAL;
4945 goto out_put_cfile;
4946 }
4947
79bd9814 4948 /*
b5557c4c
TH
4949 * Verify @cfile should belong to @css. Also, remaining events are
4950 * automatically removed on cgroup destruction but the removal is
4951 * asynchronous, so take an extra ref on @css.
79bd9814 4952 */
4a7ba45b 4953 cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
ec903c0c 4954 &memory_cgrp_subsys);
79bd9814 4955 ret = -EINVAL;
5a17f543 4956 if (IS_ERR(cfile_css))
79bd9814 4957 goto out_put_cfile;
5a17f543
TH
4958 if (cfile_css != css) {
4959 css_put(cfile_css);
79bd9814 4960 goto out_put_cfile;
5a17f543 4961 }
79bd9814 4962
451af504 4963 ret = event->register_event(memcg, event->eventfd, buf);
79bd9814
TH
4964 if (ret)
4965 goto out_put_css;
4966
9965ed17 4967 vfs_poll(efile.file, &event->pt);
79bd9814 4968
4ba9515d 4969 spin_lock_irq(&memcg->event_list_lock);
fba94807 4970 list_add(&event->list, &memcg->event_list);
4ba9515d 4971 spin_unlock_irq(&memcg->event_list_lock);
79bd9814
TH
4972
4973 fdput(cfile);
4974 fdput(efile);
4975
451af504 4976 return nbytes;
79bd9814
TH
4977
4978out_put_css:
b5557c4c 4979 css_put(css);
79bd9814
TH
4980out_put_cfile:
4981 fdput(cfile);
4982out_put_eventfd:
4983 eventfd_ctx_put(event->eventfd);
4984out_put_efile:
4985 fdput(efile);
4986out_kfree:
4987 kfree(event);
4988
4989 return ret;
4990}
4991
c29b5b3d
MS
4992#if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4993static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4994{
4995 /*
4996 * Deprecated.
df4ae285 4997 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
c29b5b3d
MS
4998 */
4999 return 0;
5000}
5001#endif
5002
241994ed 5003static struct cftype mem_cgroup_legacy_files[] = {
8cdea7c0 5004 {
0eea1030 5005 .name = "usage_in_bytes",
8c7c6e34 5006 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
791badbd 5007 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5008 },
c84872e1
PE
5009 {
5010 .name = "max_usage_in_bytes",
8c7c6e34 5011 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6770c64e 5012 .write = mem_cgroup_reset,
791badbd 5013 .read_u64 = mem_cgroup_read_u64,
c84872e1 5014 },
8cdea7c0 5015 {
0eea1030 5016 .name = "limit_in_bytes",
8c7c6e34 5017 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
451af504 5018 .write = mem_cgroup_write,
791badbd 5019 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5020 },
296c81d8
BS
5021 {
5022 .name = "soft_limit_in_bytes",
5023 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
451af504 5024 .write = mem_cgroup_write,
791badbd 5025 .read_u64 = mem_cgroup_read_u64,
296c81d8 5026 },
8cdea7c0
BS
5027 {
5028 .name = "failcnt",
8c7c6e34 5029 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6770c64e 5030 .write = mem_cgroup_reset,
791badbd 5031 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5032 },
d2ceb9b7
KH
5033 {
5034 .name = "stat",
2da8ca82 5035 .seq_show = memcg_stat_show,
d2ceb9b7 5036 },
c1e862c1
KH
5037 {
5038 .name = "force_empty",
6770c64e 5039 .write = mem_cgroup_force_empty_write,
c1e862c1 5040 },
18f59ea7
BS
5041 {
5042 .name = "use_hierarchy",
5043 .write_u64 = mem_cgroup_hierarchy_write,
5044 .read_u64 = mem_cgroup_hierarchy_read,
5045 },
79bd9814 5046 {
3bc942f3 5047 .name = "cgroup.event_control", /* XXX: for compat */
451af504 5048 .write = memcg_write_event_control,
7dbdb199 5049 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
79bd9814 5050 },
a7885eb8
KM
5051 {
5052 .name = "swappiness",
5053 .read_u64 = mem_cgroup_swappiness_read,
5054 .write_u64 = mem_cgroup_swappiness_write,
5055 },
7dc74be0
DN
5056 {
5057 .name = "move_charge_at_immigrate",
5058 .read_u64 = mem_cgroup_move_charge_read,
5059 .write_u64 = mem_cgroup_move_charge_write,
5060 },
9490ff27
KH
5061 {
5062 .name = "oom_control",
2da8ca82 5063 .seq_show = mem_cgroup_oom_control_read,
3c11ecf4 5064 .write_u64 = mem_cgroup_oom_control_write,
9490ff27 5065 },
70ddf637
AV
5066 {
5067 .name = "pressure_level",
70ddf637 5068 },
406eb0c9
YH
5069#ifdef CONFIG_NUMA
5070 {
5071 .name = "numa_stat",
2da8ca82 5072 .seq_show = memcg_numa_stat_show,
406eb0c9
YH
5073 },
5074#endif
510fc4e1
GC
5075 {
5076 .name = "kmem.limit_in_bytes",
5077 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
451af504 5078 .write = mem_cgroup_write,
791badbd 5079 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5080 },
5081 {
5082 .name = "kmem.usage_in_bytes",
5083 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
791badbd 5084 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5085 },
5086 {
5087 .name = "kmem.failcnt",
5088 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6770c64e 5089 .write = mem_cgroup_reset,
791badbd 5090 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5091 },
5092 {
5093 .name = "kmem.max_usage_in_bytes",
5094 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6770c64e 5095 .write = mem_cgroup_reset,
791badbd 5096 .read_u64 = mem_cgroup_read_u64,
510fc4e1 5097 },
a87425a3
YS
5098#if defined(CONFIG_MEMCG_KMEM) && \
5099 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
749c5415
GC
5100 {
5101 .name = "kmem.slabinfo",
c29b5b3d 5102 .seq_show = mem_cgroup_slab_show,
749c5415
GC
5103 },
5104#endif
d55f90bf
VD
5105 {
5106 .name = "kmem.tcp.limit_in_bytes",
5107 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5108 .write = mem_cgroup_write,
5109 .read_u64 = mem_cgroup_read_u64,
5110 },
5111 {
5112 .name = "kmem.tcp.usage_in_bytes",
5113 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5114 .read_u64 = mem_cgroup_read_u64,
5115 },
5116 {
5117 .name = "kmem.tcp.failcnt",
5118 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5119 .write = mem_cgroup_reset,
5120 .read_u64 = mem_cgroup_read_u64,
5121 },
5122 {
5123 .name = "kmem.tcp.max_usage_in_bytes",
5124 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5125 .write = mem_cgroup_reset,
5126 .read_u64 = mem_cgroup_read_u64,
5127 },
6bc10349 5128 { }, /* terminate */
af36f906 5129};
8c7c6e34 5130
73f576c0
JW
5131/*
5132 * Private memory cgroup IDR
5133 *
5134 * Swap-out records and page cache shadow entries need to store memcg
5135 * references in constrained space, so we maintain an ID space that is
5136 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5137 * memory-controlled cgroups to 64k.
5138 *
b8f2935f 5139 * However, there usually are many references to the offline CSS after
73f576c0
JW
5140 * the cgroup has been destroyed, such as page cache or reclaimable
5141 * slab objects, that don't need to hang on to the ID. We want to keep
5142 * those dead CSS from occupying IDs, or we might quickly exhaust the
5143 * relatively small ID space and prevent the creation of new cgroups
5144 * even when there are much fewer than 64k cgroups - possibly none.
5145 *
5146 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5147 * be freed and recycled when it's no longer needed, which is usually
5148 * when the CSS is offlined.
5149 *
5150 * The only exception to that are records of swapped out tmpfs/shmem
5151 * pages that need to be attributed to live ancestors on swapin. But
5152 * those references are manageable from userspace.
5153 */
5154
5155static DEFINE_IDR(mem_cgroup_idr);
5156
7e97de0b
KT
5157static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5158{
5159 if (memcg->id.id > 0) {
5160 idr_remove(&mem_cgroup_idr, memcg->id.id);
5161 memcg->id.id = 0;
5162 }
5163}
5164
c1514c0a
VF
5165static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5166 unsigned int n)
73f576c0 5167{
1c2d479a 5168 refcount_add(n, &memcg->id.ref);
73f576c0
JW
5169}
5170
615d66c3 5171static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
73f576c0 5172{
1c2d479a 5173 if (refcount_sub_and_test(n, &memcg->id.ref)) {
7e97de0b 5174 mem_cgroup_id_remove(memcg);
73f576c0
JW
5175
5176 /* Memcg ID pins CSS */
5177 css_put(&memcg->css);
5178 }
5179}
5180
615d66c3
VD
5181static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5182{
5183 mem_cgroup_id_put_many(memcg, 1);
5184}
5185
73f576c0
JW
5186/**
5187 * mem_cgroup_from_id - look up a memcg from a memcg id
5188 * @id: the memcg id to look up
5189 *
5190 * Caller must hold rcu_read_lock().
5191 */
5192struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5193{
5194 WARN_ON_ONCE(!rcu_read_lock_held());
5195 return idr_find(&mem_cgroup_idr, id);
5196}
5197
c15187a4
RG
5198#ifdef CONFIG_SHRINKER_DEBUG
5199struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5200{
5201 struct cgroup *cgrp;
5202 struct cgroup_subsys_state *css;
5203 struct mem_cgroup *memcg;
5204
5205 cgrp = cgroup_get_from_id(ino);
fa7e439c 5206 if (IS_ERR(cgrp))
c0f2df49 5207 return ERR_CAST(cgrp);
c15187a4
RG
5208
5209 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5210 if (css)
5211 memcg = container_of(css, struct mem_cgroup, css);
5212 else
5213 memcg = ERR_PTR(-ENOENT);
5214
5215 cgroup_put(cgrp);
5216
5217 return memcg;
5218}
5219#endif
5220
ef8f2327 5221static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
5222{
5223 struct mem_cgroup_per_node *pn;
8c9bb398
WY
5224
5225 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
6d12e2d8
KH
5226 if (!pn)
5227 return 1;
1ecaab2b 5228
7e1c0d6f
SB
5229 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5230 GFP_KERNEL_ACCOUNT);
5231 if (!pn->lruvec_stats_percpu) {
00f3ca2c
JW
5232 kfree(pn);
5233 return 1;
5234 }
5235
ef8f2327 5236 lruvec_init(&pn->lruvec);
ef8f2327
MG
5237 pn->memcg = memcg;
5238
54f72fe0 5239 memcg->nodeinfo[node] = pn;
6d12e2d8
KH
5240 return 0;
5241}
5242
ef8f2327 5243static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
1ecaab2b 5244{
00f3ca2c
JW
5245 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5246
4eaf431f
MH
5247 if (!pn)
5248 return;
5249
7e1c0d6f 5250 free_percpu(pn->lruvec_stats_percpu);
00f3ca2c 5251 kfree(pn);
1ecaab2b
KH
5252}
5253
40e952f9 5254static void __mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 5255{
c8b2a36f 5256 int node;
59927fb9 5257
c8b2a36f 5258 for_each_node(node)
ef8f2327 5259 free_mem_cgroup_per_node_info(memcg, node);
410f8e82 5260 kfree(memcg->vmstats);
871789d4 5261 free_percpu(memcg->vmstats_percpu);
8ff69e2c 5262 kfree(memcg);
59927fb9 5263}
3afe36b1 5264
40e952f9
TE
5265static void mem_cgroup_free(struct mem_cgroup *memcg)
5266{
ec1c86b2 5267 lru_gen_exit_memcg(memcg);
40e952f9
TE
5268 memcg_wb_domain_exit(memcg);
5269 __mem_cgroup_free(memcg);
5270}
5271
0b8f73e1 5272static struct mem_cgroup *mem_cgroup_alloc(void)
8cdea7c0 5273{
d142e3e6 5274 struct mem_cgroup *memcg;
6d12e2d8 5275 int node;
97b27821 5276 int __maybe_unused i;
11d67612 5277 long error = -ENOMEM;
8cdea7c0 5278
06b2c3b0 5279 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
c0ff4b85 5280 if (!memcg)
11d67612 5281 return ERR_PTR(error);
0b8f73e1 5282
73f576c0 5283 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
be740503 5284 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
11d67612
YS
5285 if (memcg->id.id < 0) {
5286 error = memcg->id.id;
73f576c0 5287 goto fail;
11d67612 5288 }
73f576c0 5289
410f8e82
SB
5290 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5291 if (!memcg->vmstats)
5292 goto fail;
5293
3e38e0aa
RG
5294 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5295 GFP_KERNEL_ACCOUNT);
871789d4 5296 if (!memcg->vmstats_percpu)
0b8f73e1 5297 goto fail;
78fb7466 5298
3ed28fa1 5299 for_each_node(node)
ef8f2327 5300 if (alloc_mem_cgroup_per_node_info(memcg, node))
0b8f73e1 5301 goto fail;
f64c3f54 5302
0b8f73e1
JW
5303 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5304 goto fail;
28dbc4b6 5305
f7e1cb6e 5306 INIT_WORK(&memcg->high_work, high_work_func);
d142e3e6 5307 INIT_LIST_HEAD(&memcg->oom_notify);
d142e3e6
GC
5308 mutex_init(&memcg->thresholds_lock);
5309 spin_lock_init(&memcg->move_lock);
70ddf637 5310 vmpressure_init(&memcg->vmpressure);
fba94807
TH
5311 INIT_LIST_HEAD(&memcg->event_list);
5312 spin_lock_init(&memcg->event_list_lock);
d886f4e4 5313 memcg->socket_pressure = jiffies;
84c07d11 5314#ifdef CONFIG_MEMCG_KMEM
900a38f0 5315 memcg->kmemcg_id = -1;
bf4f0599 5316 INIT_LIST_HEAD(&memcg->objcg_list);
900a38f0 5317#endif
52ebea74
TH
5318#ifdef CONFIG_CGROUP_WRITEBACK
5319 INIT_LIST_HEAD(&memcg->cgwb_list);
97b27821
TH
5320 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5321 memcg->cgwb_frn[i].done =
5322 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
87eaceb3
YS
5323#endif
5324#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5325 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5326 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5327 memcg->deferred_split_queue.split_queue_len = 0;
52ebea74 5328#endif
73f576c0 5329 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
ec1c86b2 5330 lru_gen_init_memcg(memcg);
0b8f73e1
JW
5331 return memcg;
5332fail:
7e97de0b 5333 mem_cgroup_id_remove(memcg);
40e952f9 5334 __mem_cgroup_free(memcg);
11d67612 5335 return ERR_PTR(error);
d142e3e6
GC
5336}
5337
0b8f73e1
JW
5338static struct cgroup_subsys_state * __ref
5339mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
d142e3e6 5340{
0b8f73e1 5341 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
b87d8cef 5342 struct mem_cgroup *memcg, *old_memcg;
d142e3e6 5343
b87d8cef 5344 old_memcg = set_active_memcg(parent);
0b8f73e1 5345 memcg = mem_cgroup_alloc();
b87d8cef 5346 set_active_memcg(old_memcg);
11d67612
YS
5347 if (IS_ERR(memcg))
5348 return ERR_CAST(memcg);
d142e3e6 5349
d1663a90 5350 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
2178e20c 5351 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
f4840ccf
JW
5352#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5353 memcg->zswap_max = PAGE_COUNTER_MAX;
5354#endif
4b82ab4f 5355 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
0b8f73e1 5356 if (parent) {
82b3aa26 5357 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
17c56de6 5358 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
bef8620c 5359
3e32cb2e 5360 page_counter_init(&memcg->memory, &parent->memory);
37e84351 5361 page_counter_init(&memcg->swap, &parent->swap);
3e32cb2e 5362 page_counter_init(&memcg->kmem, &parent->kmem);
0db15298 5363 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
18f59ea7 5364 } else {
8278f1c7 5365 init_memcg_events();
bef8620c
RG
5366 page_counter_init(&memcg->memory, NULL);
5367 page_counter_init(&memcg->swap, NULL);
5368 page_counter_init(&memcg->kmem, NULL);
5369 page_counter_init(&memcg->tcpmem, NULL);
d6441637 5370
0b8f73e1
JW
5371 root_mem_cgroup = memcg;
5372 return &memcg->css;
5373 }
5374
f7e1cb6e 5375 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5376 static_branch_inc(&memcg_sockets_enabled_key);
f7e1cb6e 5377
b6c1a8af
YS
5378#if defined(CONFIG_MEMCG_KMEM)
5379 if (!cgroup_memory_nobpf)
5380 static_branch_inc(&memcg_bpf_enabled_key);
5381#endif
5382
0b8f73e1 5383 return &memcg->css;
0b8f73e1
JW
5384}
5385
73f576c0 5386static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
0b8f73e1 5387{
58fa2a55
VD
5388 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5389
da0efe30
MS
5390 if (memcg_online_kmem(memcg))
5391 goto remove_id;
5392
0a4465d3 5393 /*
e4262c4f 5394 * A memcg must be visible for expand_shrinker_info()
0a4465d3
KT
5395 * by the time the maps are allocated. So, we allocate maps
5396 * here, when for_each_mem_cgroup() can't skip it.
5397 */
da0efe30
MS
5398 if (alloc_shrinker_info(memcg))
5399 goto offline_kmem;
0a4465d3 5400
73f576c0 5401 /* Online state pins memcg ID, memcg ID pins CSS */
1c2d479a 5402 refcount_set(&memcg->id.ref, 1);
73f576c0 5403 css_get(css);
aa48e47e
SB
5404
5405 if (unlikely(mem_cgroup_is_root(memcg)))
5406 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5407 2UL*HZ);
e4dde56c 5408 lru_gen_online_memcg(memcg);
2f7dd7a4 5409 return 0;
da0efe30
MS
5410offline_kmem:
5411 memcg_offline_kmem(memcg);
5412remove_id:
5413 mem_cgroup_id_remove(memcg);
5414 return -ENOMEM;
8cdea7c0
BS
5415}
5416
eb95419b 5417static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
df878fb0 5418{
eb95419b 5419 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5420 struct mem_cgroup_event *event, *tmp;
79bd9814
TH
5421
5422 /*
5423 * Unregister events and notify userspace.
5424 * Notify userspace about cgroup removing only after rmdir of cgroup
5425 * directory to avoid race between userspace and kernelspace.
5426 */
4ba9515d 5427 spin_lock_irq(&memcg->event_list_lock);
fba94807 5428 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
79bd9814
TH
5429 list_del_init(&event->list);
5430 schedule_work(&event->remove);
5431 }
4ba9515d 5432 spin_unlock_irq(&memcg->event_list_lock);
ec64f515 5433
bf8d5d52 5434 page_counter_set_min(&memcg->memory, 0);
23067153 5435 page_counter_set_low(&memcg->memory, 0);
63677c74 5436
567e9ab2 5437 memcg_offline_kmem(memcg);
a178015c 5438 reparent_shrinker_deferred(memcg);
52ebea74 5439 wb_memcg_offline(memcg);
e4dde56c 5440 lru_gen_offline_memcg(memcg);
73f576c0 5441
591edfb1
RG
5442 drain_all_stock(memcg);
5443
73f576c0 5444 mem_cgroup_id_put(memcg);
df878fb0
KH
5445}
5446
6df38689
VD
5447static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5448{
5449 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5450
5451 invalidate_reclaim_iterators(memcg);
e4dde56c 5452 lru_gen_release_memcg(memcg);
6df38689
VD
5453}
5454
eb95419b 5455static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
8cdea7c0 5456{
eb95419b 5457 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
97b27821 5458 int __maybe_unused i;
c268e994 5459
97b27821
TH
5460#ifdef CONFIG_CGROUP_WRITEBACK
5461 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5462 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5463#endif
f7e1cb6e 5464 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5465 static_branch_dec(&memcg_sockets_enabled_key);
127424c8 5466
0db15298 5467 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
d55f90bf 5468 static_branch_dec(&memcg_sockets_enabled_key);
3893e302 5469
b6c1a8af
YS
5470#if defined(CONFIG_MEMCG_KMEM)
5471 if (!cgroup_memory_nobpf)
5472 static_branch_dec(&memcg_bpf_enabled_key);
5473#endif
5474
0b8f73e1
JW
5475 vmpressure_cleanup(&memcg->vmpressure);
5476 cancel_work_sync(&memcg->high_work);
5477 mem_cgroup_remove_from_trees(memcg);
e4262c4f 5478 free_shrinker_info(memcg);
0b8f73e1 5479 mem_cgroup_free(memcg);
8cdea7c0
BS
5480}
5481
1ced953b
TH
5482/**
5483 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5484 * @css: the target css
5485 *
5486 * Reset the states of the mem_cgroup associated with @css. This is
5487 * invoked when the userland requests disabling on the default hierarchy
5488 * but the memcg is pinned through dependency. The memcg should stop
5489 * applying policies and should revert to the vanilla state as it may be
5490 * made visible again.
5491 *
5492 * The current implementation only resets the essential configurations.
5493 * This needs to be expanded to cover all the visible parts.
5494 */
5495static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5496{
5497 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5498
bbec2e15
RG
5499 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5500 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
bbec2e15
RG
5501 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5502 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
bf8d5d52 5503 page_counter_set_min(&memcg->memory, 0);
23067153 5504 page_counter_set_low(&memcg->memory, 0);
d1663a90 5505 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
2178e20c 5506 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
4b82ab4f 5507 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
2529bb3a 5508 memcg_wb_domain_size_changed(memcg);
1ced953b
TH
5509}
5510
2d146aa3
JW
5511static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5512{
5513 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5514 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5515 struct memcg_vmstats_percpu *statc;
5516 long delta, v;
7e1c0d6f 5517 int i, nid;
2d146aa3
JW
5518
5519 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5520
5521 for (i = 0; i < MEMCG_NR_STAT; i++) {
5522 /*
5523 * Collect the aggregated propagation counts of groups
5524 * below us. We're in a per-cpu loop here and this is
5525 * a global counter, so the first cycle will get them.
5526 */
410f8e82 5527 delta = memcg->vmstats->state_pending[i];
2d146aa3 5528 if (delta)
410f8e82 5529 memcg->vmstats->state_pending[i] = 0;
2d146aa3
JW
5530
5531 /* Add CPU changes on this level since the last flush */
5532 v = READ_ONCE(statc->state[i]);
5533 if (v != statc->state_prev[i]) {
5534 delta += v - statc->state_prev[i];
5535 statc->state_prev[i] = v;
5536 }
5537
5538 if (!delta)
5539 continue;
5540
5541 /* Aggregate counts on this level and propagate upwards */
410f8e82 5542 memcg->vmstats->state[i] += delta;
2d146aa3 5543 if (parent)
410f8e82 5544 parent->vmstats->state_pending[i] += delta;
2d146aa3
JW
5545 }
5546
8278f1c7 5547 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
410f8e82 5548 delta = memcg->vmstats->events_pending[i];
2d146aa3 5549 if (delta)
410f8e82 5550 memcg->vmstats->events_pending[i] = 0;
2d146aa3
JW
5551
5552 v = READ_ONCE(statc->events[i]);
5553 if (v != statc->events_prev[i]) {
5554 delta += v - statc->events_prev[i];
5555 statc->events_prev[i] = v;
5556 }
5557
5558 if (!delta)
5559 continue;
5560
410f8e82 5561 memcg->vmstats->events[i] += delta;
2d146aa3 5562 if (parent)
410f8e82 5563 parent->vmstats->events_pending[i] += delta;
2d146aa3 5564 }
7e1c0d6f
SB
5565
5566 for_each_node_state(nid, N_MEMORY) {
5567 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5568 struct mem_cgroup_per_node *ppn = NULL;
5569 struct lruvec_stats_percpu *lstatc;
5570
5571 if (parent)
5572 ppn = parent->nodeinfo[nid];
5573
5574 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5575
5576 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5577 delta = pn->lruvec_stats.state_pending[i];
5578 if (delta)
5579 pn->lruvec_stats.state_pending[i] = 0;
5580
5581 v = READ_ONCE(lstatc->state[i]);
5582 if (v != lstatc->state_prev[i]) {
5583 delta += v - lstatc->state_prev[i];
5584 lstatc->state_prev[i] = v;
5585 }
5586
5587 if (!delta)
5588 continue;
5589
5590 pn->lruvec_stats.state[i] += delta;
5591 if (ppn)
5592 ppn->lruvec_stats.state_pending[i] += delta;
5593 }
5594 }
2d146aa3
JW
5595}
5596
02491447 5597#ifdef CONFIG_MMU
7dc74be0 5598/* Handlers for move charge at task migration. */
854ffa8d 5599static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 5600{
05b84301 5601 int ret;
9476db97 5602
d0164adc
MG
5603 /* Try a single bulk charge without reclaim first, kswapd may wake */
5604 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
9476db97 5605 if (!ret) {
854ffa8d 5606 mc.precharge += count;
854ffa8d
DN
5607 return ret;
5608 }
9476db97 5609
3674534b 5610 /* Try charges one by one with reclaim, but do not retry */
854ffa8d 5611 while (count--) {
3674534b 5612 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
38c5d72f 5613 if (ret)
38c5d72f 5614 return ret;
854ffa8d 5615 mc.precharge++;
9476db97 5616 cond_resched();
854ffa8d 5617 }
9476db97 5618 return 0;
4ffef5fe
DN
5619}
5620
4ffef5fe
DN
5621union mc_target {
5622 struct page *page;
02491447 5623 swp_entry_t ent;
4ffef5fe
DN
5624};
5625
4ffef5fe 5626enum mc_target_type {
8d32ff84 5627 MC_TARGET_NONE = 0,
4ffef5fe 5628 MC_TARGET_PAGE,
02491447 5629 MC_TARGET_SWAP,
c733a828 5630 MC_TARGET_DEVICE,
4ffef5fe
DN
5631};
5632
90254a65
DN
5633static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5634 unsigned long addr, pte_t ptent)
4ffef5fe 5635{
25b2995a 5636 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 5637
90254a65
DN
5638 if (!page || !page_mapped(page))
5639 return NULL;
5640 if (PageAnon(page)) {
1dfab5ab 5641 if (!(mc.flags & MOVE_ANON))
90254a65 5642 return NULL;
1dfab5ab
JW
5643 } else {
5644 if (!(mc.flags & MOVE_FILE))
5645 return NULL;
5646 }
90254a65
DN
5647 if (!get_page_unless_zero(page))
5648 return NULL;
5649
5650 return page;
5651}
5652
c733a828 5653#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
90254a65 5654static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 5655 pte_t ptent, swp_entry_t *entry)
90254a65 5656{
90254a65
DN
5657 struct page *page = NULL;
5658 swp_entry_t ent = pte_to_swp_entry(ptent);
5659
9a137153 5660 if (!(mc.flags & MOVE_ANON))
90254a65 5661 return NULL;
c733a828
JG
5662
5663 /*
27674ef6
CH
5664 * Handle device private pages that are not accessible by the CPU, but
5665 * stored as special swap entries in the page table.
c733a828
JG
5666 */
5667 if (is_device_private_entry(ent)) {
af5cdaf8 5668 page = pfn_swap_entry_to_page(ent);
27674ef6 5669 if (!get_page_unless_zero(page))
c733a828
JG
5670 return NULL;
5671 return page;
5672 }
5673
9a137153
RC
5674 if (non_swap_entry(ent))
5675 return NULL;
5676
4b91355e 5677 /*
cb691e2f 5678 * Because swap_cache_get_folio() updates some statistics counter,
4b91355e
KH
5679 * we call find_get_page() with swapper_space directly.
5680 */
f6ab1f7f 5681 page = find_get_page(swap_address_space(ent), swp_offset(ent));
2d1c4980 5682 entry->val = ent.val;
90254a65
DN
5683
5684 return page;
5685}
4b91355e
KH
5686#else
5687static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 5688 pte_t ptent, swp_entry_t *entry)
4b91355e
KH
5689{
5690 return NULL;
5691}
5692#endif
90254a65 5693
87946a72 5694static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
48384b0b 5695 unsigned long addr, pte_t ptent)
87946a72 5696{
524984ff
MWO
5697 unsigned long index;
5698 struct folio *folio;
5699
87946a72
DN
5700 if (!vma->vm_file) /* anonymous vma */
5701 return NULL;
1dfab5ab 5702 if (!(mc.flags & MOVE_FILE))
87946a72
DN
5703 return NULL;
5704
524984ff 5705 /* folio is moved even if it's not RSS of this task(page-faulted). */
aa3b1895 5706 /* shmem/tmpfs may report page out on swap: account for that too. */
524984ff
MWO
5707 index = linear_page_index(vma, addr);
5708 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
66dabbb6 5709 if (IS_ERR(folio))
524984ff
MWO
5710 return NULL;
5711 return folio_file_page(folio, index);
87946a72
DN
5712}
5713
b1b0deab
CG
5714/**
5715 * mem_cgroup_move_account - move account of the page
5716 * @page: the page
25843c2b 5717 * @compound: charge the page as compound or small page
b1b0deab
CG
5718 * @from: mem_cgroup which the page is moved from.
5719 * @to: mem_cgroup which the page is moved to. @from != @to.
5720 *
4e0cf05f 5721 * The page must be locked and not on the LRU.
b1b0deab
CG
5722 *
5723 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5724 * from old cgroup.
5725 */
5726static int mem_cgroup_move_account(struct page *page,
f627c2f5 5727 bool compound,
b1b0deab
CG
5728 struct mem_cgroup *from,
5729 struct mem_cgroup *to)
5730{
fcce4672 5731 struct folio *folio = page_folio(page);
ae8af438
KK
5732 struct lruvec *from_vec, *to_vec;
5733 struct pglist_data *pgdat;
fcce4672 5734 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
8e88bd2d 5735 int nid, ret;
b1b0deab
CG
5736
5737 VM_BUG_ON(from == to);
4e0cf05f 5738 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
fcce4672 5739 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
9c325215 5740 VM_BUG_ON(compound && !folio_test_large(folio));
b1b0deab 5741
b1b0deab 5742 ret = -EINVAL;
fcce4672 5743 if (folio_memcg(folio) != from)
4e0cf05f 5744 goto out;
b1b0deab 5745
fcce4672 5746 pgdat = folio_pgdat(folio);
867e5e1d
JW
5747 from_vec = mem_cgroup_lruvec(from, pgdat);
5748 to_vec = mem_cgroup_lruvec(to, pgdat);
ae8af438 5749
fcce4672 5750 folio_memcg_lock(folio);
b1b0deab 5751
fcce4672
MWO
5752 if (folio_test_anon(folio)) {
5753 if (folio_mapped(folio)) {
be5d0a74
JW
5754 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5755 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
fcce4672 5756 if (folio_test_transhuge(folio)) {
69473e5d
MS
5757 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5758 -nr_pages);
5759 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5760 nr_pages);
468c3982 5761 }
be5d0a74
JW
5762 }
5763 } else {
0d1c2072
JW
5764 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5765 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5766
fcce4672 5767 if (folio_test_swapbacked(folio)) {
0d1c2072
JW
5768 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5769 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5770 }
5771
fcce4672 5772 if (folio_mapped(folio)) {
49e50d27
JW
5773 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5774 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5775 }
b1b0deab 5776
fcce4672
MWO
5777 if (folio_test_dirty(folio)) {
5778 struct address_space *mapping = folio_mapping(folio);
c4843a75 5779
f56753ac 5780 if (mapping_can_writeback(mapping)) {
49e50d27
JW
5781 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5782 -nr_pages);
5783 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5784 nr_pages);
5785 }
c4843a75
GT
5786 }
5787 }
5788
c449deb2
HD
5789#ifdef CONFIG_SWAP
5790 if (folio_test_swapcache(folio)) {
5791 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
5792 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
5793 }
5794#endif
fcce4672 5795 if (folio_test_writeback(folio)) {
ae8af438
KK
5796 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5797 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
b1b0deab
CG
5798 }
5799
5800 /*
abb242f5
JW
5801 * All state has been migrated, let's switch to the new memcg.
5802 *
bcfe06bf 5803 * It is safe to change page's memcg here because the page
abb242f5
JW
5804 * is referenced, charged, isolated, and locked: we can't race
5805 * with (un)charging, migration, LRU putback, or anything else
bcfe06bf 5806 * that would rely on a stable page's memory cgroup.
abb242f5
JW
5807 *
5808 * Note that lock_page_memcg is a memcg lock, not a page lock,
bcfe06bf 5809 * to save space. As soon as we switch page's memory cgroup to a
abb242f5
JW
5810 * new memcg that isn't locked, the above state can change
5811 * concurrently again. Make sure we're truly done with it.
b1b0deab 5812 */
abb242f5 5813 smp_mb();
b1b0deab 5814
1a3e1f40
JW
5815 css_get(&to->css);
5816 css_put(&from->css);
5817
fcce4672 5818 folio->memcg_data = (unsigned long)to;
87eaceb3 5819
f70ad448 5820 __folio_memcg_unlock(from);
b1b0deab
CG
5821
5822 ret = 0;
fcce4672 5823 nid = folio_nid(folio);
b1b0deab
CG
5824
5825 local_irq_disable();
6e0110c2 5826 mem_cgroup_charge_statistics(to, nr_pages);
8e88bd2d 5827 memcg_check_events(to, nid);
6e0110c2 5828 mem_cgroup_charge_statistics(from, -nr_pages);
8e88bd2d 5829 memcg_check_events(from, nid);
b1b0deab 5830 local_irq_enable();
b1b0deab
CG
5831out:
5832 return ret;
5833}
5834
7cf7806c
LR
5835/**
5836 * get_mctgt_type - get target type of moving charge
5837 * @vma: the vma the pte to be checked belongs
5838 * @addr: the address corresponding to the pte to be checked
5839 * @ptent: the pte to be checked
5840 * @target: the pointer the target page or swap ent will be stored(can be NULL)
5841 *
5842 * Returns
5843 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5844 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5845 * move charge. if @target is not NULL, the page is stored in target->page
5846 * with extra refcnt got(Callers should handle it).
5847 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5848 * target for charge migration. if @target is not NULL, the entry is stored
5849 * in target->ent.
f25cbb7a
AS
5850 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is device memory and
5851 * thus not on the lru.
df6ad698
JG
5852 * For now we such page is charge like a regular page would be as for all
5853 * intent and purposes it is just special memory taking the place of a
5854 * regular page.
c733a828
JG
5855 *
5856 * See Documentations/vm/hmm.txt and include/linux/hmm.h
7cf7806c
LR
5857 *
5858 * Called with pte lock held.
5859 */
5860
8d32ff84 5861static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
5862 unsigned long addr, pte_t ptent, union mc_target *target)
5863{
5864 struct page *page = NULL;
8d32ff84 5865 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
5866 swp_entry_t ent = { .val = 0 };
5867
5868 if (pte_present(ptent))
5869 page = mc_handle_present_pte(vma, addr, ptent);
5c041f5d
PX
5870 else if (pte_none_mostly(ptent))
5871 /*
5872 * PTE markers should be treated as a none pte here, separated
5873 * from other swap handling below.
5874 */
5875 page = mc_handle_file_pte(vma, addr, ptent);
90254a65 5876 else if (is_swap_pte(ptent))
48406ef8 5877 page = mc_handle_swap_pte(vma, ptent, &ent);
90254a65 5878
4e0cf05f
JW
5879 if (target && page) {
5880 if (!trylock_page(page)) {
5881 put_page(page);
5882 return ret;
5883 }
5884 /*
5885 * page_mapped() must be stable during the move. This
5886 * pte is locked, so if it's present, the page cannot
5887 * become unmapped. If it isn't, we have only partial
5888 * control over the mapped state: the page lock will
5889 * prevent new faults against pagecache and swapcache,
5890 * so an unmapped page cannot become mapped. However,
5891 * if the page is already mapped elsewhere, it can
5892 * unmap, and there is nothing we can do about it.
5893 * Alas, skip moving the page in this case.
5894 */
5895 if (!pte_present(ptent) && page_mapped(page)) {
5896 unlock_page(page);
5897 put_page(page);
5898 return ret;
5899 }
5900 }
5901
90254a65 5902 if (!page && !ent.val)
8d32ff84 5903 return ret;
02491447 5904 if (page) {
02491447 5905 /*
0a31bc97 5906 * Do only loose check w/o serialization.
1306a85a 5907 * mem_cgroup_move_account() checks the page is valid or
0a31bc97 5908 * not under LRU exclusion.
02491447 5909 */
bcfe06bf 5910 if (page_memcg(page) == mc.from) {
02491447 5911 ret = MC_TARGET_PAGE;
f25cbb7a
AS
5912 if (is_device_private_page(page) ||
5913 is_device_coherent_page(page))
c733a828 5914 ret = MC_TARGET_DEVICE;
02491447
DN
5915 if (target)
5916 target->page = page;
5917 }
4e0cf05f
JW
5918 if (!ret || !target) {
5919 if (target)
5920 unlock_page(page);
02491447 5921 put_page(page);
4e0cf05f 5922 }
02491447 5923 }
3e14a57b
HY
5924 /*
5925 * There is a swap entry and a page doesn't exist or isn't charged.
5926 * But we cannot move a tail-page in a THP.
5927 */
5928 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
34c00c31 5929 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
5930 ret = MC_TARGET_SWAP;
5931 if (target)
5932 target->ent = ent;
4ffef5fe 5933 }
4ffef5fe
DN
5934 return ret;
5935}
5936
12724850
NH
5937#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5938/*
d6810d73
HY
5939 * We don't consider PMD mapped swapping or file mapped pages because THP does
5940 * not support them for now.
12724850
NH
5941 * Caller should make sure that pmd_trans_huge(pmd) is true.
5942 */
5943static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5944 unsigned long addr, pmd_t pmd, union mc_target *target)
5945{
5946 struct page *page = NULL;
12724850
NH
5947 enum mc_target_type ret = MC_TARGET_NONE;
5948
84c3fc4e
ZY
5949 if (unlikely(is_swap_pmd(pmd))) {
5950 VM_BUG_ON(thp_migration_supported() &&
5951 !is_pmd_migration_entry(pmd));
5952 return ret;
5953 }
12724850 5954 page = pmd_page(pmd);
309381fe 5955 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
1dfab5ab 5956 if (!(mc.flags & MOVE_ANON))
12724850 5957 return ret;
bcfe06bf 5958 if (page_memcg(page) == mc.from) {
12724850
NH
5959 ret = MC_TARGET_PAGE;
5960 if (target) {
5961 get_page(page);
4e0cf05f
JW
5962 if (!trylock_page(page)) {
5963 put_page(page);
5964 return MC_TARGET_NONE;
5965 }
12724850
NH
5966 target->page = page;
5967 }
5968 }
5969 return ret;
5970}
5971#else
5972static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5973 unsigned long addr, pmd_t pmd, union mc_target *target)
5974{
5975 return MC_TARGET_NONE;
5976}
5977#endif
5978
4ffef5fe
DN
5979static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5980 unsigned long addr, unsigned long end,
5981 struct mm_walk *walk)
5982{
26bcd64a 5983 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
5984 pte_t *pte;
5985 spinlock_t *ptl;
5986
b6ec57f4
KS
5987 ptl = pmd_trans_huge_lock(pmd, vma);
5988 if (ptl) {
c733a828
JG
5989 /*
5990 * Note their can not be MC_TARGET_DEVICE for now as we do not
25b2995a
CH
5991 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
5992 * this might change.
c733a828 5993 */
12724850
NH
5994 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5995 mc.precharge += HPAGE_PMD_NR;
bf929152 5996 spin_unlock(ptl);
1a5a9906 5997 return 0;
12724850 5998 }
03319327 5999
45f83cef
AA
6000 if (pmd_trans_unstable(pmd))
6001 return 0;
4ffef5fe
DN
6002 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6003 for (; addr != end; pte++, addr += PAGE_SIZE)
8d32ff84 6004 if (get_mctgt_type(vma, addr, *pte, NULL))
4ffef5fe
DN
6005 mc.precharge++; /* increment precharge temporarily */
6006 pte_unmap_unlock(pte - 1, ptl);
6007 cond_resched();
6008
7dc74be0
DN
6009 return 0;
6010}
6011
7b86ac33
CH
6012static const struct mm_walk_ops precharge_walk_ops = {
6013 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6014};
6015
4ffef5fe
DN
6016static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6017{
6018 unsigned long precharge;
4ffef5fe 6019
d8ed45c5 6020 mmap_read_lock(mm);
ba0aff8e 6021 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
d8ed45c5 6022 mmap_read_unlock(mm);
4ffef5fe
DN
6023
6024 precharge = mc.precharge;
6025 mc.precharge = 0;
6026
6027 return precharge;
6028}
6029
4ffef5fe
DN
6030static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6031{
dfe076b0
DN
6032 unsigned long precharge = mem_cgroup_count_precharge(mm);
6033
6034 VM_BUG_ON(mc.moving_task);
6035 mc.moving_task = current;
6036 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
6037}
6038
dfe076b0
DN
6039/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6040static void __mem_cgroup_clear_mc(void)
4ffef5fe 6041{
2bd9bb20
KH
6042 struct mem_cgroup *from = mc.from;
6043 struct mem_cgroup *to = mc.to;
6044
4ffef5fe 6045 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d 6046 if (mc.precharge) {
00501b53 6047 cancel_charge(mc.to, mc.precharge);
854ffa8d
DN
6048 mc.precharge = 0;
6049 }
6050 /*
6051 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6052 * we must uncharge here.
6053 */
6054 if (mc.moved_charge) {
00501b53 6055 cancel_charge(mc.from, mc.moved_charge);
854ffa8d 6056 mc.moved_charge = 0;
4ffef5fe 6057 }
483c30b5
DN
6058 /* we must fixup refcnts and charges */
6059 if (mc.moved_swap) {
483c30b5 6060 /* uncharge swap account from the old cgroup */
ce00a967 6061 if (!mem_cgroup_is_root(mc.from))
3e32cb2e 6062 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
483c30b5 6063
615d66c3
VD
6064 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6065
05b84301 6066 /*
3e32cb2e
JW
6067 * we charged both to->memory and to->memsw, so we
6068 * should uncharge to->memory.
05b84301 6069 */
ce00a967 6070 if (!mem_cgroup_is_root(mc.to))
3e32cb2e
JW
6071 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6072
483c30b5
DN
6073 mc.moved_swap = 0;
6074 }
dfe076b0
DN
6075 memcg_oom_recover(from);
6076 memcg_oom_recover(to);
6077 wake_up_all(&mc.waitq);
6078}
6079
6080static void mem_cgroup_clear_mc(void)
6081{
264a0ae1
TH
6082 struct mm_struct *mm = mc.mm;
6083
dfe076b0
DN
6084 /*
6085 * we must clear moving_task before waking up waiters at the end of
6086 * task migration.
6087 */
6088 mc.moving_task = NULL;
6089 __mem_cgroup_clear_mc();
2bd9bb20 6090 spin_lock(&mc.lock);
4ffef5fe
DN
6091 mc.from = NULL;
6092 mc.to = NULL;
264a0ae1 6093 mc.mm = NULL;
2bd9bb20 6094 spin_unlock(&mc.lock);
264a0ae1
TH
6095
6096 mmput(mm);
4ffef5fe
DN
6097}
6098
1f7dd3e5 6099static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
7dc74be0 6100{
1f7dd3e5 6101 struct cgroup_subsys_state *css;
eed67d75 6102 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
9f2115f9 6103 struct mem_cgroup *from;
4530eddb 6104 struct task_struct *leader, *p;
9f2115f9 6105 struct mm_struct *mm;
1dfab5ab 6106 unsigned long move_flags;
9f2115f9 6107 int ret = 0;
7dc74be0 6108
1f7dd3e5
TH
6109 /* charge immigration isn't supported on the default hierarchy */
6110 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
9f2115f9
TH
6111 return 0;
6112
4530eddb
TH
6113 /*
6114 * Multi-process migrations only happen on the default hierarchy
6115 * where charge immigration is not used. Perform charge
6116 * immigration if @tset contains a leader and whine if there are
6117 * multiple.
6118 */
6119 p = NULL;
1f7dd3e5 6120 cgroup_taskset_for_each_leader(leader, css, tset) {
4530eddb
TH
6121 WARN_ON_ONCE(p);
6122 p = leader;
1f7dd3e5 6123 memcg = mem_cgroup_from_css(css);
4530eddb
TH
6124 }
6125 if (!p)
6126 return 0;
6127
1f7dd3e5 6128 /*
f0953a1b 6129 * We are now committed to this value whatever it is. Changes in this
1f7dd3e5
TH
6130 * tunable will only affect upcoming migrations, not the current one.
6131 * So we need to save it, and keep it going.
6132 */
6133 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6134 if (!move_flags)
6135 return 0;
6136
9f2115f9
TH
6137 from = mem_cgroup_from_task(p);
6138
6139 VM_BUG_ON(from == memcg);
6140
6141 mm = get_task_mm(p);
6142 if (!mm)
6143 return 0;
6144 /* We move charges only when we move a owner of the mm */
6145 if (mm->owner == p) {
6146 VM_BUG_ON(mc.from);
6147 VM_BUG_ON(mc.to);
6148 VM_BUG_ON(mc.precharge);
6149 VM_BUG_ON(mc.moved_charge);
6150 VM_BUG_ON(mc.moved_swap);
6151
6152 spin_lock(&mc.lock);
264a0ae1 6153 mc.mm = mm;
9f2115f9
TH
6154 mc.from = from;
6155 mc.to = memcg;
6156 mc.flags = move_flags;
6157 spin_unlock(&mc.lock);
6158 /* We set mc.moving_task later */
6159
6160 ret = mem_cgroup_precharge_mc(mm);
6161 if (ret)
6162 mem_cgroup_clear_mc();
264a0ae1
TH
6163 } else {
6164 mmput(mm);
7dc74be0
DN
6165 }
6166 return ret;
6167}
6168
1f7dd3e5 6169static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
7dc74be0 6170{
4e2f245d
JW
6171 if (mc.to)
6172 mem_cgroup_clear_mc();
7dc74be0
DN
6173}
6174
4ffef5fe
DN
6175static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6176 unsigned long addr, unsigned long end,
6177 struct mm_walk *walk)
7dc74be0 6178{
4ffef5fe 6179 int ret = 0;
26bcd64a 6180 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
6181 pte_t *pte;
6182 spinlock_t *ptl;
12724850
NH
6183 enum mc_target_type target_type;
6184 union mc_target target;
6185 struct page *page;
4ffef5fe 6186
b6ec57f4
KS
6187 ptl = pmd_trans_huge_lock(pmd, vma);
6188 if (ptl) {
62ade86a 6189 if (mc.precharge < HPAGE_PMD_NR) {
bf929152 6190 spin_unlock(ptl);
12724850
NH
6191 return 0;
6192 }
6193 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6194 if (target_type == MC_TARGET_PAGE) {
6195 page = target.page;
f7f9c00d 6196 if (isolate_lru_page(page)) {
f627c2f5 6197 if (!mem_cgroup_move_account(page, true,
1306a85a 6198 mc.from, mc.to)) {
12724850
NH
6199 mc.precharge -= HPAGE_PMD_NR;
6200 mc.moved_charge += HPAGE_PMD_NR;
6201 }
6202 putback_lru_page(page);
6203 }
4e0cf05f 6204 unlock_page(page);
12724850 6205 put_page(page);
c733a828
JG
6206 } else if (target_type == MC_TARGET_DEVICE) {
6207 page = target.page;
6208 if (!mem_cgroup_move_account(page, true,
6209 mc.from, mc.to)) {
6210 mc.precharge -= HPAGE_PMD_NR;
6211 mc.moved_charge += HPAGE_PMD_NR;
6212 }
4e0cf05f 6213 unlock_page(page);
c733a828 6214 put_page(page);
12724850 6215 }
bf929152 6216 spin_unlock(ptl);
1a5a9906 6217 return 0;
12724850
NH
6218 }
6219
45f83cef
AA
6220 if (pmd_trans_unstable(pmd))
6221 return 0;
4ffef5fe
DN
6222retry:
6223 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6224 for (; addr != end; addr += PAGE_SIZE) {
6225 pte_t ptent = *(pte++);
c733a828 6226 bool device = false;
02491447 6227 swp_entry_t ent;
4ffef5fe
DN
6228
6229 if (!mc.precharge)
6230 break;
6231
8d32ff84 6232 switch (get_mctgt_type(vma, addr, ptent, &target)) {
c733a828
JG
6233 case MC_TARGET_DEVICE:
6234 device = true;
e4a9bc58 6235 fallthrough;
4ffef5fe
DN
6236 case MC_TARGET_PAGE:
6237 page = target.page;
53f9263b
KS
6238 /*
6239 * We can have a part of the split pmd here. Moving it
6240 * can be done but it would be too convoluted so simply
6241 * ignore such a partial THP and keep it in original
6242 * memcg. There should be somebody mapping the head.
6243 */
6244 if (PageTransCompound(page))
6245 goto put;
f7f9c00d 6246 if (!device && !isolate_lru_page(page))
4ffef5fe 6247 goto put;
f627c2f5
KS
6248 if (!mem_cgroup_move_account(page, false,
6249 mc.from, mc.to)) {
4ffef5fe 6250 mc.precharge--;
854ffa8d
DN
6251 /* we uncharge from mc.from later. */
6252 mc.moved_charge++;
4ffef5fe 6253 }
c733a828
JG
6254 if (!device)
6255 putback_lru_page(page);
4e0cf05f
JW
6256put: /* get_mctgt_type() gets & locks the page */
6257 unlock_page(page);
4ffef5fe
DN
6258 put_page(page);
6259 break;
02491447
DN
6260 case MC_TARGET_SWAP:
6261 ent = target.ent;
e91cbb42 6262 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 6263 mc.precharge--;
8d22a935
HD
6264 mem_cgroup_id_get_many(mc.to, 1);
6265 /* we fixup other refcnts and charges later. */
483c30b5
DN
6266 mc.moved_swap++;
6267 }
02491447 6268 break;
4ffef5fe
DN
6269 default:
6270 break;
6271 }
6272 }
6273 pte_unmap_unlock(pte - 1, ptl);
6274 cond_resched();
6275
6276 if (addr != end) {
6277 /*
6278 * We have consumed all precharges we got in can_attach().
6279 * We try charge one by one, but don't do any additional
6280 * charges to mc.to if we have failed in charge once in attach()
6281 * phase.
6282 */
854ffa8d 6283 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
6284 if (!ret)
6285 goto retry;
6286 }
6287
6288 return ret;
6289}
6290
7b86ac33
CH
6291static const struct mm_walk_ops charge_walk_ops = {
6292 .pmd_entry = mem_cgroup_move_charge_pte_range,
6293};
6294
264a0ae1 6295static void mem_cgroup_move_charge(void)
4ffef5fe 6296{
4ffef5fe 6297 lru_add_drain_all();
312722cb 6298 /*
81f8c3a4
JW
6299 * Signal lock_page_memcg() to take the memcg's move_lock
6300 * while we're moving its pages to another memcg. Then wait
6301 * for already started RCU-only updates to finish.
312722cb
JW
6302 */
6303 atomic_inc(&mc.from->moving_account);
6304 synchronize_rcu();
dfe076b0 6305retry:
d8ed45c5 6306 if (unlikely(!mmap_read_trylock(mc.mm))) {
dfe076b0 6307 /*
c1e8d7c6 6308 * Someone who are holding the mmap_lock might be waiting in
dfe076b0
DN
6309 * waitq. So we cancel all extra charges, wake up all waiters,
6310 * and retry. Because we cancel precharges, we might not be able
6311 * to move enough charges, but moving charge is a best-effort
6312 * feature anyway, so it wouldn't be a big problem.
6313 */
6314 __mem_cgroup_clear_mc();
6315 cond_resched();
6316 goto retry;
6317 }
26bcd64a
NH
6318 /*
6319 * When we have consumed all precharges and failed in doing
6320 * additional charge, the page walk just aborts.
6321 */
ba0aff8e 6322 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
d8ed45c5 6323 mmap_read_unlock(mc.mm);
312722cb 6324 atomic_dec(&mc.from->moving_account);
7dc74be0
DN
6325}
6326
264a0ae1 6327static void mem_cgroup_move_task(void)
67e465a7 6328{
264a0ae1
TH
6329 if (mc.to) {
6330 mem_cgroup_move_charge();
a433658c 6331 mem_cgroup_clear_mc();
264a0ae1 6332 }
67e465a7 6333}
5cfb80a7 6334#else /* !CONFIG_MMU */
1f7dd3e5 6335static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6336{
6337 return 0;
6338}
1f7dd3e5 6339static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6340{
6341}
264a0ae1 6342static void mem_cgroup_move_task(void)
5cfb80a7
DN
6343{
6344}
6345#endif
67e465a7 6346
bd74fdae
YZ
6347#ifdef CONFIG_LRU_GEN
6348static void mem_cgroup_attach(struct cgroup_taskset *tset)
6349{
6350 struct task_struct *task;
6351 struct cgroup_subsys_state *css;
6352
6353 /* find the first leader if there is any */
6354 cgroup_taskset_for_each_leader(task, css, tset)
6355 break;
6356
6357 if (!task)
6358 return;
6359
6360 task_lock(task);
6361 if (task->mm && READ_ONCE(task->mm->owner) == task)
6362 lru_gen_migrate_mm(task->mm);
6363 task_unlock(task);
6364}
6365#else
6366static void mem_cgroup_attach(struct cgroup_taskset *tset)
6367{
6368}
6369#endif /* CONFIG_LRU_GEN */
6370
677dc973
CD
6371static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6372{
6373 if (value == PAGE_COUNTER_MAX)
6374 seq_puts(m, "max\n");
6375 else
6376 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6377
6378 return 0;
6379}
6380
241994ed
JW
6381static u64 memory_current_read(struct cgroup_subsys_state *css,
6382 struct cftype *cft)
6383{
f5fc3c5d
JW
6384 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6385
6386 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
241994ed
JW
6387}
6388
8e20d4b3
GR
6389static u64 memory_peak_read(struct cgroup_subsys_state *css,
6390 struct cftype *cft)
6391{
6392 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6393
6394 return (u64)memcg->memory.watermark * PAGE_SIZE;
6395}
6396
bf8d5d52
RG
6397static int memory_min_show(struct seq_file *m, void *v)
6398{
677dc973
CD
6399 return seq_puts_memcg_tunable(m,
6400 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
bf8d5d52
RG
6401}
6402
6403static ssize_t memory_min_write(struct kernfs_open_file *of,
6404 char *buf, size_t nbytes, loff_t off)
6405{
6406 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6407 unsigned long min;
6408 int err;
6409
6410 buf = strstrip(buf);
6411 err = page_counter_memparse(buf, "max", &min);
6412 if (err)
6413 return err;
6414
6415 page_counter_set_min(&memcg->memory, min);
6416
6417 return nbytes;
6418}
6419
241994ed
JW
6420static int memory_low_show(struct seq_file *m, void *v)
6421{
677dc973
CD
6422 return seq_puts_memcg_tunable(m,
6423 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
241994ed
JW
6424}
6425
6426static ssize_t memory_low_write(struct kernfs_open_file *of,
6427 char *buf, size_t nbytes, loff_t off)
6428{
6429 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6430 unsigned long low;
6431 int err;
6432
6433 buf = strstrip(buf);
d2973697 6434 err = page_counter_memparse(buf, "max", &low);
241994ed
JW
6435 if (err)
6436 return err;
6437
23067153 6438 page_counter_set_low(&memcg->memory, low);
241994ed
JW
6439
6440 return nbytes;
6441}
6442
6443static int memory_high_show(struct seq_file *m, void *v)
6444{
d1663a90
JK
6445 return seq_puts_memcg_tunable(m,
6446 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
241994ed
JW
6447}
6448
6449static ssize_t memory_high_write(struct kernfs_open_file *of,
6450 char *buf, size_t nbytes, loff_t off)
6451{
6452 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 6453 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
8c8c383c 6454 bool drained = false;
241994ed
JW
6455 unsigned long high;
6456 int err;
6457
6458 buf = strstrip(buf);
d2973697 6459 err = page_counter_memparse(buf, "max", &high);
241994ed
JW
6460 if (err)
6461 return err;
6462
e82553c1
JW
6463 page_counter_set_high(&memcg->memory, high);
6464
8c8c383c
JW
6465 for (;;) {
6466 unsigned long nr_pages = page_counter_read(&memcg->memory);
6467 unsigned long reclaimed;
6468
6469 if (nr_pages <= high)
6470 break;
6471
6472 if (signal_pending(current))
6473 break;
6474
6475 if (!drained) {
6476 drain_all_stock(memcg);
6477 drained = true;
6478 continue;
6479 }
6480
6481 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
55ab834a 6482 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
8c8c383c
JW
6483
6484 if (!reclaimed && !nr_retries--)
6485 break;
6486 }
588083bb 6487
19ce33ac 6488 memcg_wb_domain_size_changed(memcg);
241994ed
JW
6489 return nbytes;
6490}
6491
6492static int memory_max_show(struct seq_file *m, void *v)
6493{
677dc973
CD
6494 return seq_puts_memcg_tunable(m,
6495 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
241994ed
JW
6496}
6497
6498static ssize_t memory_max_write(struct kernfs_open_file *of,
6499 char *buf, size_t nbytes, loff_t off)
6500{
6501 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 6502 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
b6e6edcf 6503 bool drained = false;
241994ed
JW
6504 unsigned long max;
6505 int err;
6506
6507 buf = strstrip(buf);
d2973697 6508 err = page_counter_memparse(buf, "max", &max);
241994ed
JW
6509 if (err)
6510 return err;
6511
bbec2e15 6512 xchg(&memcg->memory.max, max);
b6e6edcf
JW
6513
6514 for (;;) {
6515 unsigned long nr_pages = page_counter_read(&memcg->memory);
6516
6517 if (nr_pages <= max)
6518 break;
6519
7249c9f0 6520 if (signal_pending(current))
b6e6edcf 6521 break;
b6e6edcf
JW
6522
6523 if (!drained) {
6524 drain_all_stock(memcg);
6525 drained = true;
6526 continue;
6527 }
6528
6529 if (nr_reclaims) {
6530 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
55ab834a 6531 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
b6e6edcf
JW
6532 nr_reclaims--;
6533 continue;
6534 }
6535
e27be240 6536 memcg_memory_event(memcg, MEMCG_OOM);
b6e6edcf
JW
6537 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6538 break;
6539 }
241994ed 6540
2529bb3a 6541 memcg_wb_domain_size_changed(memcg);
241994ed
JW
6542 return nbytes;
6543}
6544
1e577f97
SB
6545static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6546{
6547 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6548 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6549 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6550 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6551 seq_printf(m, "oom_kill %lu\n",
6552 atomic_long_read(&events[MEMCG_OOM_KILL]));
b6bf9abb
DS
6553 seq_printf(m, "oom_group_kill %lu\n",
6554 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
1e577f97
SB
6555}
6556
241994ed
JW
6557static int memory_events_show(struct seq_file *m, void *v)
6558{
aa9694bb 6559 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 6560
1e577f97
SB
6561 __memory_events_show(m, memcg->memory_events);
6562 return 0;
6563}
6564
6565static int memory_events_local_show(struct seq_file *m, void *v)
6566{
6567 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 6568
1e577f97 6569 __memory_events_show(m, memcg->memory_events_local);
241994ed
JW
6570 return 0;
6571}
6572
587d9f72
JW
6573static int memory_stat_show(struct seq_file *m, void *v)
6574{
aa9694bb 6575 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
68aaee14 6576 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1ff9e6e1 6577
c8713d0b
JW
6578 if (!buf)
6579 return -ENOMEM;
68aaee14 6580 memory_stat_format(memcg, buf, PAGE_SIZE);
c8713d0b
JW
6581 seq_puts(m, buf);
6582 kfree(buf);
587d9f72
JW
6583 return 0;
6584}
6585
5f9a4f4a 6586#ifdef CONFIG_NUMA
fff66b79
MS
6587static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6588 int item)
6589{
6590 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6591}
6592
5f9a4f4a
MS
6593static int memory_numa_stat_show(struct seq_file *m, void *v)
6594{
6595 int i;
6596 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6597
fd25a9e0 6598 mem_cgroup_flush_stats();
7e1c0d6f 6599
5f9a4f4a
MS
6600 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6601 int nid;
6602
6603 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6604 continue;
6605
6606 seq_printf(m, "%s", memory_stats[i].name);
6607 for_each_node_state(nid, N_MEMORY) {
6608 u64 size;
6609 struct lruvec *lruvec;
6610
6611 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
fff66b79
MS
6612 size = lruvec_page_state_output(lruvec,
6613 memory_stats[i].idx);
5f9a4f4a
MS
6614 seq_printf(m, " N%d=%llu", nid, size);
6615 }
6616 seq_putc(m, '\n');
6617 }
6618
6619 return 0;
6620}
6621#endif
6622
3d8b38eb
RG
6623static int memory_oom_group_show(struct seq_file *m, void *v)
6624{
aa9694bb 6625 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3d8b38eb 6626
eaf7b66b 6627 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
3d8b38eb
RG
6628
6629 return 0;
6630}
6631
6632static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6633 char *buf, size_t nbytes, loff_t off)
6634{
6635 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6636 int ret, oom_group;
6637
6638 buf = strstrip(buf);
6639 if (!buf)
6640 return -EINVAL;
6641
6642 ret = kstrtoint(buf, 0, &oom_group);
6643 if (ret)
6644 return ret;
6645
6646 if (oom_group != 0 && oom_group != 1)
6647 return -EINVAL;
6648
eaf7b66b 6649 WRITE_ONCE(memcg->oom_group, oom_group);
3d8b38eb
RG
6650
6651 return nbytes;
6652}
6653
94968384
SB
6654static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6655 size_t nbytes, loff_t off)
6656{
6657 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6658 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6659 unsigned long nr_to_reclaim, nr_reclaimed = 0;
55ab834a
MH
6660 unsigned int reclaim_options;
6661 int err;
12a5d395
MA
6662
6663 buf = strstrip(buf);
55ab834a
MH
6664 err = page_counter_memparse(buf, "", &nr_to_reclaim);
6665 if (err)
6666 return err;
12a5d395 6667
55ab834a 6668 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
94968384
SB
6669 while (nr_reclaimed < nr_to_reclaim) {
6670 unsigned long reclaimed;
6671
6672 if (signal_pending(current))
6673 return -EINTR;
6674
6675 /*
6676 * This is the final attempt, drain percpu lru caches in the
6677 * hope of introducing more evictable pages for
6678 * try_to_free_mem_cgroup_pages().
6679 */
6680 if (!nr_retries)
6681 lru_add_drain_all();
6682
6683 reclaimed = try_to_free_mem_cgroup_pages(memcg,
6684 nr_to_reclaim - nr_reclaimed,
55ab834a 6685 GFP_KERNEL, reclaim_options);
94968384
SB
6686
6687 if (!reclaimed && !nr_retries--)
6688 return -EAGAIN;
6689
6690 nr_reclaimed += reclaimed;
6691 }
6692
6693 return nbytes;
6694}
6695
241994ed
JW
6696static struct cftype memory_files[] = {
6697 {
6698 .name = "current",
f5fc3c5d 6699 .flags = CFTYPE_NOT_ON_ROOT,
241994ed
JW
6700 .read_u64 = memory_current_read,
6701 },
8e20d4b3
GR
6702 {
6703 .name = "peak",
6704 .flags = CFTYPE_NOT_ON_ROOT,
6705 .read_u64 = memory_peak_read,
6706 },
bf8d5d52
RG
6707 {
6708 .name = "min",
6709 .flags = CFTYPE_NOT_ON_ROOT,
6710 .seq_show = memory_min_show,
6711 .write = memory_min_write,
6712 },
241994ed
JW
6713 {
6714 .name = "low",
6715 .flags = CFTYPE_NOT_ON_ROOT,
6716 .seq_show = memory_low_show,
6717 .write = memory_low_write,
6718 },
6719 {
6720 .name = "high",
6721 .flags = CFTYPE_NOT_ON_ROOT,
6722 .seq_show = memory_high_show,
6723 .write = memory_high_write,
6724 },
6725 {
6726 .name = "max",
6727 .flags = CFTYPE_NOT_ON_ROOT,
6728 .seq_show = memory_max_show,
6729 .write = memory_max_write,
6730 },
6731 {
6732 .name = "events",
6733 .flags = CFTYPE_NOT_ON_ROOT,
472912a2 6734 .file_offset = offsetof(struct mem_cgroup, events_file),
241994ed
JW
6735 .seq_show = memory_events_show,
6736 },
1e577f97
SB
6737 {
6738 .name = "events.local",
6739 .flags = CFTYPE_NOT_ON_ROOT,
6740 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6741 .seq_show = memory_events_local_show,
6742 },
587d9f72
JW
6743 {
6744 .name = "stat",
587d9f72
JW
6745 .seq_show = memory_stat_show,
6746 },
5f9a4f4a
MS
6747#ifdef CONFIG_NUMA
6748 {
6749 .name = "numa_stat",
6750 .seq_show = memory_numa_stat_show,
6751 },
6752#endif
3d8b38eb
RG
6753 {
6754 .name = "oom.group",
6755 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6756 .seq_show = memory_oom_group_show,
6757 .write = memory_oom_group_write,
6758 },
94968384
SB
6759 {
6760 .name = "reclaim",
6761 .flags = CFTYPE_NS_DELEGATABLE,
6762 .write = memory_reclaim,
6763 },
241994ed
JW
6764 { } /* terminate */
6765};
6766
073219e9 6767struct cgroup_subsys memory_cgrp_subsys = {
92fb9748 6768 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 6769 .css_online = mem_cgroup_css_online,
92fb9748 6770 .css_offline = mem_cgroup_css_offline,
6df38689 6771 .css_released = mem_cgroup_css_released,
92fb9748 6772 .css_free = mem_cgroup_css_free,
1ced953b 6773 .css_reset = mem_cgroup_css_reset,
2d146aa3 6774 .css_rstat_flush = mem_cgroup_css_rstat_flush,
7dc74be0 6775 .can_attach = mem_cgroup_can_attach,
bd74fdae 6776 .attach = mem_cgroup_attach,
7dc74be0 6777 .cancel_attach = mem_cgroup_cancel_attach,
264a0ae1 6778 .post_attach = mem_cgroup_move_task,
241994ed
JW
6779 .dfl_cftypes = memory_files,
6780 .legacy_cftypes = mem_cgroup_legacy_files,
6d12e2d8 6781 .early_init = 0,
8cdea7c0 6782};
c077719b 6783
bc50bcc6
JW
6784/*
6785 * This function calculates an individual cgroup's effective
6786 * protection which is derived from its own memory.min/low, its
6787 * parent's and siblings' settings, as well as the actual memory
6788 * distribution in the tree.
6789 *
6790 * The following rules apply to the effective protection values:
6791 *
6792 * 1. At the first level of reclaim, effective protection is equal to
6793 * the declared protection in memory.min and memory.low.
6794 *
6795 * 2. To enable safe delegation of the protection configuration, at
6796 * subsequent levels the effective protection is capped to the
6797 * parent's effective protection.
6798 *
6799 * 3. To make complex and dynamic subtrees easier to configure, the
6800 * user is allowed to overcommit the declared protection at a given
6801 * level. If that is the case, the parent's effective protection is
6802 * distributed to the children in proportion to how much protection
6803 * they have declared and how much of it they are utilizing.
6804 *
6805 * This makes distribution proportional, but also work-conserving:
6806 * if one cgroup claims much more protection than it uses memory,
6807 * the unused remainder is available to its siblings.
6808 *
6809 * 4. Conversely, when the declared protection is undercommitted at a
6810 * given level, the distribution of the larger parental protection
6811 * budget is NOT proportional. A cgroup's protection from a sibling
6812 * is capped to its own memory.min/low setting.
6813 *
8a931f80
JW
6814 * 5. However, to allow protecting recursive subtrees from each other
6815 * without having to declare each individual cgroup's fixed share
6816 * of the ancestor's claim to protection, any unutilized -
6817 * "floating" - protection from up the tree is distributed in
6818 * proportion to each cgroup's *usage*. This makes the protection
6819 * neutral wrt sibling cgroups and lets them compete freely over
6820 * the shared parental protection budget, but it protects the
6821 * subtree as a whole from neighboring subtrees.
6822 *
6823 * Note that 4. and 5. are not in conflict: 4. is about protecting
6824 * against immediate siblings whereas 5. is about protecting against
6825 * neighboring subtrees.
bc50bcc6
JW
6826 */
6827static unsigned long effective_protection(unsigned long usage,
8a931f80 6828 unsigned long parent_usage,
bc50bcc6
JW
6829 unsigned long setting,
6830 unsigned long parent_effective,
6831 unsigned long siblings_protected)
6832{
6833 unsigned long protected;
8a931f80 6834 unsigned long ep;
bc50bcc6
JW
6835
6836 protected = min(usage, setting);
6837 /*
6838 * If all cgroups at this level combined claim and use more
6839 * protection then what the parent affords them, distribute
6840 * shares in proportion to utilization.
6841 *
6842 * We are using actual utilization rather than the statically
6843 * claimed protection in order to be work-conserving: claimed
6844 * but unused protection is available to siblings that would
6845 * otherwise get a smaller chunk than what they claimed.
6846 */
6847 if (siblings_protected > parent_effective)
6848 return protected * parent_effective / siblings_protected;
6849
6850 /*
6851 * Ok, utilized protection of all children is within what the
6852 * parent affords them, so we know whatever this child claims
6853 * and utilizes is effectively protected.
6854 *
6855 * If there is unprotected usage beyond this value, reclaim
6856 * will apply pressure in proportion to that amount.
6857 *
6858 * If there is unutilized protection, the cgroup will be fully
6859 * shielded from reclaim, but we do return a smaller value for
6860 * protection than what the group could enjoy in theory. This
6861 * is okay. With the overcommit distribution above, effective
6862 * protection is always dependent on how memory is actually
6863 * consumed among the siblings anyway.
6864 */
8a931f80
JW
6865 ep = protected;
6866
6867 /*
6868 * If the children aren't claiming (all of) the protection
6869 * afforded to them by the parent, distribute the remainder in
6870 * proportion to the (unprotected) memory of each cgroup. That
6871 * way, cgroups that aren't explicitly prioritized wrt each
6872 * other compete freely over the allowance, but they are
6873 * collectively protected from neighboring trees.
6874 *
6875 * We're using unprotected memory for the weight so that if
6876 * some cgroups DO claim explicit protection, we don't protect
6877 * the same bytes twice.
cd324edc
JW
6878 *
6879 * Check both usage and parent_usage against the respective
6880 * protected values. One should imply the other, but they
6881 * aren't read atomically - make sure the division is sane.
8a931f80
JW
6882 */
6883 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6884 return ep;
cd324edc
JW
6885 if (parent_effective > siblings_protected &&
6886 parent_usage > siblings_protected &&
6887 usage > protected) {
8a931f80
JW
6888 unsigned long unclaimed;
6889
6890 unclaimed = parent_effective - siblings_protected;
6891 unclaimed *= usage - protected;
6892 unclaimed /= parent_usage - siblings_protected;
6893
6894 ep += unclaimed;
6895 }
6896
6897 return ep;
bc50bcc6
JW
6898}
6899
241994ed 6900/**
05395718 6901 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
34c81057 6902 * @root: the top ancestor of the sub-tree being checked
241994ed
JW
6903 * @memcg: the memory cgroup to check
6904 *
23067153
RG
6905 * WARNING: This function is not stateless! It can only be used as part
6906 * of a top-down tree iteration, not for isolated queries.
241994ed 6907 */
45c7f7e1
CD
6908void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6909 struct mem_cgroup *memcg)
241994ed 6910{
8a931f80 6911 unsigned long usage, parent_usage;
23067153
RG
6912 struct mem_cgroup *parent;
6913
241994ed 6914 if (mem_cgroup_disabled())
45c7f7e1 6915 return;
241994ed 6916
34c81057
SC
6917 if (!root)
6918 root = root_mem_cgroup;
22f7496f
YS
6919
6920 /*
6921 * Effective values of the reclaim targets are ignored so they
6922 * can be stale. Have a look at mem_cgroup_protection for more
6923 * details.
6924 * TODO: calculation should be more robust so that we do not need
6925 * that special casing.
6926 */
34c81057 6927 if (memcg == root)
45c7f7e1 6928 return;
241994ed 6929
23067153 6930 usage = page_counter_read(&memcg->memory);
bf8d5d52 6931 if (!usage)
45c7f7e1 6932 return;
bf8d5d52 6933
bf8d5d52 6934 parent = parent_mem_cgroup(memcg);
df2a4196 6935
bc50bcc6 6936 if (parent == root) {
c3d53200 6937 memcg->memory.emin = READ_ONCE(memcg->memory.min);
03960e33 6938 memcg->memory.elow = READ_ONCE(memcg->memory.low);
45c7f7e1 6939 return;
bf8d5d52
RG
6940 }
6941
8a931f80
JW
6942 parent_usage = page_counter_read(&parent->memory);
6943
b3a7822e 6944 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
c3d53200
CD
6945 READ_ONCE(memcg->memory.min),
6946 READ_ONCE(parent->memory.emin),
b3a7822e 6947 atomic_long_read(&parent->memory.children_min_usage)));
23067153 6948
b3a7822e 6949 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
03960e33
CD
6950 READ_ONCE(memcg->memory.low),
6951 READ_ONCE(parent->memory.elow),
b3a7822e 6952 atomic_long_read(&parent->memory.children_low_usage)));
241994ed
JW
6953}
6954
8f425e4e
MWO
6955static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6956 gfp_t gfp)
0add0c77 6957{
118f2875 6958 long nr_pages = folio_nr_pages(folio);
0add0c77
SB
6959 int ret;
6960
6961 ret = try_charge(memcg, gfp, nr_pages);
6962 if (ret)
6963 goto out;
6964
6965 css_get(&memcg->css);
118f2875 6966 commit_charge(folio, memcg);
0add0c77
SB
6967
6968 local_irq_disable();
6e0110c2 6969 mem_cgroup_charge_statistics(memcg, nr_pages);
8f425e4e 6970 memcg_check_events(memcg, folio_nid(folio));
0add0c77
SB
6971 local_irq_enable();
6972out:
6973 return ret;
6974}
6975
8f425e4e 6976int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
00501b53 6977{
0add0c77
SB
6978 struct mem_cgroup *memcg;
6979 int ret;
00501b53 6980
0add0c77 6981 memcg = get_mem_cgroup_from_mm(mm);
8f425e4e 6982 ret = charge_memcg(folio, memcg, gfp);
0add0c77 6983 css_put(&memcg->css);
2d1c4980 6984
0add0c77
SB
6985 return ret;
6986}
e993d905 6987
0add0c77 6988/**
65995918
MWO
6989 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
6990 * @folio: folio to charge.
0add0c77
SB
6991 * @mm: mm context of the victim
6992 * @gfp: reclaim mode
65995918 6993 * @entry: swap entry for which the folio is allocated
0add0c77 6994 *
65995918
MWO
6995 * This function charges a folio allocated for swapin. Please call this before
6996 * adding the folio to the swapcache.
0add0c77
SB
6997 *
6998 * Returns 0 on success. Otherwise, an error code is returned.
6999 */
65995918 7000int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
0add0c77
SB
7001 gfp_t gfp, swp_entry_t entry)
7002{
7003 struct mem_cgroup *memcg;
7004 unsigned short id;
7005 int ret;
00501b53 7006
0add0c77
SB
7007 if (mem_cgroup_disabled())
7008 return 0;
00501b53 7009
0add0c77
SB
7010 id = lookup_swap_cgroup_id(entry);
7011 rcu_read_lock();
7012 memcg = mem_cgroup_from_id(id);
7013 if (!memcg || !css_tryget_online(&memcg->css))
7014 memcg = get_mem_cgroup_from_mm(mm);
7015 rcu_read_unlock();
00501b53 7016
8f425e4e 7017 ret = charge_memcg(folio, memcg, gfp);
6abb5a86 7018
0add0c77
SB
7019 css_put(&memcg->css);
7020 return ret;
7021}
00501b53 7022
0add0c77
SB
7023/*
7024 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7025 * @entry: swap entry for which the page is charged
7026 *
7027 * Call this function after successfully adding the charged page to swapcache.
7028 *
7029 * Note: This function assumes the page for which swap slot is being uncharged
7030 * is order 0 page.
7031 */
7032void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7033{
cae3af62
MS
7034 /*
7035 * Cgroup1's unified memory+swap counter has been charged with the
7036 * new swapcache page, finish the transfer by uncharging the swap
7037 * slot. The swap slot would also get uncharged when it dies, but
7038 * it can stick around indefinitely and we'd count the page twice
7039 * the entire time.
7040 *
7041 * Cgroup2 has separate resource counters for memory and swap,
7042 * so this is a non-issue here. Memory and swap charge lifetimes
7043 * correspond 1:1 to page and swap slot lifetimes: we charge the
7044 * page to memory here, and uncharge swap when the slot is freed.
7045 */
0add0c77 7046 if (!mem_cgroup_disabled() && do_memsw_account()) {
00501b53
JW
7047 /*
7048 * The swap entry might not get freed for a long time,
7049 * let's not wait for it. The page already received a
7050 * memory+swap charge, drop the swap entry duplicate.
7051 */
0add0c77 7052 mem_cgroup_uncharge_swap(entry, 1);
00501b53 7053 }
3fea5a49
JW
7054}
7055
a9d5adee
JG
7056struct uncharge_gather {
7057 struct mem_cgroup *memcg;
b4e0b68f 7058 unsigned long nr_memory;
a9d5adee 7059 unsigned long pgpgout;
a9d5adee 7060 unsigned long nr_kmem;
8e88bd2d 7061 int nid;
a9d5adee
JG
7062};
7063
7064static inline void uncharge_gather_clear(struct uncharge_gather *ug)
747db954 7065{
a9d5adee
JG
7066 memset(ug, 0, sizeof(*ug));
7067}
7068
7069static void uncharge_batch(const struct uncharge_gather *ug)
7070{
747db954
JW
7071 unsigned long flags;
7072
b4e0b68f
MS
7073 if (ug->nr_memory) {
7074 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7941d214 7075 if (do_memsw_account())
b4e0b68f 7076 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
a8c49af3
YA
7077 if (ug->nr_kmem)
7078 memcg_account_kmem(ug->memcg, -ug->nr_kmem);
a9d5adee 7079 memcg_oom_recover(ug->memcg);
ce00a967 7080 }
747db954
JW
7081
7082 local_irq_save(flags);
c9019e9b 7083 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
b4e0b68f 7084 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
8e88bd2d 7085 memcg_check_events(ug->memcg, ug->nid);
747db954 7086 local_irq_restore(flags);
f1796544 7087
c4ed6ebf 7088 /* drop reference from uncharge_folio */
f1796544 7089 css_put(&ug->memcg->css);
a9d5adee
JG
7090}
7091
c4ed6ebf 7092static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
a9d5adee 7093{
c4ed6ebf 7094 long nr_pages;
b4e0b68f
MS
7095 struct mem_cgroup *memcg;
7096 struct obj_cgroup *objcg;
9f762dbe 7097
c4ed6ebf 7098 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
a9d5adee 7099
a9d5adee
JG
7100 /*
7101 * Nobody should be changing or seriously looking at
c4ed6ebf
MWO
7102 * folio memcg or objcg at this point, we have fully
7103 * exclusive access to the folio.
a9d5adee 7104 */
fead2b86 7105 if (folio_memcg_kmem(folio)) {
1b7e4464 7106 objcg = __folio_objcg(folio);
b4e0b68f
MS
7107 /*
7108 * This get matches the put at the end of the function and
7109 * kmem pages do not hold memcg references anymore.
7110 */
7111 memcg = get_mem_cgroup_from_objcg(objcg);
7112 } else {
1b7e4464 7113 memcg = __folio_memcg(folio);
b4e0b68f 7114 }
a9d5adee 7115
b4e0b68f
MS
7116 if (!memcg)
7117 return;
7118
7119 if (ug->memcg != memcg) {
a9d5adee
JG
7120 if (ug->memcg) {
7121 uncharge_batch(ug);
7122 uncharge_gather_clear(ug);
7123 }
b4e0b68f 7124 ug->memcg = memcg;
c4ed6ebf 7125 ug->nid = folio_nid(folio);
f1796544
MH
7126
7127 /* pairs with css_put in uncharge_batch */
b4e0b68f 7128 css_get(&memcg->css);
a9d5adee
JG
7129 }
7130
c4ed6ebf 7131 nr_pages = folio_nr_pages(folio);
a9d5adee 7132
fead2b86 7133 if (folio_memcg_kmem(folio)) {
b4e0b68f 7134 ug->nr_memory += nr_pages;
9f762dbe 7135 ug->nr_kmem += nr_pages;
b4e0b68f 7136
c4ed6ebf 7137 folio->memcg_data = 0;
b4e0b68f
MS
7138 obj_cgroup_put(objcg);
7139 } else {
7140 /* LRU pages aren't accounted at the root level */
7141 if (!mem_cgroup_is_root(memcg))
7142 ug->nr_memory += nr_pages;
18b2db3b 7143 ug->pgpgout++;
a9d5adee 7144
c4ed6ebf 7145 folio->memcg_data = 0;
b4e0b68f
MS
7146 }
7147
7148 css_put(&memcg->css);
747db954
JW
7149}
7150
bbc6b703 7151void __mem_cgroup_uncharge(struct folio *folio)
0a31bc97 7152{
a9d5adee
JG
7153 struct uncharge_gather ug;
7154
bbc6b703
MWO
7155 /* Don't touch folio->lru of any random page, pre-check: */
7156 if (!folio_memcg(folio))
0a31bc97
JW
7157 return;
7158
a9d5adee 7159 uncharge_gather_clear(&ug);
bbc6b703 7160 uncharge_folio(folio, &ug);
a9d5adee 7161 uncharge_batch(&ug);
747db954 7162}
0a31bc97 7163
747db954 7164/**
2c8d8f97 7165 * __mem_cgroup_uncharge_list - uncharge a list of page
747db954
JW
7166 * @page_list: list of pages to uncharge
7167 *
7168 * Uncharge a list of pages previously charged with
2c8d8f97 7169 * __mem_cgroup_charge().
747db954 7170 */
2c8d8f97 7171void __mem_cgroup_uncharge_list(struct list_head *page_list)
747db954 7172{
c41a40b6 7173 struct uncharge_gather ug;
c4ed6ebf 7174 struct folio *folio;
c41a40b6 7175
c41a40b6 7176 uncharge_gather_clear(&ug);
c4ed6ebf
MWO
7177 list_for_each_entry(folio, page_list, lru)
7178 uncharge_folio(folio, &ug);
c41a40b6
MS
7179 if (ug.memcg)
7180 uncharge_batch(&ug);
0a31bc97
JW
7181}
7182
7183/**
d21bba2b
MWO
7184 * mem_cgroup_migrate - Charge a folio's replacement.
7185 * @old: Currently circulating folio.
7186 * @new: Replacement folio.
0a31bc97 7187 *
d21bba2b 7188 * Charge @new as a replacement folio for @old. @old will
6a93ca8f 7189 * be uncharged upon free.
0a31bc97 7190 *
d21bba2b 7191 * Both folios must be locked, @new->mapping must be set up.
0a31bc97 7192 */
d21bba2b 7193void mem_cgroup_migrate(struct folio *old, struct folio *new)
0a31bc97 7194{
29833315 7195 struct mem_cgroup *memcg;
d21bba2b 7196 long nr_pages = folio_nr_pages(new);
d93c4130 7197 unsigned long flags;
0a31bc97 7198
d21bba2b
MWO
7199 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7200 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7201 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7202 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
0a31bc97
JW
7203
7204 if (mem_cgroup_disabled())
7205 return;
7206
d21bba2b
MWO
7207 /* Page cache replacement: new folio already charged? */
7208 if (folio_memcg(new))
0a31bc97
JW
7209 return;
7210
d21bba2b
MWO
7211 memcg = folio_memcg(old);
7212 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
29833315 7213 if (!memcg)
0a31bc97
JW
7214 return;
7215
44b7a8d3 7216 /* Force-charge the new page. The old one will be freed soon */
8dc87c7d
MS
7217 if (!mem_cgroup_is_root(memcg)) {
7218 page_counter_charge(&memcg->memory, nr_pages);
7219 if (do_memsw_account())
7220 page_counter_charge(&memcg->memsw, nr_pages);
7221 }
0a31bc97 7222
1a3e1f40 7223 css_get(&memcg->css);
d21bba2b 7224 commit_charge(new, memcg);
44b7a8d3 7225
d93c4130 7226 local_irq_save(flags);
6e0110c2 7227 mem_cgroup_charge_statistics(memcg, nr_pages);
d21bba2b 7228 memcg_check_events(memcg, folio_nid(new));
d93c4130 7229 local_irq_restore(flags);
0a31bc97
JW
7230}
7231
ef12947c 7232DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
11092087
JW
7233EXPORT_SYMBOL(memcg_sockets_enabled_key);
7234
2d758073 7235void mem_cgroup_sk_alloc(struct sock *sk)
11092087
JW
7236{
7237 struct mem_cgroup *memcg;
7238
2d758073
JW
7239 if (!mem_cgroup_sockets_enabled)
7240 return;
7241
e876ecc6 7242 /* Do not associate the sock with unrelated interrupted task's memcg. */
086f694a 7243 if (!in_task())
e876ecc6
SB
7244 return;
7245
11092087
JW
7246 rcu_read_lock();
7247 memcg = mem_cgroup_from_task(current);
7848ed62 7248 if (mem_cgroup_is_root(memcg))
f7e1cb6e 7249 goto out;
0db15298 7250 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
f7e1cb6e 7251 goto out;
8965aa28 7252 if (css_tryget(&memcg->css))
11092087 7253 sk->sk_memcg = memcg;
f7e1cb6e 7254out:
11092087
JW
7255 rcu_read_unlock();
7256}
11092087 7257
2d758073 7258void mem_cgroup_sk_free(struct sock *sk)
11092087 7259{
2d758073
JW
7260 if (sk->sk_memcg)
7261 css_put(&sk->sk_memcg->css);
11092087
JW
7262}
7263
7264/**
7265 * mem_cgroup_charge_skmem - charge socket memory
7266 * @memcg: memcg to charge
7267 * @nr_pages: number of pages to charge
4b1327be 7268 * @gfp_mask: reclaim mode
11092087
JW
7269 *
7270 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4b1327be 7271 * @memcg's configured limit, %false if it doesn't.
11092087 7272 */
4b1327be
WW
7273bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7274 gfp_t gfp_mask)
11092087 7275{
f7e1cb6e 7276 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7277 struct page_counter *fail;
f7e1cb6e 7278
0db15298
JW
7279 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7280 memcg->tcpmem_pressure = 0;
f7e1cb6e
JW
7281 return true;
7282 }
0db15298 7283 memcg->tcpmem_pressure = 1;
4b1327be
WW
7284 if (gfp_mask & __GFP_NOFAIL) {
7285 page_counter_charge(&memcg->tcpmem, nr_pages);
7286 return true;
7287 }
f7e1cb6e 7288 return false;
11092087 7289 }
d886f4e4 7290
4b1327be
WW
7291 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7292 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
f7e1cb6e 7293 return true;
4b1327be 7294 }
f7e1cb6e 7295
11092087
JW
7296 return false;
7297}
7298
7299/**
7300 * mem_cgroup_uncharge_skmem - uncharge socket memory
b7701a5f
MR
7301 * @memcg: memcg to uncharge
7302 * @nr_pages: number of pages to uncharge
11092087
JW
7303 */
7304void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7305{
f7e1cb6e 7306 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7307 page_counter_uncharge(&memcg->tcpmem, nr_pages);
f7e1cb6e
JW
7308 return;
7309 }
d886f4e4 7310
c9019e9b 7311 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
b2807f07 7312
475d0487 7313 refill_stock(memcg, nr_pages);
11092087
JW
7314}
7315
f7e1cb6e
JW
7316static int __init cgroup_memory(char *s)
7317{
7318 char *token;
7319
7320 while ((token = strsep(&s, ",")) != NULL) {
7321 if (!*token)
7322 continue;
7323 if (!strcmp(token, "nosocket"))
7324 cgroup_memory_nosocket = true;
04823c83
VD
7325 if (!strcmp(token, "nokmem"))
7326 cgroup_memory_nokmem = true;
b6c1a8af
YS
7327 if (!strcmp(token, "nobpf"))
7328 cgroup_memory_nobpf = true;
f7e1cb6e 7329 }
460a79e1 7330 return 1;
f7e1cb6e
JW
7331}
7332__setup("cgroup.memory=", cgroup_memory);
11092087 7333
2d11085e 7334/*
1081312f
MH
7335 * subsys_initcall() for memory controller.
7336 *
308167fc
SAS
7337 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7338 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7339 * basically everything that doesn't depend on a specific mem_cgroup structure
7340 * should be initialized from here.
2d11085e
MH
7341 */
7342static int __init mem_cgroup_init(void)
7343{
95a045f6
JW
7344 int cpu, node;
7345
f3344adf
MS
7346 /*
7347 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7348 * used for per-memcg-per-cpu caching of per-node statistics. In order
7349 * to work fine, we should make sure that the overfill threshold can't
7350 * exceed S32_MAX / PAGE_SIZE.
7351 */
7352 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7353
308167fc
SAS
7354 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7355 memcg_hotplug_cpu_dead);
95a045f6
JW
7356
7357 for_each_possible_cpu(cpu)
7358 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7359 drain_local_stock);
7360
7361 for_each_node(node) {
7362 struct mem_cgroup_tree_per_node *rtpn;
95a045f6
JW
7363
7364 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7365 node_online(node) ? node : NUMA_NO_NODE);
7366
ef8f2327 7367 rtpn->rb_root = RB_ROOT;
fa90b2fd 7368 rtpn->rb_rightmost = NULL;
ef8f2327 7369 spin_lock_init(&rtpn->lock);
95a045f6
JW
7370 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7371 }
7372
2d11085e
MH
7373 return 0;
7374}
7375subsys_initcall(mem_cgroup_init);
21afa38e 7376
e55b9f96 7377#ifdef CONFIG_SWAP
358c07fc
AB
7378static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7379{
1c2d479a 7380 while (!refcount_inc_not_zero(&memcg->id.ref)) {
358c07fc
AB
7381 /*
7382 * The root cgroup cannot be destroyed, so it's refcount must
7383 * always be >= 1.
7384 */
7848ed62 7385 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
358c07fc
AB
7386 VM_BUG_ON(1);
7387 break;
7388 }
7389 memcg = parent_mem_cgroup(memcg);
7390 if (!memcg)
7391 memcg = root_mem_cgroup;
7392 }
7393 return memcg;
7394}
7395
21afa38e
JW
7396/**
7397 * mem_cgroup_swapout - transfer a memsw charge to swap
3ecb0087 7398 * @folio: folio whose memsw charge to transfer
21afa38e
JW
7399 * @entry: swap entry to move the charge to
7400 *
3ecb0087 7401 * Transfer the memsw charge of @folio to @entry.
21afa38e 7402 */
3ecb0087 7403void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
21afa38e 7404{
1f47b61f 7405 struct mem_cgroup *memcg, *swap_memcg;
d6810d73 7406 unsigned int nr_entries;
21afa38e
JW
7407 unsigned short oldid;
7408
3ecb0087
MWO
7409 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7410 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
21afa38e 7411
76358ab5
AS
7412 if (mem_cgroup_disabled())
7413 return;
7414
b94c4e94 7415 if (!do_memsw_account())
21afa38e
JW
7416 return;
7417
3ecb0087 7418 memcg = folio_memcg(folio);
21afa38e 7419
3ecb0087 7420 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
21afa38e
JW
7421 if (!memcg)
7422 return;
7423
1f47b61f
VD
7424 /*
7425 * In case the memcg owning these pages has been offlined and doesn't
7426 * have an ID allocated to it anymore, charge the closest online
7427 * ancestor for the swap instead and transfer the memory+swap charge.
7428 */
7429 swap_memcg = mem_cgroup_id_get_online(memcg);
3ecb0087 7430 nr_entries = folio_nr_pages(folio);
d6810d73
HY
7431 /* Get references for the tail pages, too */
7432 if (nr_entries > 1)
7433 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7434 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7435 nr_entries);
3ecb0087 7436 VM_BUG_ON_FOLIO(oldid, folio);
c9019e9b 7437 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
21afa38e 7438
3ecb0087 7439 folio->memcg_data = 0;
21afa38e
JW
7440
7441 if (!mem_cgroup_is_root(memcg))
d6810d73 7442 page_counter_uncharge(&memcg->memory, nr_entries);
21afa38e 7443
b25806dc 7444 if (memcg != swap_memcg) {
1f47b61f 7445 if (!mem_cgroup_is_root(swap_memcg))
d6810d73
HY
7446 page_counter_charge(&swap_memcg->memsw, nr_entries);
7447 page_counter_uncharge(&memcg->memsw, nr_entries);
1f47b61f
VD
7448 }
7449
ce9ce665
SAS
7450 /*
7451 * Interrupts should be disabled here because the caller holds the
b93b0163 7452 * i_pages lock which is taken with interrupts-off. It is
ce9ce665 7453 * important here to have the interrupts disabled because it is the
b93b0163 7454 * only synchronisation we have for updating the per-CPU variables.
ce9ce665 7455 */
be3e67b5 7456 memcg_stats_lock();
6e0110c2 7457 mem_cgroup_charge_statistics(memcg, -nr_entries);
be3e67b5 7458 memcg_stats_unlock();
3ecb0087 7459 memcg_check_events(memcg, folio_nid(folio));
73f576c0 7460
1a3e1f40 7461 css_put(&memcg->css);
21afa38e
JW
7462}
7463
38d8b4e6 7464/**
e2e3fdc7
MWO
7465 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7466 * @folio: folio being added to swap
37e84351
VD
7467 * @entry: swap entry to charge
7468 *
e2e3fdc7 7469 * Try to charge @folio's memcg for the swap space at @entry.
37e84351
VD
7470 *
7471 * Returns 0 on success, -ENOMEM on failure.
7472 */
e2e3fdc7 7473int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
37e84351 7474{
e2e3fdc7 7475 unsigned int nr_pages = folio_nr_pages(folio);
37e84351 7476 struct page_counter *counter;
38d8b4e6 7477 struct mem_cgroup *memcg;
37e84351
VD
7478 unsigned short oldid;
7479
b94c4e94 7480 if (do_memsw_account())
37e84351
VD
7481 return 0;
7482
e2e3fdc7 7483 memcg = folio_memcg(folio);
37e84351 7484
e2e3fdc7 7485 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
37e84351
VD
7486 if (!memcg)
7487 return 0;
7488
f3a53a3a
TH
7489 if (!entry.val) {
7490 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
bb98f2c5 7491 return 0;
f3a53a3a 7492 }
bb98f2c5 7493
1f47b61f
VD
7494 memcg = mem_cgroup_id_get_online(memcg);
7495
b25806dc 7496 if (!mem_cgroup_is_root(memcg) &&
38d8b4e6 7497 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
f3a53a3a
TH
7498 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7499 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
1f47b61f 7500 mem_cgroup_id_put(memcg);
37e84351 7501 return -ENOMEM;
1f47b61f 7502 }
37e84351 7503
38d8b4e6
HY
7504 /* Get references for the tail pages, too */
7505 if (nr_pages > 1)
7506 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7507 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
e2e3fdc7 7508 VM_BUG_ON_FOLIO(oldid, folio);
c9019e9b 7509 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
37e84351 7510
37e84351
VD
7511 return 0;
7512}
7513
21afa38e 7514/**
01c4b28c 7515 * __mem_cgroup_uncharge_swap - uncharge swap space
21afa38e 7516 * @entry: swap entry to uncharge
38d8b4e6 7517 * @nr_pages: the amount of swap space to uncharge
21afa38e 7518 */
01c4b28c 7519void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
21afa38e
JW
7520{
7521 struct mem_cgroup *memcg;
7522 unsigned short id;
7523
c91bdc93
JW
7524 if (mem_cgroup_disabled())
7525 return;
7526
38d8b4e6 7527 id = swap_cgroup_record(entry, 0, nr_pages);
21afa38e 7528 rcu_read_lock();
adbe427b 7529 memcg = mem_cgroup_from_id(id);
21afa38e 7530 if (memcg) {
b25806dc 7531 if (!mem_cgroup_is_root(memcg)) {
b94c4e94 7532 if (do_memsw_account())
38d8b4e6 7533 page_counter_uncharge(&memcg->memsw, nr_pages);
b94c4e94
JW
7534 else
7535 page_counter_uncharge(&memcg->swap, nr_pages);
37e84351 7536 }
c9019e9b 7537 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
38d8b4e6 7538 mem_cgroup_id_put_many(memcg, nr_pages);
21afa38e
JW
7539 }
7540 rcu_read_unlock();
7541}
7542
d8b38438
VD
7543long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7544{
7545 long nr_swap_pages = get_nr_swap_pages();
7546
b25806dc 7547 if (mem_cgroup_disabled() || do_memsw_account())
d8b38438 7548 return nr_swap_pages;
7848ed62 7549 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
d8b38438 7550 nr_swap_pages = min_t(long, nr_swap_pages,
bbec2e15 7551 READ_ONCE(memcg->swap.max) -
d8b38438
VD
7552 page_counter_read(&memcg->swap));
7553 return nr_swap_pages;
7554}
7555
9202d527 7556bool mem_cgroup_swap_full(struct folio *folio)
5ccc5aba
VD
7557{
7558 struct mem_cgroup *memcg;
7559
9202d527 7560 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5ccc5aba
VD
7561
7562 if (vm_swap_full())
7563 return true;
b25806dc 7564 if (do_memsw_account())
5ccc5aba
VD
7565 return false;
7566
9202d527 7567 memcg = folio_memcg(folio);
5ccc5aba
VD
7568 if (!memcg)
7569 return false;
7570
7848ed62 7571 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
4b82ab4f
JK
7572 unsigned long usage = page_counter_read(&memcg->swap);
7573
7574 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7575 usage * 2 >= READ_ONCE(memcg->swap.max))
5ccc5aba 7576 return true;
4b82ab4f 7577 }
5ccc5aba
VD
7578
7579 return false;
7580}
7581
eccb52e7 7582static int __init setup_swap_account(char *s)
21afa38e 7583{
b25806dc
JW
7584 pr_warn_once("The swapaccount= commandline option is deprecated. "
7585 "Please report your usecase to linux-mm@kvack.org if you "
7586 "depend on this functionality.\n");
21afa38e
JW
7587 return 1;
7588}
eccb52e7 7589__setup("swapaccount=", setup_swap_account);
21afa38e 7590
37e84351
VD
7591static u64 swap_current_read(struct cgroup_subsys_state *css,
7592 struct cftype *cft)
7593{
7594 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7595
7596 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7597}
7598
4b82ab4f
JK
7599static int swap_high_show(struct seq_file *m, void *v)
7600{
7601 return seq_puts_memcg_tunable(m,
7602 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7603}
7604
7605static ssize_t swap_high_write(struct kernfs_open_file *of,
7606 char *buf, size_t nbytes, loff_t off)
7607{
7608 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7609 unsigned long high;
7610 int err;
7611
7612 buf = strstrip(buf);
7613 err = page_counter_memparse(buf, "max", &high);
7614 if (err)
7615 return err;
7616
7617 page_counter_set_high(&memcg->swap, high);
7618
7619 return nbytes;
7620}
7621
37e84351
VD
7622static int swap_max_show(struct seq_file *m, void *v)
7623{
677dc973
CD
7624 return seq_puts_memcg_tunable(m,
7625 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
37e84351
VD
7626}
7627
7628static ssize_t swap_max_write(struct kernfs_open_file *of,
7629 char *buf, size_t nbytes, loff_t off)
7630{
7631 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7632 unsigned long max;
7633 int err;
7634
7635 buf = strstrip(buf);
7636 err = page_counter_memparse(buf, "max", &max);
7637 if (err)
7638 return err;
7639
be09102b 7640 xchg(&memcg->swap.max, max);
37e84351
VD
7641
7642 return nbytes;
7643}
7644
f3a53a3a
TH
7645static int swap_events_show(struct seq_file *m, void *v)
7646{
aa9694bb 7647 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
f3a53a3a 7648
4b82ab4f
JK
7649 seq_printf(m, "high %lu\n",
7650 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
f3a53a3a
TH
7651 seq_printf(m, "max %lu\n",
7652 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7653 seq_printf(m, "fail %lu\n",
7654 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7655
7656 return 0;
7657}
7658
37e84351
VD
7659static struct cftype swap_files[] = {
7660 {
7661 .name = "swap.current",
7662 .flags = CFTYPE_NOT_ON_ROOT,
7663 .read_u64 = swap_current_read,
7664 },
4b82ab4f
JK
7665 {
7666 .name = "swap.high",
7667 .flags = CFTYPE_NOT_ON_ROOT,
7668 .seq_show = swap_high_show,
7669 .write = swap_high_write,
7670 },
37e84351
VD
7671 {
7672 .name = "swap.max",
7673 .flags = CFTYPE_NOT_ON_ROOT,
7674 .seq_show = swap_max_show,
7675 .write = swap_max_write,
7676 },
f3a53a3a
TH
7677 {
7678 .name = "swap.events",
7679 .flags = CFTYPE_NOT_ON_ROOT,
7680 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7681 .seq_show = swap_events_show,
7682 },
37e84351
VD
7683 { } /* terminate */
7684};
7685
eccb52e7 7686static struct cftype memsw_files[] = {
21afa38e
JW
7687 {
7688 .name = "memsw.usage_in_bytes",
7689 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7690 .read_u64 = mem_cgroup_read_u64,
7691 },
7692 {
7693 .name = "memsw.max_usage_in_bytes",
7694 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7695 .write = mem_cgroup_reset,
7696 .read_u64 = mem_cgroup_read_u64,
7697 },
7698 {
7699 .name = "memsw.limit_in_bytes",
7700 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7701 .write = mem_cgroup_write,
7702 .read_u64 = mem_cgroup_read_u64,
7703 },
7704 {
7705 .name = "memsw.failcnt",
7706 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7707 .write = mem_cgroup_reset,
7708 .read_u64 = mem_cgroup_read_u64,
7709 },
7710 { }, /* terminate */
7711};
7712
f4840ccf
JW
7713#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7714/**
7715 * obj_cgroup_may_zswap - check if this cgroup can zswap
7716 * @objcg: the object cgroup
7717 *
7718 * Check if the hierarchical zswap limit has been reached.
7719 *
7720 * This doesn't check for specific headroom, and it is not atomic
7721 * either. But with zswap, the size of the allocation is only known
7722 * once compression has occured, and this optimistic pre-check avoids
7723 * spending cycles on compression when there is already no room left
7724 * or zswap is disabled altogether somewhere in the hierarchy.
7725 */
7726bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7727{
7728 struct mem_cgroup *memcg, *original_memcg;
7729 bool ret = true;
7730
7731 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7732 return true;
7733
7734 original_memcg = get_mem_cgroup_from_objcg(objcg);
7848ed62 7735 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
f4840ccf
JW
7736 memcg = parent_mem_cgroup(memcg)) {
7737 unsigned long max = READ_ONCE(memcg->zswap_max);
7738 unsigned long pages;
7739
7740 if (max == PAGE_COUNTER_MAX)
7741 continue;
7742 if (max == 0) {
7743 ret = false;
7744 break;
7745 }
7746
7747 cgroup_rstat_flush(memcg->css.cgroup);
7748 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7749 if (pages < max)
7750 continue;
7751 ret = false;
7752 break;
7753 }
7754 mem_cgroup_put(original_memcg);
7755 return ret;
7756}
7757
7758/**
7759 * obj_cgroup_charge_zswap - charge compression backend memory
7760 * @objcg: the object cgroup
7761 * @size: size of compressed object
7762 *
7763 * This forces the charge after obj_cgroup_may_swap() allowed
7764 * compression and storage in zwap for this cgroup to go ahead.
7765 */
7766void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7767{
7768 struct mem_cgroup *memcg;
7769
7770 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7771 return;
7772
7773 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7774
7775 /* PF_MEMALLOC context, charging must succeed */
7776 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7777 VM_WARN_ON_ONCE(1);
7778
7779 rcu_read_lock();
7780 memcg = obj_cgroup_memcg(objcg);
7781 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7782 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7783 rcu_read_unlock();
7784}
7785
7786/**
7787 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7788 * @objcg: the object cgroup
7789 * @size: size of compressed object
7790 *
7791 * Uncharges zswap memory on page in.
7792 */
7793void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
7794{
7795 struct mem_cgroup *memcg;
7796
7797 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7798 return;
7799
7800 obj_cgroup_uncharge(objcg, size);
7801
7802 rcu_read_lock();
7803 memcg = obj_cgroup_memcg(objcg);
7804 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
7805 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
7806 rcu_read_unlock();
7807}
7808
7809static u64 zswap_current_read(struct cgroup_subsys_state *css,
7810 struct cftype *cft)
7811{
7812 cgroup_rstat_flush(css->cgroup);
7813 return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
7814}
7815
7816static int zswap_max_show(struct seq_file *m, void *v)
7817{
7818 return seq_puts_memcg_tunable(m,
7819 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
7820}
7821
7822static ssize_t zswap_max_write(struct kernfs_open_file *of,
7823 char *buf, size_t nbytes, loff_t off)
7824{
7825 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7826 unsigned long max;
7827 int err;
7828
7829 buf = strstrip(buf);
7830 err = page_counter_memparse(buf, "max", &max);
7831 if (err)
7832 return err;
7833
7834 xchg(&memcg->zswap_max, max);
7835
7836 return nbytes;
7837}
7838
7839static struct cftype zswap_files[] = {
7840 {
7841 .name = "zswap.current",
7842 .flags = CFTYPE_NOT_ON_ROOT,
7843 .read_u64 = zswap_current_read,
7844 },
7845 {
7846 .name = "zswap.max",
7847 .flags = CFTYPE_NOT_ON_ROOT,
7848 .seq_show = zswap_max_show,
7849 .write = zswap_max_write,
7850 },
7851 { } /* terminate */
7852};
7853#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
7854
21afa38e
JW
7855static int __init mem_cgroup_swap_init(void)
7856{
2d1c4980 7857 if (mem_cgroup_disabled())
eccb52e7
JW
7858 return 0;
7859
7860 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7861 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
f4840ccf
JW
7862#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7863 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
7864#endif
21afa38e
JW
7865 return 0;
7866}
b25806dc 7867subsys_initcall(mem_cgroup_swap_init);
21afa38e 7868
e55b9f96 7869#endif /* CONFIG_SWAP */