/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET	128
#define SOFTLIMIT_EVENTS_TARGET		1024
#define NUMAINFO_EVENTS_TARGET		1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

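/*
 * Illustrative note (editorial, not in the original source): MEMFILE_PRIVATE()
 * packs a resource type and an attribute into one cft->private word.  The
 * cftype definitions later in this file use values like
 * MEMFILE_PRIVATE(_MEM, RES_LIMIT); MEMFILE_TYPE() and MEMFILE_ATTR() then
 * recover the two halves when the file is read or written.
 */
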
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return page count for single (non recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement a periodic synchronization of the counter in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because they account memory. Even if we provide a quick-and-fuzzy read, we
 * always have to visit all online cpus and make the sum. So, for now,
 * unnecessary synchronization is not implemented. (just implemented for cpu
 * hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

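/*
 * Illustrative example (editorial note, not in the original source): the
 * signed subtraction above works like time_after().  If nr_page_events has
 * reached 1000 and the stored target is 900, then (long)900 - (long)1000 is
 * -100 < 0, so the threshold has been crossed and the next target becomes
 * 1000 plus the per-target step (e.g. THRESHOLDS_EVENTS_TARGET == 128, giving
 * 1128).
 */
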
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

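/*
 * Example callback (editorial illustration, not part of the upstream file):
 * a caller could count the tasks in a hierarchy like this; the OOM killer
 * uses the same interface with callbacks that evaluate or kill individual
 * tasks.
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	int nr_tasks = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr_tasks);
 */
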
/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

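/*
 * Worked example (editorial illustration): with a memory limit of 1000 pages
 * and a usage of 800 pages the margin is 200; if memsw accounting is active
 * with a memsw limit of 900 and a memsw usage of 850, the margin shrinks to
 * min(200, 50) = 50 pages.
 */
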
/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of a
 * moving cgroup.  This is for waiting at high memory pressure caused by
 * "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;
		unsigned long swap_limit;

		memsw_limit = memcg->memsw.limit;
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
	}
	return limit;
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	mutex_lock(&oom_lock);
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, it will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fall back to the current node in that case for simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor too little,
				 * which would keep us coming back to reclaim
				 * from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * This subtree of our hierarchy is already locked,
			 * so we cannot grant a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree, so we have
		 * to clean up what we set up, up to the failing subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

d7365e78 1618/**
81f8c3a4
JW
1619 * lock_page_memcg - lock a page->mem_cgroup binding
1620 * @page: the page
32047e2a 1621 *
81f8c3a4
JW
1622 * This function protects unlocked LRU pages from being moved to
1623 * another cgroup and stabilizes their page->mem_cgroup binding.
d69b042f 1624 */
62cccb8c 1625void lock_page_memcg(struct page *page)
89c06bd5
KH
1626{
1627 struct mem_cgroup *memcg;
6de22619 1628 unsigned long flags;
89c06bd5 1629
6de22619
JW
1630 /*
1631 * The RCU lock is held throughout the transaction. The fast
1632 * path can get away without acquiring the memcg->move_lock
1633 * because page moving starts with an RCU grace period.
6de22619 1634 */
d7365e78
JW
1635 rcu_read_lock();
1636
1637 if (mem_cgroup_disabled())
62cccb8c 1638 return;
89c06bd5 1639again:
1306a85a 1640 memcg = page->mem_cgroup;
29833315 1641 if (unlikely(!memcg))
62cccb8c 1642 return;
d7365e78 1643
bdcbb659 1644 if (atomic_read(&memcg->moving_account) <= 0)
62cccb8c 1645 return;
89c06bd5 1646
6de22619 1647 spin_lock_irqsave(&memcg->move_lock, flags);
1306a85a 1648 if (memcg != page->mem_cgroup) {
6de22619 1649 spin_unlock_irqrestore(&memcg->move_lock, flags);
89c06bd5
KH
1650 goto again;
1651 }
6de22619
JW
1652
1653 /*
1654 * When charge migration first begins, we can have locked and
1655 * unlocked page stat updates happening concurrently. Track
81f8c3a4 1656 * the task that holds the lock for unlock_page_memcg().
6de22619
JW
1657 */
1658 memcg->move_lock_task = current;
1659 memcg->move_lock_flags = flags;
d7365e78 1660
62cccb8c 1661 return;
89c06bd5 1662}
81f8c3a4 1663EXPORT_SYMBOL(lock_page_memcg);
89c06bd5 1664
d7365e78 1665/**
81f8c3a4 1666 * unlock_page_memcg - unlock a page->mem_cgroup binding
62cccb8c 1667 * @page: the page
d7365e78 1668 */
62cccb8c 1669void unlock_page_memcg(struct page *page)
89c06bd5 1670{
62cccb8c
JW
1671 struct mem_cgroup *memcg = page->mem_cgroup;
1672
6de22619
JW
1673 if (memcg && memcg->move_lock_task == current) {
1674 unsigned long flags = memcg->move_lock_flags;
1675
1676 memcg->move_lock_task = NULL;
1677 memcg->move_lock_flags = 0;
1678
1679 spin_unlock_irqrestore(&memcg->move_lock, flags);
1680 }
89c06bd5 1681
d7365e78 1682 rcu_read_unlock();
89c06bd5 1683}
81f8c3a4 1684EXPORT_SYMBOL(unlock_page_memcg);
89c06bd5 1685
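/*
 * Illustrative sketch, not part of memcontrol.c: the lock/update/unlock
 * shape that page-stat updaters such as the rmap code follow.  The
 * function name is hypothetical and the statistic index is only an
 * example.
 */
static void example_account_file_mapped(struct page *page)
{
	lock_page_memcg(page);		/* stabilize page->mem_cgroup */
	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, 1);
	unlock_page_memcg(page);	/* drops the RCU lock taken above */
}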
cdec2e42
KH
1686/*
1687 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1688 * TODO: it may be necessary to use larger batches on big-iron systems.
1689 */
7ec99d62 1690#define CHARGE_BATCH 32U
cdec2e42
KH
1691struct memcg_stock_pcp {
1692 struct mem_cgroup *cached; /* must never be the root cgroup */
11c9ea4e 1693 unsigned int nr_pages;
cdec2e42 1694 struct work_struct work;
26fe6168 1695 unsigned long flags;
a0db00fc 1696#define FLUSHING_CACHED_CHARGE 0
cdec2e42
KH
1697};
1698static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
9f50fad6 1699static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42 1700
a0956d54
SS
1701/**
1702 * consume_stock: Try to consume stocked charge on this cpu.
1703 * @memcg: memcg to consume from.
1704 * @nr_pages: how many pages to charge.
1705 *
1706 * The charges will only happen if @memcg matches the current cpu's memcg
1707 * stock, and at least @nr_pages are available in that stock. Failure to
1708 * service an allocation will refill the stock.
1709 *
1710 * Returns %true if successful, %false otherwise.
cdec2e42 1711 */
a0956d54 1712static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
1713{
1714 struct memcg_stock_pcp *stock;
db2ba40c 1715 unsigned long flags;
3e32cb2e 1716 bool ret = false;
cdec2e42 1717
a0956d54 1718 if (nr_pages > CHARGE_BATCH)
3e32cb2e 1719 return ret;
a0956d54 1720
db2ba40c
JW
1721 local_irq_save(flags);
1722
1723 stock = this_cpu_ptr(&memcg_stock);
3e32cb2e 1724 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
a0956d54 1725 stock->nr_pages -= nr_pages;
3e32cb2e
JW
1726 ret = true;
1727 }
db2ba40c
JW
1728
1729 local_irq_restore(flags);
1730
cdec2e42
KH
1731 return ret;
1732}
1733
1734/*
3e32cb2e 1735 * Drain the per-cpu cached charge back to the counters and reset the cached information.
cdec2e42
KH
1736 */
1737static void drain_stock(struct memcg_stock_pcp *stock)
1738{
1739 struct mem_cgroup *old = stock->cached;
1740
11c9ea4e 1741 if (stock->nr_pages) {
3e32cb2e 1742 page_counter_uncharge(&old->memory, stock->nr_pages);
7941d214 1743 if (do_memsw_account())
3e32cb2e 1744 page_counter_uncharge(&old->memsw, stock->nr_pages);
e8ea14cc 1745 css_put_many(&old->css, stock->nr_pages);
11c9ea4e 1746 stock->nr_pages = 0;
cdec2e42
KH
1747 }
1748 stock->cached = NULL;
cdec2e42
KH
1749}
1750
cdec2e42
KH
1751static void drain_local_stock(struct work_struct *dummy)
1752{
db2ba40c
JW
1753 struct memcg_stock_pcp *stock;
1754 unsigned long flags;
1755
1756 local_irq_save(flags);
1757
1758 stock = this_cpu_ptr(&memcg_stock);
cdec2e42 1759 drain_stock(stock);
26fe6168 1760 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
db2ba40c
JW
1761
1762 local_irq_restore(flags);
cdec2e42
KH
1763}
1764
1765/*
3e32cb2e 1766 * Cache charges (nr_pages) in the local per-cpu area.
320cc51d 1767 * They will be consumed later by consume_stock().
cdec2e42 1768 */
c0ff4b85 1769static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42 1770{
db2ba40c
JW
1771 struct memcg_stock_pcp *stock;
1772 unsigned long flags;
1773
1774 local_irq_save(flags);
cdec2e42 1775
db2ba40c 1776 stock = this_cpu_ptr(&memcg_stock);
c0ff4b85 1777 if (stock->cached != memcg) { /* reset if necessary */
cdec2e42 1778 drain_stock(stock);
c0ff4b85 1779 stock->cached = memcg;
cdec2e42 1780 }
11c9ea4e 1781 stock->nr_pages += nr_pages;
db2ba40c
JW
1782
1783 local_irq_restore(flags);
cdec2e42
KH
1784}
1785
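/*
 * Illustrative sketch, not part of memcontrol.c: how the per-cpu stock
 * amortizes page_counter updates.  Numbers assume CHARGE_BATCH == 32;
 * memsw accounting, css references and the try-charge failure path are
 * omitted (the real slow path uses page_counter_try_charge()).
 */
static void example_stock_roundtrip(struct mem_cgroup *memcg)
{
	/* first single-page charge: the stock is empty, the fast path fails */
	if (!consume_stock(memcg, 1)) {
		/* the slow path charges a whole batch to the shared counter ... */
		page_counter_charge(&memcg->memory, 32);
		/* ... and parks the 31 unused pages in this cpu's stock */
		refill_stock(memcg, 31);
	}
	/* the next 31 single-page charges are served from the local stock */
}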
1786/*
c0ff4b85 1787 * Drain all per-CPU charge caches for the given root_memcg and the
6d3d6aa2 1788 * subtree of the hierarchy under it.
cdec2e42 1789 */
6d3d6aa2 1790static void drain_all_stock(struct mem_cgroup *root_memcg)
cdec2e42 1791{
26fe6168 1792 int cpu, curcpu;
d38144b7 1793
6d3d6aa2
JW
1794 /* If someone's already draining, avoid running more workers. */
1795 if (!mutex_trylock(&percpu_charge_mutex))
1796 return;
cdec2e42 1797 /* Notify other cpus that system-wide "drain" is running */
cdec2e42 1798 get_online_cpus();
5af12d0e 1799 curcpu = get_cpu();
cdec2e42
KH
1800 for_each_online_cpu(cpu) {
1801 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
c0ff4b85 1802 struct mem_cgroup *memcg;
26fe6168 1803
c0ff4b85
R
1804 memcg = stock->cached;
1805 if (!memcg || !stock->nr_pages)
26fe6168 1806 continue;
2314b42d 1807 if (!mem_cgroup_is_descendant(memcg, root_memcg))
3e92041d 1808 continue;
d1a05b69
MH
1809 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1810 if (cpu == curcpu)
1811 drain_local_stock(&stock->work);
1812 else
1813 schedule_work_on(cpu, &stock->work);
1814 }
cdec2e42 1815 }
5af12d0e 1816 put_cpu();
f894ffa8 1817 put_online_cpus();
9f50fad6 1818 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
1819}
1820
308167fc 1821static int memcg_hotplug_cpu_dead(unsigned int cpu)
cdec2e42 1822{
cdec2e42
KH
1823 struct memcg_stock_pcp *stock;
1824
cdec2e42
KH
1825 stock = &per_cpu(memcg_stock, cpu);
1826 drain_stock(stock);
308167fc 1827 return 0;
cdec2e42
KH
1828}
1829
f7e1cb6e
JW
1830static void reclaim_high(struct mem_cgroup *memcg,
1831 unsigned int nr_pages,
1832 gfp_t gfp_mask)
1833{
1834 do {
1835 if (page_counter_read(&memcg->memory) <= memcg->high)
1836 continue;
1837 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1838 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1839 } while ((memcg = parent_mem_cgroup(memcg)));
1840}
1841
1842static void high_work_func(struct work_struct *work)
1843{
1844 struct mem_cgroup *memcg;
1845
1846 memcg = container_of(work, struct mem_cgroup, high_work);
1847 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1848}
1849
b23afb93
TH
1850/*
1851 * Scheduled by try_charge() to be executed from the userland return path
1852 * and reclaims memory over the high limit.
1853 */
1854void mem_cgroup_handle_over_high(void)
1855{
1856 unsigned int nr_pages = current->memcg_nr_pages_over_high;
f7e1cb6e 1857 struct mem_cgroup *memcg;
b23afb93
TH
1858
1859 if (likely(!nr_pages))
1860 return;
1861
f7e1cb6e
JW
1862 memcg = get_mem_cgroup_from_mm(current->mm);
1863 reclaim_high(memcg, nr_pages, GFP_KERNEL);
b23afb93
TH
1864 css_put(&memcg->css);
1865 current->memcg_nr_pages_over_high = 0;
1866}
1867
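/*
 * Illustrative sketch, not part of memcontrol.c: the resume-to-userspace
 * hook that actually runs the deferred high-limit reclaim.  In this
 * kernel the call sits in tracehook_notify_resume(); it is shown here in
 * simplified, hypothetical form.
 */
static inline void example_notify_resume(void)
{
	/* ... other TIF_NOTIFY_RESUME work ... */
	mem_cgroup_handle_over_high();	/* reclaims current->memcg_nr_pages_over_high */
}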
00501b53
JW
1868static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1869 unsigned int nr_pages)
8a9f3ccd 1870{
7ec99d62 1871 unsigned int batch = max(CHARGE_BATCH, nr_pages);
9b130619 1872 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
6539cc05 1873 struct mem_cgroup *mem_over_limit;
3e32cb2e 1874 struct page_counter *counter;
6539cc05 1875 unsigned long nr_reclaimed;
b70a2a21
JW
1876 bool may_swap = true;
1877 bool drained = false;
a636b327 1878
ce00a967 1879 if (mem_cgroup_is_root(memcg))
10d53c74 1880 return 0;
6539cc05 1881retry:
b6b6cc72 1882 if (consume_stock(memcg, nr_pages))
10d53c74 1883 return 0;
8a9f3ccd 1884
7941d214 1885 if (!do_memsw_account() ||
6071ca52
JW
1886 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1887 if (page_counter_try_charge(&memcg->memory, batch, &counter))
6539cc05 1888 goto done_restock;
7941d214 1889 if (do_memsw_account())
3e32cb2e
JW
1890 page_counter_uncharge(&memcg->memsw, batch);
1891 mem_over_limit = mem_cgroup_from_counter(counter, memory);
3fbe7244 1892 } else {
3e32cb2e 1893 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
b70a2a21 1894 may_swap = false;
3fbe7244 1895 }
7a81b88c 1896
6539cc05
JW
1897 if (batch > nr_pages) {
1898 batch = nr_pages;
1899 goto retry;
1900 }
6d61ef40 1901
06b078fc
JW
1902 /*
1903 * Unlike in global OOM situations, memcg is not in a physical
1904 * memory shortage. Allow dying and OOM-killed tasks to
1905 * bypass the last charges so that they can exit quickly and
1906 * free their memory.
1907 */
1908 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1909 fatal_signal_pending(current) ||
1910 current->flags & PF_EXITING))
10d53c74 1911 goto force;
06b078fc 1912
89a28483
JW
1913 /*
1914 * Prevent unbounded recursion when reclaim operations need to
1915 * allocate memory. This might exceed the limits temporarily,
1916 * but we prefer facilitating memory reclaim and getting back
1917 * under the limit over triggering OOM kills in these cases.
1918 */
1919 if (unlikely(current->flags & PF_MEMALLOC))
1920 goto force;
1921
06b078fc
JW
1922 if (unlikely(task_in_memcg_oom(current)))
1923 goto nomem;
1924
d0164adc 1925 if (!gfpflags_allow_blocking(gfp_mask))
6539cc05 1926 goto nomem;
4b534334 1927
241994ed
JW
1928 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1929
b70a2a21
JW
1930 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1931 gfp_mask, may_swap);
6539cc05 1932
61e02c74 1933 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
6539cc05 1934 goto retry;
28c34c29 1935
b70a2a21 1936 if (!drained) {
6d3d6aa2 1937 drain_all_stock(mem_over_limit);
b70a2a21
JW
1938 drained = true;
1939 goto retry;
1940 }
1941
28c34c29
JW
1942 if (gfp_mask & __GFP_NORETRY)
1943 goto nomem;
6539cc05
JW
1944 /*
1945 * Even though the limit is exceeded at this point, reclaim
1946 * may have been able to free some pages. Retry the charge
1947 * before killing the task.
1948 *
1949 * Only for regular pages, though: huge pages are rather
1950 * unlikely to succeed so close to the limit, and we fall back
1951 * to regular pages anyway in case of failure.
1952 */
61e02c74 1953 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
6539cc05
JW
1954 goto retry;
1955 /*
1956 * During a task move, charges can be counted twice. So it's
1957 * better to wait until the end of task_move if one is in progress.
1958 */
1959 if (mem_cgroup_wait_acct_move(mem_over_limit))
1960 goto retry;
1961
9b130619
JW
1962 if (nr_retries--)
1963 goto retry;
1964
06b078fc 1965 if (gfp_mask & __GFP_NOFAIL)
10d53c74 1966 goto force;
06b078fc 1967
6539cc05 1968 if (fatal_signal_pending(current))
10d53c74 1969 goto force;
6539cc05 1970
241994ed
JW
1971 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
1972
3608de07
JM
1973 mem_cgroup_oom(mem_over_limit, gfp_mask,
1974 get_order(nr_pages * PAGE_SIZE));
7a81b88c 1975nomem:
6d1fdc48 1976 if (!(gfp_mask & __GFP_NOFAIL))
3168ecbe 1977 return -ENOMEM;
10d53c74
TH
1978force:
1979 /*
1980 * The allocation either can't fail or will lead to more memory
1981 * being freed very soon. Allow memory usage to go over the limit
1982 * temporarily by force charging it.
1983 */
1984 page_counter_charge(&memcg->memory, nr_pages);
7941d214 1985 if (do_memsw_account())
10d53c74
TH
1986 page_counter_charge(&memcg->memsw, nr_pages);
1987 css_get_many(&memcg->css, nr_pages);
1988
1989 return 0;
6539cc05
JW
1990
1991done_restock:
e8ea14cc 1992 css_get_many(&memcg->css, batch);
6539cc05
JW
1993 if (batch > nr_pages)
1994 refill_stock(memcg, batch - nr_pages);
b23afb93 1995
241994ed 1996 /*
b23afb93
TH
1997 * If the hierarchy is above the normal consumption range, schedule
1998 * reclaim on returning to userland. We can perform reclaim here
71baba4b 1999 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
b23afb93
TH
2000 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2001 * not recorded as it most likely matches current's and won't
2002 * change in the meantime. As high limit is checked again before
2003 * reclaim, the cost of mismatch is negligible.
241994ed
JW
2004 */
2005 do {
b23afb93 2006 if (page_counter_read(&memcg->memory) > memcg->high) {
f7e1cb6e
JW
2007 /* Don't bother a random interrupted task */
2008 if (in_interrupt()) {
2009 schedule_work(&memcg->high_work);
2010 break;
2011 }
9516a18a 2012 current->memcg_nr_pages_over_high += batch;
b23afb93
TH
2013 set_notify_resume(current);
2014 break;
2015 }
241994ed 2016 } while ((memcg = parent_mem_cgroup(memcg)));
10d53c74
TH
2017
2018 return 0;
7a81b88c 2019}
8a9f3ccd 2020
00501b53 2021static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
a3032a2c 2022{
ce00a967
JW
2023 if (mem_cgroup_is_root(memcg))
2024 return;
2025
3e32cb2e 2026 page_counter_uncharge(&memcg->memory, nr_pages);
7941d214 2027 if (do_memsw_account())
3e32cb2e 2028 page_counter_uncharge(&memcg->memsw, nr_pages);
ce00a967 2029
e8ea14cc 2030 css_put_many(&memcg->css, nr_pages);
d01dd17f
KH
2031}
2032
0a31bc97
JW
2033static void lock_page_lru(struct page *page, int *isolated)
2034{
2035 struct zone *zone = page_zone(page);
2036
a52633d8 2037 spin_lock_irq(zone_lru_lock(zone));
0a31bc97
JW
2038 if (PageLRU(page)) {
2039 struct lruvec *lruvec;
2040
599d0c95 2041 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
0a31bc97
JW
2042 ClearPageLRU(page);
2043 del_page_from_lru_list(page, lruvec, page_lru(page));
2044 *isolated = 1;
2045 } else
2046 *isolated = 0;
2047}
2048
2049static void unlock_page_lru(struct page *page, int isolated)
2050{
2051 struct zone *zone = page_zone(page);
2052
2053 if (isolated) {
2054 struct lruvec *lruvec;
2055
599d0c95 2056 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
0a31bc97
JW
2057 VM_BUG_ON_PAGE(PageLRU(page), page);
2058 SetPageLRU(page);
2059 add_page_to_lru_list(page, lruvec, page_lru(page));
2060 }
a52633d8 2061 spin_unlock_irq(zone_lru_lock(zone));
0a31bc97
JW
2062}
2063
00501b53 2064static void commit_charge(struct page *page, struct mem_cgroup *memcg,
6abb5a86 2065 bool lrucare)
7a81b88c 2066{
0a31bc97 2067 int isolated;
9ce70c02 2068
1306a85a 2069 VM_BUG_ON_PAGE(page->mem_cgroup, page);
9ce70c02
HD
2070
2071 /*
2072 * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page
2073 * may already be on some other mem_cgroup's LRU. Take care of it.
2074 */
0a31bc97
JW
2075 if (lrucare)
2076 lock_page_lru(page, &isolated);
9ce70c02 2077
0a31bc97
JW
2078 /*
2079 * Nobody should be changing or seriously looking at
1306a85a 2080 * page->mem_cgroup at this point:
0a31bc97
JW
2081 *
2082 * - the page is uncharged
2083 *
2084 * - the page is off-LRU
2085 *
2086 * - an anonymous fault has exclusive page access, except for
2087 * a locked page table
2088 *
2089 * - a page cache insertion, a swapin fault, or a migration
2090 * have the page locked
2091 */
1306a85a 2092 page->mem_cgroup = memcg;
9ce70c02 2093
0a31bc97
JW
2094 if (lrucare)
2095 unlock_page_lru(page, isolated);
7a81b88c 2096}
66e1707b 2097
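/*
 * Illustrative sketch, not part of memcontrol.c: the charge/commit
 * protocol that the public mem_cgroup_try_charge()/commit/cancel API is
 * built on.  The function name and the insertion_failed parameter are
 * hypothetical stand-ins for a real insertion step.
 */
static int example_charge_new_page(struct page *page, struct mem_cgroup *memcg,
				   bool insertion_failed)
{
	int ret;

	ret = try_charge(memcg, GFP_KERNEL, 1);	/* reserve the counters */
	if (ret)
		return ret;			/* nothing to undo */

	if (insertion_failed) {			/* e.g. lost a race inserting the page */
		cancel_charge(memcg, 1);	/* give the reservation back */
		return -EBUSY;
	}

	commit_charge(page, memcg, false);	/* bind page->mem_cgroup, page not on LRU */
	return 0;
}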
127424c8 2098#ifndef CONFIG_SLOB
f3bb3043 2099static int memcg_alloc_cache_id(void)
55007d84 2100{
f3bb3043
VD
2101 int id, size;
2102 int err;
2103
dbcf73e2 2104 id = ida_simple_get(&memcg_cache_ida,
f3bb3043
VD
2105 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2106 if (id < 0)
2107 return id;
55007d84 2108
dbcf73e2 2109 if (id < memcg_nr_cache_ids)
f3bb3043
VD
2110 return id;
2111
2112 /*
2113 * There's no space for the new id in memcg_caches arrays,
2114 * so we have to grow them.
2115 */
05257a1a 2116 down_write(&memcg_cache_ids_sem);
f3bb3043
VD
2117
2118 size = 2 * (id + 1);
55007d84
GC
2119 if (size < MEMCG_CACHES_MIN_SIZE)
2120 size = MEMCG_CACHES_MIN_SIZE;
2121 else if (size > MEMCG_CACHES_MAX_SIZE)
2122 size = MEMCG_CACHES_MAX_SIZE;
2123
f3bb3043 2124 err = memcg_update_all_caches(size);
60d3fd32
VD
2125 if (!err)
2126 err = memcg_update_all_list_lrus(size);
05257a1a
VD
2127 if (!err)
2128 memcg_nr_cache_ids = size;
2129
2130 up_write(&memcg_cache_ids_sem);
2131
f3bb3043 2132 if (err) {
dbcf73e2 2133 ida_simple_remove(&memcg_cache_ida, id);
f3bb3043
VD
2134 return err;
2135 }
2136 return id;
2137}
2138
2139static void memcg_free_cache_id(int id)
2140{
dbcf73e2 2141 ida_simple_remove(&memcg_cache_ida, id);
55007d84
GC
2142}
2143
d5b3cf71 2144struct memcg_kmem_cache_create_work {
5722d094
VD
2145 struct mem_cgroup *memcg;
2146 struct kmem_cache *cachep;
2147 struct work_struct work;
2148};
2149
d5b3cf71 2150static void memcg_kmem_cache_create_func(struct work_struct *w)
d7f25f8a 2151{
d5b3cf71
VD
2152 struct memcg_kmem_cache_create_work *cw =
2153 container_of(w, struct memcg_kmem_cache_create_work, work);
5722d094
VD
2154 struct mem_cgroup *memcg = cw->memcg;
2155 struct kmem_cache *cachep = cw->cachep;
d7f25f8a 2156
d5b3cf71 2157 memcg_create_kmem_cache(memcg, cachep);
bd673145 2158
5722d094 2159 css_put(&memcg->css);
d7f25f8a
GC
2160 kfree(cw);
2161}
2162
2163/*
2164 * Enqueue the creation of a per-memcg kmem_cache.
d7f25f8a 2165 */
d5b3cf71
VD
2166static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2167 struct kmem_cache *cachep)
d7f25f8a 2168{
d5b3cf71 2169 struct memcg_kmem_cache_create_work *cw;
d7f25f8a 2170
776ed0f0 2171 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
8135be5a 2172 if (!cw)
d7f25f8a 2173 return;
8135be5a
VD
2174
2175 css_get(&memcg->css);
d7f25f8a
GC
2176
2177 cw->memcg = memcg;
2178 cw->cachep = cachep;
d5b3cf71 2179 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
d7f25f8a 2180
17cc4dfe 2181 queue_work(memcg_kmem_cache_wq, &cw->work);
d7f25f8a
GC
2182}
2183
d5b3cf71
VD
2184static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2185 struct kmem_cache *cachep)
0e9d92f2
GC
2186{
2187 /*
2188 * We need to stop accounting when we kmalloc, because if the
2189 * corresponding kmalloc cache is not yet created, the first allocation
d5b3cf71 2190 * in __memcg_schedule_kmem_cache_create will recurse.
0e9d92f2
GC
2191 *
2192 * However, it is better to enclose the whole function. Depending on
2193 * the debugging options enabled, INIT_WORK(), for instance, can
2194 * trigger an allocation. This too, will make us recurse. Because at
2195 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2196 * the safest choice is to do it like this, wrapping the whole function.
2197 */
6f185c29 2198 current->memcg_kmem_skip_account = 1;
d5b3cf71 2199 __memcg_schedule_kmem_cache_create(memcg, cachep);
6f185c29 2200 current->memcg_kmem_skip_account = 0;
0e9d92f2 2201}
c67a8a68 2202
45264778
VD
2203static inline bool memcg_kmem_bypass(void)
2204{
2205 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2206 return true;
2207 return false;
2208}
2209
2210/**
2211 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2212 * @cachep: the original global kmem cache
2213 *
d7f25f8a
GC
2214 * Return the kmem_cache we're supposed to use for a slab allocation.
2215 * We try to use the current memcg's version of the cache.
2216 *
45264778
VD
2217 * If the cache does not exist yet and we are the first user of it, we
2218 * create it asynchronously in a workqueue and let the current allocation
2219 * go through with the original cache.
d7f25f8a 2220 *
45264778
VD
2221 * This function takes a reference to the cache it returns to assure it
2222 * won't get destroyed while we are working with it. Once the caller is
2223 * done with it, memcg_kmem_put_cache() must be called to release the
2224 * reference.
d7f25f8a 2225 */
45264778 2226struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
d7f25f8a
GC
2227{
2228 struct mem_cgroup *memcg;
959c8963 2229 struct kmem_cache *memcg_cachep;
2a4db7eb 2230 int kmemcg_id;
d7f25f8a 2231
f7ce3190 2232 VM_BUG_ON(!is_root_cache(cachep));
d7f25f8a 2233
45264778 2234 if (memcg_kmem_bypass())
230e9fc2
VD
2235 return cachep;
2236
9d100c5e 2237 if (current->memcg_kmem_skip_account)
0e9d92f2
GC
2238 return cachep;
2239
8135be5a 2240 memcg = get_mem_cgroup_from_mm(current->mm);
4db0c3c2 2241 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2a4db7eb 2242 if (kmemcg_id < 0)
ca0dde97 2243 goto out;
d7f25f8a 2244
2a4db7eb 2245 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
8135be5a
VD
2246 if (likely(memcg_cachep))
2247 return memcg_cachep;
ca0dde97
LZ
2248
2249 /*
2250 * If we are in a safe context (can wait, and not in interrupt
2251 * context), we could be predictable and return right away.
2252 * This would guarantee that the allocation being performed
2253 * already belongs in the new cache.
2254 *
2255 * However, there are some clashes that can arise from locking.
2256 * For instance, because we acquire the slab_mutex while doing
776ed0f0
VD
2257 * memcg_create_kmem_cache, this means no further allocation
2258 * could happen with the slab_mutex held. So it's better to
2259 * defer everything.
ca0dde97 2260 */
d5b3cf71 2261 memcg_schedule_kmem_cache_create(memcg, cachep);
ca0dde97 2262out:
8135be5a 2263 css_put(&memcg->css);
ca0dde97 2264 return cachep;
d7f25f8a 2265}
d7f25f8a 2266
45264778
VD
2267/**
2268 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2269 * @cachep: the cache returned by memcg_kmem_get_cache
2270 */
2271void memcg_kmem_put_cache(struct kmem_cache *cachep)
8135be5a
VD
2272{
2273 if (!is_root_cache(cachep))
f7ce3190 2274 css_put(&cachep->memcg_params.memcg->css);
8135be5a
VD
2275}
2276
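/*
 * Illustrative sketch, not part of memcontrol.c: the get/put pattern the
 * slab allocation hooks in mm/slab.h follow around an accounted
 * allocation.  Simplified (e.g. SLAB_ACCOUNT caches are not checked);
 * the function name is hypothetical.
 */
static void *example_slab_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	void *obj;

	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
		cachep = memcg_kmem_get_cache(cachep);	/* per-memcg clone + css ref */

	obj = kmem_cache_alloc(cachep, gfp);

	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
		memcg_kmem_put_cache(cachep);		/* drop the css reference */
	return obj;
}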
45264778
VD
2277/**
2278 * memcg_kmem_charge_memcg: charge a kmem page to the given memory cgroup
2279 * @page: page to charge
2280 * @gfp: reclaim mode
2281 * @order: allocation order
2282 * @memcg: memory cgroup to charge
2283 *
2284 * Returns 0 on success, an error code on failure.
2285 */
2286int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2287 struct mem_cgroup *memcg)
7ae1e1d0 2288{
f3ccb2c4
VD
2289 unsigned int nr_pages = 1 << order;
2290 struct page_counter *counter;
7ae1e1d0
GC
2291 int ret;
2292
f3ccb2c4 2293 ret = try_charge(memcg, gfp, nr_pages);
52c29b04 2294 if (ret)
f3ccb2c4 2295 return ret;
52c29b04
JW
2296
2297 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2298 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2299 cancel_charge(memcg, nr_pages);
2300 return -ENOMEM;
7ae1e1d0
GC
2301 }
2302
f3ccb2c4 2303 page->mem_cgroup = memcg;
7ae1e1d0 2304
f3ccb2c4 2305 return 0;
7ae1e1d0
GC
2306}
2307
45264778
VD
2308/**
2309 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2310 * @page: page to charge
2311 * @gfp: reclaim mode
2312 * @order: allocation order
2313 *
2314 * Returns 0 on success, an error code on failure.
2315 */
2316int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
7ae1e1d0 2317{
f3ccb2c4 2318 struct mem_cgroup *memcg;
fcff7d7e 2319 int ret = 0;
7ae1e1d0 2320
45264778
VD
2321 if (memcg_kmem_bypass())
2322 return 0;
2323
f3ccb2c4 2324 memcg = get_mem_cgroup_from_mm(current->mm);
c4159a75 2325 if (!mem_cgroup_is_root(memcg)) {
45264778 2326 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
c4159a75
VD
2327 if (!ret)
2328 __SetPageKmemcg(page);
2329 }
7ae1e1d0 2330 css_put(&memcg->css);
d05e83a6 2331 return ret;
7ae1e1d0 2332}
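/*
 * Illustrative sketch, not part of memcontrol.c: a simplified form of the
 * hook inside the page allocator (mm/page_alloc.c) that charges an
 * accounted (__GFP_ACCOUNT) allocation once the raw page has been
 * obtained.  The function name is hypothetical.
 */
static struct page *example_charge_accounted(struct page *page, gfp_t gfp,
					     unsigned int order)
{
	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
	    unlikely(memcg_kmem_charge(page, gfp, order))) {
		__free_pages(page, order);	/* charge failed: back out */
		page = NULL;
	}
	return page;
}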
45264778
VD
2333/**
2334 * memcg_kmem_uncharge: uncharge a kmem page
2335 * @page: page to uncharge
2336 * @order: allocation order
2337 */
2338void memcg_kmem_uncharge(struct page *page, int order)
7ae1e1d0 2339{
1306a85a 2340 struct mem_cgroup *memcg = page->mem_cgroup;
f3ccb2c4 2341 unsigned int nr_pages = 1 << order;
7ae1e1d0 2342
7ae1e1d0
GC
2343 if (!memcg)
2344 return;
2345
309381fe 2346 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
29833315 2347
52c29b04
JW
2348 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2349 page_counter_uncharge(&memcg->kmem, nr_pages);
2350
f3ccb2c4 2351 page_counter_uncharge(&memcg->memory, nr_pages);
7941d214 2352 if (do_memsw_account())
f3ccb2c4 2353 page_counter_uncharge(&memcg->memsw, nr_pages);
60d3fd32 2354
1306a85a 2355 page->mem_cgroup = NULL;
c4159a75
VD
2356
2357 /* slab pages do not have PageKmemcg flag set */
2358 if (PageKmemcg(page))
2359 __ClearPageKmemcg(page);
2360
f3ccb2c4 2361 css_put_many(&memcg->css, nr_pages);
60d3fd32 2362}
127424c8 2363#endif /* !CONFIG_SLOB */
7ae1e1d0 2364
ca3e0214
KH
2365#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2366
ca3e0214
KH
2367/*
2368 * Because tail pages are not marked as "used", set them up. We're under
a52633d8 2369 * zone_lru_lock, and migration entries are set up in all page mappings.
ca3e0214 2370 */
e94c8a9c 2371void mem_cgroup_split_huge_fixup(struct page *head)
ca3e0214 2372{
e94c8a9c 2373 int i;
ca3e0214 2374
3d37c4a9
KH
2375 if (mem_cgroup_disabled())
2376 return;
b070e65c 2377
29833315 2378 for (i = 1; i < HPAGE_PMD_NR; i++)
1306a85a 2379 head[i].mem_cgroup = head->mem_cgroup;
b9982f8d 2380
1306a85a 2381 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
b070e65c 2382 HPAGE_PMD_NR);
ca3e0214 2383}
12d27107 2384#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
ca3e0214 2385
c255a458 2386#ifdef CONFIG_MEMCG_SWAP
0a31bc97
JW
2387static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2388 bool charge)
d13d1443 2389{
0a31bc97
JW
2390 int val = (charge) ? 1 : -1;
2391 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
d13d1443 2392}
02491447
DN
2393
2394/**
2395 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2396 * @entry: swap entry to be moved
2397 * @from: mem_cgroup which the entry is moved from
2398 * @to: mem_cgroup which the entry is moved to
2399 *
2400 * It succeeds only when the swap_cgroup's record for this entry is the same
2401 * as the mem_cgroup's id of @from.
2402 *
2403 * Returns 0 on success, -EINVAL on failure.
2404 *
3e32cb2e 2405 * The caller must have charged to @to, IOW, called page_counter_charge() for
02491447
DN
2406 * both res and memsw, and called css_get().
2407 */
2408static int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 2409 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
2410{
2411 unsigned short old_id, new_id;
2412
34c00c31
LZ
2413 old_id = mem_cgroup_id(from);
2414 new_id = mem_cgroup_id(to);
02491447
DN
2415
2416 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
02491447 2417 mem_cgroup_swap_statistics(from, false);
483c30b5 2418 mem_cgroup_swap_statistics(to, true);
02491447
DN
2419 return 0;
2420 }
2421 return -EINVAL;
2422}
2423#else
2424static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 2425 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
2426{
2427 return -EINVAL;
2428}
8c7c6e34 2429#endif
d13d1443 2430
3e32cb2e 2431static DEFINE_MUTEX(memcg_limit_mutex);
f212ad7c 2432
d38d2a75 2433static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3e32cb2e 2434 unsigned long limit)
628f4235 2435{
3e32cb2e
JW
2436 unsigned long curusage;
2437 unsigned long oldusage;
2438 bool enlarge = false;
81d39c20 2439 int retry_count;
3e32cb2e 2440 int ret;
81d39c20
KH
2441
2442 /*
2443 * To keep hierarchical_reclaim simple, how long we should retry
2444 * depends on the caller. We set our retry count to be a function
2445 * of the number of children which we should visit in this loop.
2446 */
3e32cb2e
JW
2447 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2448 mem_cgroup_count_children(memcg);
81d39c20 2449
3e32cb2e 2450 oldusage = page_counter_read(&memcg->memory);
628f4235 2451
3e32cb2e 2452 do {
628f4235
KH
2453 if (signal_pending(current)) {
2454 ret = -EINTR;
2455 break;
2456 }
3e32cb2e
JW
2457
2458 mutex_lock(&memcg_limit_mutex);
2459 if (limit > memcg->memsw.limit) {
2460 mutex_unlock(&memcg_limit_mutex);
8c7c6e34 2461 ret = -EINVAL;
628f4235
KH
2462 break;
2463 }
3e32cb2e
JW
2464 if (limit > memcg->memory.limit)
2465 enlarge = true;
2466 ret = page_counter_limit(&memcg->memory, limit);
2467 mutex_unlock(&memcg_limit_mutex);
8c7c6e34
KH
2468
2469 if (!ret)
2470 break;
2471
b70a2a21
JW
2472 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2473
3e32cb2e 2474 curusage = page_counter_read(&memcg->memory);
81d39c20 2475 /* Usage is reduced ? */
f894ffa8 2476 if (curusage >= oldusage)
81d39c20
KH
2477 retry_count--;
2478 else
2479 oldusage = curusage;
3e32cb2e
JW
2480 } while (retry_count);
2481
3c11ecf4
KH
2482 if (!ret && enlarge)
2483 memcg_oom_recover(memcg);
14797e23 2484
8c7c6e34
KH
2485 return ret;
2486}
2487
338c8431 2488static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3e32cb2e 2489 unsigned long limit)
8c7c6e34 2490{
3e32cb2e
JW
2491 unsigned long curusage;
2492 unsigned long oldusage;
2493 bool enlarge = false;
81d39c20 2494 int retry_count;
3e32cb2e 2495 int ret;
8c7c6e34 2496
81d39c20 2497 /* see mem_cgroup_resize_limit */
3e32cb2e
JW
2498 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2499 mem_cgroup_count_children(memcg);
2500
2501 oldusage = page_counter_read(&memcg->memsw);
2502
2503 do {
8c7c6e34
KH
2504 if (signal_pending(current)) {
2505 ret = -EINTR;
2506 break;
2507 }
3e32cb2e
JW
2508
2509 mutex_lock(&memcg_limit_mutex);
2510 if (limit < memcg->memory.limit) {
2511 mutex_unlock(&memcg_limit_mutex);
8c7c6e34 2512 ret = -EINVAL;
8c7c6e34
KH
2513 break;
2514 }
3e32cb2e
JW
2515 if (limit > memcg->memsw.limit)
2516 enlarge = true;
2517 ret = page_counter_limit(&memcg->memsw, limit);
2518 mutex_unlock(&memcg_limit_mutex);
8c7c6e34
KH
2519
2520 if (!ret)
2521 break;
2522
b70a2a21
JW
2523 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2524
3e32cb2e 2525 curusage = page_counter_read(&memcg->memsw);
81d39c20 2526 /* Usage is reduced ? */
8c7c6e34 2527 if (curusage >= oldusage)
628f4235 2528 retry_count--;
81d39c20
KH
2529 else
2530 oldusage = curusage;
3e32cb2e
JW
2531 } while (retry_count);
2532
3c11ecf4
KH
2533 if (!ret && enlarge)
2534 memcg_oom_recover(memcg);
3e32cb2e 2535
628f4235
KH
2536 return ret;
2537}
2538
ef8f2327 2539unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
0608f43d
AM
2540 gfp_t gfp_mask,
2541 unsigned long *total_scanned)
2542{
2543 unsigned long nr_reclaimed = 0;
ef8f2327 2544 struct mem_cgroup_per_node *mz, *next_mz = NULL;
0608f43d
AM
2545 unsigned long reclaimed;
2546 int loop = 0;
ef8f2327 2547 struct mem_cgroup_tree_per_node *mctz;
3e32cb2e 2548 unsigned long excess;
0608f43d
AM
2549 unsigned long nr_scanned;
2550
2551 if (order > 0)
2552 return 0;
2553
ef8f2327 2554 mctz = soft_limit_tree_node(pgdat->node_id);
d6507ff5
MH
2555
2556 /*
2557 * Do not even bother to check the largest node if the root
2558 * is empty. Do it lockless to prevent lock bouncing. Races
2559 * are acceptable as soft limit is best effort anyway.
2560 */
2561 if (RB_EMPTY_ROOT(&mctz->rb_root))
2562 return 0;
2563
0608f43d
AM
2564 /*
2565 * This loop can run a while, especially if mem_cgroups continuously
2566 * keep exceeding their soft limit and putting the system under
2567 * pressure
2568 */
2569 do {
2570 if (next_mz)
2571 mz = next_mz;
2572 else
2573 mz = mem_cgroup_largest_soft_limit_node(mctz);
2574 if (!mz)
2575 break;
2576
2577 nr_scanned = 0;
ef8f2327 2578 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
0608f43d
AM
2579 gfp_mask, &nr_scanned);
2580 nr_reclaimed += reclaimed;
2581 *total_scanned += nr_scanned;
0a31bc97 2582 spin_lock_irq(&mctz->lock);
bc2f2e7f 2583 __mem_cgroup_remove_exceeded(mz, mctz);
0608f43d
AM
2584
2585 /*
2586 * If we failed to reclaim anything from this memory cgroup
2587 * it is time to move on to the next cgroup
2588 */
2589 next_mz = NULL;
bc2f2e7f
VD
2590 if (!reclaimed)
2591 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2592
3e32cb2e 2593 excess = soft_limit_excess(mz->memcg);
0608f43d
AM
2594 /*
2595 * One school of thought says that we should not add
2596 * back the node to the tree if reclaim returns 0.
2597 * But our reclaim could return 0 simply because, due
2598 * to priority, we are exposing a smaller subset of
2599 * memory to reclaim from. Consider this as a longer
2600 * term TODO.
2601 */
2602 /* If excess == 0, no tree ops */
cf2c8127 2603 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 2604 spin_unlock_irq(&mctz->lock);
0608f43d
AM
2605 css_put(&mz->memcg->css);
2606 loop++;
2607 /*
2608 * Could not reclaim anything and there are no more
2609 * mem cgroups to try or we seem to be looping without
2610 * reclaiming anything.
2611 */
2612 if (!nr_reclaimed &&
2613 (next_mz == NULL ||
2614 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2615 break;
2616 } while (!nr_reclaimed);
2617 if (next_mz)
2618 css_put(&next_mz->memcg->css);
2619 return nr_reclaimed;
2620}
2621
ea280e7b
TH
2622/*
2623 * Test whether @memcg has children, dead or alive. Note that this
2624 * function doesn't care whether @memcg has use_hierarchy enabled and
2625 * returns %true if there are child csses according to the cgroup
2626 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2627 */
b5f99b53
GC
2628static inline bool memcg_has_children(struct mem_cgroup *memcg)
2629{
ea280e7b
TH
2630 bool ret;
2631
ea280e7b
TH
2632 rcu_read_lock();
2633 ret = css_next_child(NULL, &memcg->css);
2634 rcu_read_unlock();
2635 return ret;
b5f99b53
GC
2636}
2637
c26251f9 2638/*
51038171 2639 * Reclaims as many pages from the given memcg as possible.
c26251f9
MH
2640 *
2641 * Caller is responsible for holding css reference for memcg.
2642 */
2643static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2644{
2645 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
c26251f9 2646
c1e862c1
KH
2647 /* we call try-to-free pages to make this cgroup empty */
2648 lru_add_drain_all();
f817ed48 2649 /* try to free all pages in this cgroup */
3e32cb2e 2650 while (nr_retries && page_counter_read(&memcg->memory)) {
f817ed48 2651 int progress;
c1e862c1 2652
c26251f9
MH
2653 if (signal_pending(current))
2654 return -EINTR;
2655
b70a2a21
JW
2656 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2657 GFP_KERNEL, true);
c1e862c1 2658 if (!progress) {
f817ed48 2659 nr_retries--;
c1e862c1 2660 /* maybe some writeback is necessary */
8aa7e847 2661 congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1 2662 }
f817ed48
KH
2663
2664 }
ab5196c2
MH
2665
2666 return 0;
cc847582
KH
2667}
2668
6770c64e
TH
2669static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2670 char *buf, size_t nbytes,
2671 loff_t off)
c1e862c1 2672{
6770c64e 2673 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
c26251f9 2674
d8423011
MH
2675 if (mem_cgroup_is_root(memcg))
2676 return -EINVAL;
6770c64e 2677 return mem_cgroup_force_empty(memcg) ?: nbytes;
c1e862c1
KH
2678}
2679
182446d0
TH
2680static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2681 struct cftype *cft)
18f59ea7 2682{
182446d0 2683 return mem_cgroup_from_css(css)->use_hierarchy;
18f59ea7
BS
2684}
2685
182446d0
TH
2686static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2687 struct cftype *cft, u64 val)
18f59ea7
BS
2688{
2689 int retval = 0;
182446d0 2690 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5c9d535b 2691 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
18f59ea7 2692
567fb435 2693 if (memcg->use_hierarchy == val)
0b8f73e1 2694 return 0;
567fb435 2695
18f59ea7 2696 /*
af901ca1 2697 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7
BS
2698 * in the child subtrees. If it is unset, then the change can
2699 * occur, provided the current cgroup has no children.
2700 *
2701 * For the root cgroup, parent_mem is NULL, we allow value to be
2702 * set if there are no children.
2703 */
c0ff4b85 2704 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
18f59ea7 2705 (val == 1 || val == 0)) {
ea280e7b 2706 if (!memcg_has_children(memcg))
c0ff4b85 2707 memcg->use_hierarchy = val;
18f59ea7
BS
2708 else
2709 retval = -EBUSY;
2710 } else
2711 retval = -EINVAL;
567fb435 2712
18f59ea7
BS
2713 return retval;
2714}
2715
72b54e73 2716static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
ce00a967
JW
2717{
2718 struct mem_cgroup *iter;
72b54e73 2719 int i;
ce00a967 2720
72b54e73 2721 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
ce00a967 2722
72b54e73
VD
2723 for_each_mem_cgroup_tree(iter, memcg) {
2724 for (i = 0; i < MEMCG_NR_STAT; i++)
2725 stat[i] += mem_cgroup_read_stat(iter, i);
2726 }
ce00a967
JW
2727}
2728
72b54e73 2729static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
587d9f72
JW
2730{
2731 struct mem_cgroup *iter;
72b54e73 2732 int i;
587d9f72 2733
72b54e73 2734 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
587d9f72 2735
72b54e73
VD
2736 for_each_mem_cgroup_tree(iter, memcg) {
2737 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2738 events[i] += mem_cgroup_read_events(iter, i);
2739 }
587d9f72
JW
2740}
2741
6f646156 2742static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
ce00a967 2743{
72b54e73 2744 unsigned long val = 0;
ce00a967 2745
3e32cb2e 2746 if (mem_cgroup_is_root(memcg)) {
72b54e73
VD
2747 struct mem_cgroup *iter;
2748
2749 for_each_mem_cgroup_tree(iter, memcg) {
2750 val += mem_cgroup_read_stat(iter,
2751 MEM_CGROUP_STAT_CACHE);
2752 val += mem_cgroup_read_stat(iter,
2753 MEM_CGROUP_STAT_RSS);
2754 if (swap)
2755 val += mem_cgroup_read_stat(iter,
2756 MEM_CGROUP_STAT_SWAP);
2757 }
3e32cb2e 2758 } else {
ce00a967 2759 if (!swap)
3e32cb2e 2760 val = page_counter_read(&memcg->memory);
ce00a967 2761 else
3e32cb2e 2762 val = page_counter_read(&memcg->memsw);
ce00a967 2763 }
c12176d3 2764 return val;
ce00a967
JW
2765}
2766
3e32cb2e
JW
2767enum {
2768 RES_USAGE,
2769 RES_LIMIT,
2770 RES_MAX_USAGE,
2771 RES_FAILCNT,
2772 RES_SOFT_LIMIT,
2773};
ce00a967 2774
791badbd 2775static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
05b84301 2776 struct cftype *cft)
8cdea7c0 2777{
182446d0 2778 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3e32cb2e 2779 struct page_counter *counter;
af36f906 2780
3e32cb2e 2781 switch (MEMFILE_TYPE(cft->private)) {
8c7c6e34 2782 case _MEM:
3e32cb2e
JW
2783 counter = &memcg->memory;
2784 break;
8c7c6e34 2785 case _MEMSWAP:
3e32cb2e
JW
2786 counter = &memcg->memsw;
2787 break;
510fc4e1 2788 case _KMEM:
3e32cb2e 2789 counter = &memcg->kmem;
510fc4e1 2790 break;
d55f90bf 2791 case _TCP:
0db15298 2792 counter = &memcg->tcpmem;
d55f90bf 2793 break;
8c7c6e34
KH
2794 default:
2795 BUG();
8c7c6e34 2796 }
3e32cb2e
JW
2797
2798 switch (MEMFILE_ATTR(cft->private)) {
2799 case RES_USAGE:
2800 if (counter == &memcg->memory)
c12176d3 2801 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3e32cb2e 2802 if (counter == &memcg->memsw)
c12176d3 2803 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3e32cb2e
JW
2804 return (u64)page_counter_read(counter) * PAGE_SIZE;
2805 case RES_LIMIT:
2806 return (u64)counter->limit * PAGE_SIZE;
2807 case RES_MAX_USAGE:
2808 return (u64)counter->watermark * PAGE_SIZE;
2809 case RES_FAILCNT:
2810 return counter->failcnt;
2811 case RES_SOFT_LIMIT:
2812 return (u64)memcg->soft_limit * PAGE_SIZE;
2813 default:
2814 BUG();
2815 }
8cdea7c0 2816}
510fc4e1 2817
127424c8 2818#ifndef CONFIG_SLOB
567e9ab2 2819static int memcg_online_kmem(struct mem_cgroup *memcg)
d6441637 2820{
d6441637
VD
2821 int memcg_id;
2822
b313aeee
VD
2823 if (cgroup_memory_nokmem)
2824 return 0;
2825
2a4db7eb 2826 BUG_ON(memcg->kmemcg_id >= 0);
567e9ab2 2827 BUG_ON(memcg->kmem_state);
d6441637 2828
f3bb3043 2829 memcg_id = memcg_alloc_cache_id();
0b8f73e1
JW
2830 if (memcg_id < 0)
2831 return memcg_id;
d6441637 2832
ef12947c 2833 static_branch_inc(&memcg_kmem_enabled_key);
d6441637 2834 /*
567e9ab2 2835 * A memory cgroup is considered kmem-online as soon as it gets
900a38f0 2836 * kmemcg_id. Setting the id after enabling static branching will
d6441637
VD
2837 * guarantee no one starts accounting before all call sites are
2838 * patched.
2839 */
900a38f0 2840 memcg->kmemcg_id = memcg_id;
567e9ab2 2841 memcg->kmem_state = KMEM_ONLINE;
bc2791f8 2842 INIT_LIST_HEAD(&memcg->kmem_caches);
0b8f73e1
JW
2843
2844 return 0;
d6441637
VD
2845}
2846
8e0a8912
JW
2847static void memcg_offline_kmem(struct mem_cgroup *memcg)
2848{
2849 struct cgroup_subsys_state *css;
2850 struct mem_cgroup *parent, *child;
2851 int kmemcg_id;
2852
2853 if (memcg->kmem_state != KMEM_ONLINE)
2854 return;
2855 /*
2856 * Clear the online state before clearing memcg_caches array
2857 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2858 * guarantees that no cache will be created for this cgroup
2859 * after we are done (see memcg_create_kmem_cache()).
2860 */
2861 memcg->kmem_state = KMEM_ALLOCATED;
2862
2863 memcg_deactivate_kmem_caches(memcg);
2864
2865 kmemcg_id = memcg->kmemcg_id;
2866 BUG_ON(kmemcg_id < 0);
2867
2868 parent = parent_mem_cgroup(memcg);
2869 if (!parent)
2870 parent = root_mem_cgroup;
2871
2872 /*
2873 * Change kmemcg_id of this cgroup and all its descendants to the
2874 * parent's id, and then move all entries from this cgroup's list_lrus
2875 * to ones of the parent. After we have finished, all list_lrus
2876 * corresponding to this cgroup are guaranteed to remain empty. The
2877 * ordering is imposed by list_lru_node->lock taken by
2878 * memcg_drain_all_list_lrus().
2879 */
3a06bb78 2880 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
8e0a8912
JW
2881 css_for_each_descendant_pre(css, &memcg->css) {
2882 child = mem_cgroup_from_css(css);
2883 BUG_ON(child->kmemcg_id != kmemcg_id);
2884 child->kmemcg_id = parent->kmemcg_id;
2885 if (!memcg->use_hierarchy)
2886 break;
2887 }
3a06bb78
TH
2888 rcu_read_unlock();
2889
8e0a8912
JW
2890 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2891
2892 memcg_free_cache_id(kmemcg_id);
2893}
2894
2895static void memcg_free_kmem(struct mem_cgroup *memcg)
2896{
0b8f73e1
JW
2897 /* css_alloc() failed, offlining didn't happen */
2898 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2899 memcg_offline_kmem(memcg);
2900
8e0a8912
JW
2901 if (memcg->kmem_state == KMEM_ALLOCATED) {
2902 memcg_destroy_kmem_caches(memcg);
2903 static_branch_dec(&memcg_kmem_enabled_key);
2904 WARN_ON(page_counter_read(&memcg->kmem));
2905 }
8e0a8912 2906}
d6441637 2907#else
0b8f73e1 2908static int memcg_online_kmem(struct mem_cgroup *memcg)
127424c8
JW
2909{
2910 return 0;
2911}
2912static void memcg_offline_kmem(struct mem_cgroup *memcg)
2913{
2914}
2915static void memcg_free_kmem(struct mem_cgroup *memcg)
2916{
2917}
2918#endif /* !CONFIG_SLOB */
2919
d6441637 2920static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
3e32cb2e 2921 unsigned long limit)
d6441637 2922{
b313aeee 2923 int ret;
127424c8
JW
2924
2925 mutex_lock(&memcg_limit_mutex);
127424c8 2926 ret = page_counter_limit(&memcg->kmem, limit);
127424c8
JW
2927 mutex_unlock(&memcg_limit_mutex);
2928 return ret;
d6441637 2929}
510fc4e1 2930
d55f90bf
VD
2931static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2932{
2933 int ret;
2934
2935 mutex_lock(&memcg_limit_mutex);
2936
0db15298 2937 ret = page_counter_limit(&memcg->tcpmem, limit);
d55f90bf
VD
2938 if (ret)
2939 goto out;
2940
0db15298 2941 if (!memcg->tcpmem_active) {
d55f90bf
VD
2942 /*
2943 * The active flag needs to be written after the static_key
2944 * update. This is what guarantees that the socket activation
2d758073
JW
2945 * function is the last one to run. See mem_cgroup_sk_alloc()
2946 * for details, and note that we don't mark any socket as
2947 * belonging to this memcg until that flag is up.
d55f90bf
VD
2948 *
2949 * We need to do this, because static_keys will span multiple
2950 * sites, but we can't control their order. If we mark a socket
2951 * as accounted, but the accounting functions are not patched in
2952 * yet, we'll lose accounting.
2953 *
2d758073 2954 * We never race with the readers in mem_cgroup_sk_alloc(),
d55f90bf
VD
2955 * because when this value change, the code to process it is not
2956 * patched in yet.
2957 */
2958 static_branch_inc(&memcg_sockets_enabled_key);
0db15298 2959 memcg->tcpmem_active = true;
d55f90bf
VD
2960 }
2961out:
2962 mutex_unlock(&memcg_limit_mutex);
2963 return ret;
2964}
d55f90bf 2965
628f4235
KH
2966/*
2967 * The user of this function is...
2968 * RES_LIMIT.
2969 */
451af504
TH
2970static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2971 char *buf, size_t nbytes, loff_t off)
8cdea7c0 2972{
451af504 2973 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 2974 unsigned long nr_pages;
628f4235
KH
2975 int ret;
2976
451af504 2977 buf = strstrip(buf);
650c5e56 2978 ret = page_counter_memparse(buf, "-1", &nr_pages);
3e32cb2e
JW
2979 if (ret)
2980 return ret;
af36f906 2981
3e32cb2e 2982 switch (MEMFILE_ATTR(of_cft(of)->private)) {
628f4235 2983 case RES_LIMIT:
4b3bde4c
BS
2984 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2985 ret = -EINVAL;
2986 break;
2987 }
3e32cb2e
JW
2988 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2989 case _MEM:
2990 ret = mem_cgroup_resize_limit(memcg, nr_pages);
8c7c6e34 2991 break;
3e32cb2e
JW
2992 case _MEMSWAP:
2993 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
296c81d8 2994 break;
3e32cb2e
JW
2995 case _KMEM:
2996 ret = memcg_update_kmem_limit(memcg, nr_pages);
2997 break;
d55f90bf
VD
2998 case _TCP:
2999 ret = memcg_update_tcp_limit(memcg, nr_pages);
3000 break;
3e32cb2e 3001 }
296c81d8 3002 break;
3e32cb2e
JW
3003 case RES_SOFT_LIMIT:
3004 memcg->soft_limit = nr_pages;
3005 ret = 0;
628f4235
KH
3006 break;
3007 }
451af504 3008 return ret ?: nbytes;
8cdea7c0
BS
3009}
3010
6770c64e
TH
3011static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3012 size_t nbytes, loff_t off)
c84872e1 3013{
6770c64e 3014 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 3015 struct page_counter *counter;
c84872e1 3016
3e32cb2e
JW
3017 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3018 case _MEM:
3019 counter = &memcg->memory;
3020 break;
3021 case _MEMSWAP:
3022 counter = &memcg->memsw;
3023 break;
3024 case _KMEM:
3025 counter = &memcg->kmem;
3026 break;
d55f90bf 3027 case _TCP:
0db15298 3028 counter = &memcg->tcpmem;
d55f90bf 3029 break;
3e32cb2e
JW
3030 default:
3031 BUG();
3032 }
af36f906 3033
3e32cb2e 3034 switch (MEMFILE_ATTR(of_cft(of)->private)) {
29f2a4da 3035 case RES_MAX_USAGE:
3e32cb2e 3036 page_counter_reset_watermark(counter);
29f2a4da
PE
3037 break;
3038 case RES_FAILCNT:
3e32cb2e 3039 counter->failcnt = 0;
29f2a4da 3040 break;
3e32cb2e
JW
3041 default:
3042 BUG();
29f2a4da 3043 }
f64c3f54 3044
6770c64e 3045 return nbytes;
c84872e1
PE
3046}
3047
182446d0 3048static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
7dc74be0
DN
3049 struct cftype *cft)
3050{
182446d0 3051 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
7dc74be0
DN
3052}
3053
02491447 3054#ifdef CONFIG_MMU
182446d0 3055static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
7dc74be0
DN
3056 struct cftype *cft, u64 val)
3057{
182446d0 3058 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7dc74be0 3059
1dfab5ab 3060 if (val & ~MOVE_MASK)
7dc74be0 3061 return -EINVAL;
ee5e8472 3062
7dc74be0 3063 /*
ee5e8472
GC
3064 * No kind of locking is needed in here, because ->can_attach() will
3065 * check this value once in the beginning of the process, and then carry
3066 * on with stale data. This means that changes to this value will only
3067 * affect task migrations starting after the change.
7dc74be0 3068 */
c0ff4b85 3069 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
3070 return 0;
3071}
02491447 3072#else
182446d0 3073static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
02491447
DN
3074 struct cftype *cft, u64 val)
3075{
3076 return -ENOSYS;
3077}
3078#endif
7dc74be0 3079
406eb0c9 3080#ifdef CONFIG_NUMA
2da8ca82 3081static int memcg_numa_stat_show(struct seq_file *m, void *v)
406eb0c9 3082{
25485de6
GT
3083 struct numa_stat {
3084 const char *name;
3085 unsigned int lru_mask;
3086 };
3087
3088 static const struct numa_stat stats[] = {
3089 { "total", LRU_ALL },
3090 { "file", LRU_ALL_FILE },
3091 { "anon", LRU_ALL_ANON },
3092 { "unevictable", BIT(LRU_UNEVICTABLE) },
3093 };
3094 const struct numa_stat *stat;
406eb0c9 3095 int nid;
25485de6 3096 unsigned long nr;
2da8ca82 3097 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
406eb0c9 3098
25485de6
GT
3099 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3100 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3101 seq_printf(m, "%s=%lu", stat->name, nr);
3102 for_each_node_state(nid, N_MEMORY) {
3103 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3104 stat->lru_mask);
3105 seq_printf(m, " N%d=%lu", nid, nr);
3106 }
3107 seq_putc(m, '\n');
406eb0c9 3108 }
406eb0c9 3109
071aee13
YH
3110 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3111 struct mem_cgroup *iter;
3112
3113 nr = 0;
3114 for_each_mem_cgroup_tree(iter, memcg)
3115 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3116 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3117 for_each_node_state(nid, N_MEMORY) {
3118 nr = 0;
3119 for_each_mem_cgroup_tree(iter, memcg)
3120 nr += mem_cgroup_node_nr_lru_pages(
3121 iter, nid, stat->lru_mask);
3122 seq_printf(m, " N%d=%lu", nid, nr);
3123 }
3124 seq_putc(m, '\n');
406eb0c9 3125 }
406eb0c9 3126
406eb0c9
YH
3127 return 0;
3128}
3129#endif /* CONFIG_NUMA */
3130
2da8ca82 3131static int memcg_stat_show(struct seq_file *m, void *v)
d2ceb9b7 3132{
2da8ca82 3133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3e32cb2e 3134 unsigned long memory, memsw;
af7c4b0e
JW
3135 struct mem_cgroup *mi;
3136 unsigned int i;
406eb0c9 3137
0ca44b14
GT
3138 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3139 MEM_CGROUP_STAT_NSTATS);
3140 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3141 MEM_CGROUP_EVENTS_NSTATS);
70bc068c
RS
3142 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3143
af7c4b0e 3144 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
7941d214 3145 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
1dd3a273 3146 continue;
484ebb3b 3147 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
af7c4b0e 3148 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
1dd3a273 3149 }
7b854121 3150
af7c4b0e
JW
3151 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3152 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3153 mem_cgroup_read_events(memcg, i));
3154
3155 for (i = 0; i < NR_LRU_LISTS; i++)
3156 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3157 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3158
14067bb3 3159 /* Hierarchical information */
3e32cb2e
JW
3160 memory = memsw = PAGE_COUNTER_MAX;
3161 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3162 memory = min(memory, mi->memory.limit);
3163 memsw = min(memsw, mi->memsw.limit);
fee7b548 3164 }
3e32cb2e
JW
3165 seq_printf(m, "hierarchical_memory_limit %llu\n",
3166 (u64)memory * PAGE_SIZE);
7941d214 3167 if (do_memsw_account())
3e32cb2e
JW
3168 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3169 (u64)memsw * PAGE_SIZE);
7f016ee8 3170
af7c4b0e 3171 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
484ebb3b 3172 unsigned long long val = 0;
af7c4b0e 3173
7941d214 3174 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
1dd3a273 3175 continue;
af7c4b0e
JW
3176 for_each_mem_cgroup_tree(mi, memcg)
3177 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
484ebb3b 3178 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
af7c4b0e
JW
3179 }
3180
3181 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3182 unsigned long long val = 0;
3183
3184 for_each_mem_cgroup_tree(mi, memcg)
3185 val += mem_cgroup_read_events(mi, i);
3186 seq_printf(m, "total_%s %llu\n",
3187 mem_cgroup_events_names[i], val);
3188 }
3189
3190 for (i = 0; i < NR_LRU_LISTS; i++) {
3191 unsigned long long val = 0;
3192
3193 for_each_mem_cgroup_tree(mi, memcg)
3194 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3195 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
1dd3a273 3196 }
14067bb3 3197
7f016ee8 3198#ifdef CONFIG_DEBUG_VM
7f016ee8 3199 {
ef8f2327
MG
3200 pg_data_t *pgdat;
3201 struct mem_cgroup_per_node *mz;
89abfab1 3202 struct zone_reclaim_stat *rstat;
7f016ee8
KM
3203 unsigned long recent_rotated[2] = {0, 0};
3204 unsigned long recent_scanned[2] = {0, 0};
3205
ef8f2327
MG
3206 for_each_online_pgdat(pgdat) {
3207 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3208 rstat = &mz->lruvec.reclaim_stat;
7f016ee8 3209
ef8f2327
MG
3210 recent_rotated[0] += rstat->recent_rotated[0];
3211 recent_rotated[1] += rstat->recent_rotated[1];
3212 recent_scanned[0] += rstat->recent_scanned[0];
3213 recent_scanned[1] += rstat->recent_scanned[1];
3214 }
78ccf5b5
JW
3215 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3216 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3217 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3218 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
7f016ee8
KM
3219 }
3220#endif
3221
d2ceb9b7
KH
3222 return 0;
3223}
3224
182446d0
TH
3225static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3226 struct cftype *cft)
a7885eb8 3227{
182446d0 3228 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 3229
1f4c025b 3230 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
3231}
3232
182446d0
TH
3233static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3234 struct cftype *cft, u64 val)
a7885eb8 3235{
182446d0 3236 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 3237
3dae7fec 3238 if (val > 100)
a7885eb8
KM
3239 return -EINVAL;
3240
14208b0e 3241 if (css->parent)
3dae7fec
JW
3242 memcg->swappiness = val;
3243 else
3244 vm_swappiness = val;
068b38c1 3245
a7885eb8
KM
3246 return 0;
3247}
3248
2e72b634
KS
3249static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3250{
3251 struct mem_cgroup_threshold_ary *t;
3e32cb2e 3252 unsigned long usage;
2e72b634
KS
3253 int i;
3254
3255 rcu_read_lock();
3256 if (!swap)
2c488db2 3257 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 3258 else
2c488db2 3259 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
3260
3261 if (!t)
3262 goto unlock;
3263
ce00a967 3264 usage = mem_cgroup_usage(memcg, swap);
2e72b634
KS
3265
3266 /*
748dad36 3267 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
3268 * If it's not true, a threshold was crossed after last
3269 * call of __mem_cgroup_threshold().
3270 */
5407a562 3271 i = t->current_threshold;
2e72b634
KS
3272
3273 /*
3274 * Iterate backward over array of thresholds starting from
3275 * current_threshold and check if a threshold is crossed.
3276 * If none of thresholds below usage is crossed, we read
3277 * only one element of the array here.
3278 */
3279 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3280 eventfd_signal(t->entries[i].eventfd, 1);
3281
3282 /* i = current_threshold + 1 */
3283 i++;
3284
3285 /*
3286 * Iterate forward over array of thresholds starting from
3287 * current_threshold+1 and check if a threshold is crossed.
3288 * If none of thresholds above usage is crossed, we read
3289 * only one element of the array here.
3290 */
3291 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3292 eventfd_signal(t->entries[i].eventfd, 1);
3293
3294 /* Update current_threshold */
5407a562 3295 t->current_threshold = i - 1;
2e72b634
KS
3296unlock:
3297 rcu_read_unlock();
3298}
3299
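/*
 * Worked example (illustrative, not part of memcontrol.c) of the sorted
 * threshold scan above.  The values below are arbitrary.
 */
static const unsigned long example_thresholds_mb[] = { 8, 16, 32 };
/*
 * With usage at 20MB, current_threshold indexes the 16MB entry (1).
 * If usage then drops to 6MB, the backward scan signals entries 1 and 0
 * and current_threshold ends up at -1.  If usage instead grows to 40MB,
 * the forward scan signals entry 2 and current_threshold becomes 2.
 */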
3300static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3301{
ad4ca5f4
KS
3302 while (memcg) {
3303 __mem_cgroup_threshold(memcg, false);
7941d214 3304 if (do_memsw_account())
ad4ca5f4
KS
3305 __mem_cgroup_threshold(memcg, true);
3306
3307 memcg = parent_mem_cgroup(memcg);
3308 }
2e72b634
KS
3309}
3310
3311static int compare_thresholds(const void *a, const void *b)
3312{
3313 const struct mem_cgroup_threshold *_a = a;
3314 const struct mem_cgroup_threshold *_b = b;
3315
2bff24a3
GT
3316 if (_a->threshold > _b->threshold)
3317 return 1;
3318
3319 if (_a->threshold < _b->threshold)
3320 return -1;
3321
3322 return 0;
2e72b634
KS
3323}
3324
c0ff4b85 3325static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
3326{
3327 struct mem_cgroup_eventfd_list *ev;
3328
2bcf2e92
MH
3329 spin_lock(&memcg_oom_lock);
3330
c0ff4b85 3331 list_for_each_entry(ev, &memcg->oom_notify, list)
9490ff27 3332 eventfd_signal(ev->eventfd, 1);
2bcf2e92
MH
3333
3334 spin_unlock(&memcg_oom_lock);
9490ff27
KH
3335 return 0;
3336}
3337
c0ff4b85 3338static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 3339{
7d74b06f
KH
3340 struct mem_cgroup *iter;
3341
c0ff4b85 3342 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 3343 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
3344}
3345
59b6f873 3346static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87 3347 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
2e72b634 3348{
2c488db2
KS
3349 struct mem_cgroup_thresholds *thresholds;
3350 struct mem_cgroup_threshold_ary *new;
3e32cb2e
JW
3351 unsigned long threshold;
3352 unsigned long usage;
2c488db2 3353 int i, size, ret;
2e72b634 3354
650c5e56 3355 ret = page_counter_memparse(args, "-1", &threshold);
2e72b634
KS
3356 if (ret)
3357 return ret;
3358
3359 mutex_lock(&memcg->thresholds_lock);
2c488db2 3360
05b84301 3361 if (type == _MEM) {
2c488db2 3362 thresholds = &memcg->thresholds;
ce00a967 3363 usage = mem_cgroup_usage(memcg, false);
05b84301 3364 } else if (type == _MEMSWAP) {
2c488db2 3365 thresholds = &memcg->memsw_thresholds;
ce00a967 3366 usage = mem_cgroup_usage(memcg, true);
05b84301 3367 } else
2e72b634
KS
3368 BUG();
3369
2e72b634 3370 /* Check if a threshold crossed before adding a new one */
2c488db2 3371 if (thresholds->primary)
2e72b634
KS
3372 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3373
2c488db2 3374 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
3375
3376 /* Allocate memory for new array of thresholds */
2c488db2 3377 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
2e72b634 3378 GFP_KERNEL);
2c488db2 3379 if (!new) {
2e72b634
KS
3380 ret = -ENOMEM;
3381 goto unlock;
3382 }
2c488db2 3383 new->size = size;
2e72b634
KS
3384
3385 /* Copy thresholds (if any) to new array */
2c488db2
KS
3386 if (thresholds->primary) {
3387 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
2e72b634 3388 sizeof(struct mem_cgroup_threshold));
2c488db2
KS
3389 }
3390
2e72b634 3391 /* Add new threshold */
2c488db2
KS
3392 new->entries[size - 1].eventfd = eventfd;
3393 new->entries[size - 1].threshold = threshold;
2e72b634
KS
3394
 3395	/* Sort thresholds. Registering a new threshold isn't time-critical */
2c488db2 3396 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
2e72b634
KS
3397 compare_thresholds, NULL);
3398
3399 /* Find current threshold */
2c488db2 3400 new->current_threshold = -1;
2e72b634 3401 for (i = 0; i < size; i++) {
748dad36 3402 if (new->entries[i].threshold <= usage) {
2e72b634 3403 /*
2c488db2
KS
3404 * new->current_threshold will not be used until
3405 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
3406 * it here.
3407 */
2c488db2 3408 ++new->current_threshold;
748dad36
SZ
3409 } else
3410 break;
2e72b634
KS
3411 }
3412
2c488db2
KS
3413 /* Free old spare buffer and save old primary buffer as spare */
3414 kfree(thresholds->spare);
3415 thresholds->spare = thresholds->primary;
3416
3417 rcu_assign_pointer(thresholds->primary, new);
2e72b634 3418
907860ed 3419 /* To be sure that nobody uses thresholds */
2e72b634
KS
3420 synchronize_rcu();
3421
2e72b634
KS
3422unlock:
3423 mutex_unlock(&memcg->thresholds_lock);
3424
3425 return ret;
3426}
3427
59b6f873 3428static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
3429 struct eventfd_ctx *eventfd, const char *args)
3430{
59b6f873 3431 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
347c4a87
TH
3432}
3433
59b6f873 3434static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
3435 struct eventfd_ctx *eventfd, const char *args)
3436{
59b6f873 3437 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
347c4a87
TH
3438}
3439
59b6f873 3440static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87 3441 struct eventfd_ctx *eventfd, enum res_type type)
2e72b634 3442{
2c488db2
KS
3443 struct mem_cgroup_thresholds *thresholds;
3444 struct mem_cgroup_threshold_ary *new;
3e32cb2e 3445 unsigned long usage;
2c488db2 3446 int i, j, size;
2e72b634
KS
3447
3448 mutex_lock(&memcg->thresholds_lock);
05b84301
JW
3449
3450 if (type == _MEM) {
2c488db2 3451 thresholds = &memcg->thresholds;
ce00a967 3452 usage = mem_cgroup_usage(memcg, false);
05b84301 3453 } else if (type == _MEMSWAP) {
2c488db2 3454 thresholds = &memcg->memsw_thresholds;
ce00a967 3455 usage = mem_cgroup_usage(memcg, true);
05b84301 3456 } else
2e72b634
KS
3457 BUG();
3458
371528ca
AV
3459 if (!thresholds->primary)
3460 goto unlock;
3461
2e72b634
KS
3462 /* Check if a threshold crossed before removing */
3463 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3464
 3465	/* Calculate new number of thresholds */
2c488db2
KS
3466 size = 0;
3467 for (i = 0; i < thresholds->primary->size; i++) {
3468 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634
KS
3469 size++;
3470 }
3471
2c488db2 3472 new = thresholds->spare;
907860ed 3473
2e72b634
KS
3474 /* Set thresholds array to NULL if we don't have thresholds */
3475 if (!size) {
2c488db2
KS
3476 kfree(new);
3477 new = NULL;
907860ed 3478 goto swap_buffers;
2e72b634
KS
3479 }
3480
2c488db2 3481 new->size = size;
2e72b634
KS
3482
3483 /* Copy thresholds and find current threshold */
2c488db2
KS
3484 new->current_threshold = -1;
3485 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3486 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
3487 continue;
3488
2c488db2 3489 new->entries[j] = thresholds->primary->entries[i];
748dad36 3490 if (new->entries[j].threshold <= usage) {
2e72b634 3491 /*
2c488db2 3492 * new->current_threshold will not be used
2e72b634
KS
3493 * until rcu_assign_pointer(), so it's safe to increment
3494 * it here.
3495 */
2c488db2 3496 ++new->current_threshold;
2e72b634
KS
3497 }
3498 j++;
3499 }
3500
907860ed 3501swap_buffers:
2c488db2
KS
3502 /* Swap primary and spare array */
3503 thresholds->spare = thresholds->primary;
8c757763 3504
2c488db2 3505 rcu_assign_pointer(thresholds->primary, new);
2e72b634 3506
907860ed 3507 /* To be sure that nobody uses thresholds */
2e72b634 3508 synchronize_rcu();
6611d8d7
MC
3509
3510 /* If all events are unregistered, free the spare array */
3511 if (!new) {
3512 kfree(thresholds->spare);
3513 thresholds->spare = NULL;
3514 }
371528ca 3515unlock:
2e72b634 3516 mutex_unlock(&memcg->thresholds_lock);
2e72b634 3517}
c1e862c1 3518
59b6f873 3519static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
3520 struct eventfd_ctx *eventfd)
3521{
59b6f873 3522 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
347c4a87
TH
3523}
3524
59b6f873 3525static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
3526 struct eventfd_ctx *eventfd)
3527{
59b6f873 3528 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
347c4a87
TH
3529}
3530
59b6f873 3531static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
347c4a87 3532 struct eventfd_ctx *eventfd, const char *args)
9490ff27 3533{
9490ff27 3534 struct mem_cgroup_eventfd_list *event;
9490ff27 3535
9490ff27
KH
3536 event = kmalloc(sizeof(*event), GFP_KERNEL);
3537 if (!event)
3538 return -ENOMEM;
3539
1af8efe9 3540 spin_lock(&memcg_oom_lock);
9490ff27
KH
3541
3542 event->eventfd = eventfd;
3543 list_add(&event->list, &memcg->oom_notify);
3544
3545 /* already in OOM ? */
c2b42d3c 3546 if (memcg->under_oom)
9490ff27 3547 eventfd_signal(eventfd, 1);
1af8efe9 3548 spin_unlock(&memcg_oom_lock);
9490ff27
KH
3549
3550 return 0;
3551}
3552
59b6f873 3553static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
347c4a87 3554 struct eventfd_ctx *eventfd)
9490ff27 3555{
9490ff27 3556 struct mem_cgroup_eventfd_list *ev, *tmp;
9490ff27 3557
1af8efe9 3558 spin_lock(&memcg_oom_lock);
9490ff27 3559
c0ff4b85 3560 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
3561 if (ev->eventfd == eventfd) {
3562 list_del(&ev->list);
3563 kfree(ev);
3564 }
3565 }
3566
1af8efe9 3567 spin_unlock(&memcg_oom_lock);
9490ff27
KH
3568}
3569
2da8ca82 3570static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3c11ecf4 3571{
2da8ca82 3572 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3c11ecf4 3573
791badbd 3574 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
c2b42d3c 3575 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3c11ecf4
KH
3576 return 0;
3577}
3578
182446d0 3579static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3c11ecf4
KH
3580 struct cftype *cft, u64 val)
3581{
182446d0 3582 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3c11ecf4
KH
3583
3584 /* cannot set to root cgroup and only 0 and 1 are allowed */
14208b0e 3585 if (!css->parent || !((val == 0) || (val == 1)))
3c11ecf4
KH
3586 return -EINVAL;
3587
c0ff4b85 3588 memcg->oom_kill_disable = val;
4d845ebf 3589 if (!val)
c0ff4b85 3590 memcg_oom_recover(memcg);
3dae7fec 3591
3c11ecf4
KH
3592 return 0;
3593}
3594
52ebea74
TH
3595#ifdef CONFIG_CGROUP_WRITEBACK
3596
3597struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3598{
3599 return &memcg->cgwb_list;
3600}
3601
841710aa
TH
3602static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3603{
3604 return wb_domain_init(&memcg->cgwb_domain, gfp);
3605}
3606
3607static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3608{
3609 wb_domain_exit(&memcg->cgwb_domain);
3610}
3611
2529bb3a
TH
3612static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3613{
3614 wb_domain_size_changed(&memcg->cgwb_domain);
3615}
3616
841710aa
TH
3617struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3618{
3619 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3620
3621 if (!memcg->css.parent)
3622 return NULL;
3623
3624 return &memcg->cgwb_domain;
3625}
3626
c2aa723a
TH
3627/**
3628 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3629 * @wb: bdi_writeback in question
c5edf9cd
TH
3630 * @pfilepages: out parameter for number of file pages
3631 * @pheadroom: out parameter for number of allocatable pages according to memcg
c2aa723a
TH
3632 * @pdirty: out parameter for number of dirty pages
3633 * @pwriteback: out parameter for number of pages under writeback
3634 *
c5edf9cd
TH
3635 * Determine the numbers of file, headroom, dirty, and writeback pages in
3636 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3637 * is a bit more involved.
c2aa723a 3638 *
c5edf9cd
TH
3639 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3640 * headroom is calculated as the lowest headroom of itself and the
3641 * ancestors. Note that this doesn't consider the actual amount of
3642 * available memory in the system. The caller should further cap
3643 * *@pheadroom accordingly.
c2aa723a 3644 */
c5edf9cd
TH
3645void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3646 unsigned long *pheadroom, unsigned long *pdirty,
3647 unsigned long *pwriteback)
c2aa723a
TH
3648{
3649 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3650 struct mem_cgroup *parent;
c2aa723a
TH
3651
3652 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3653
3654 /* this should eventually include NR_UNSTABLE_NFS */
3655 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
c5edf9cd
TH
3656 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3657 (1 << LRU_ACTIVE_FILE));
3658 *pheadroom = PAGE_COUNTER_MAX;
c2aa723a 3659
c2aa723a
TH
3660 while ((parent = parent_mem_cgroup(memcg))) {
3661 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3662 unsigned long used = page_counter_read(&memcg->memory);
3663
c5edf9cd 3664 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
c2aa723a
TH
3665 memcg = parent;
3666 }
c2aa723a
TH
3667}
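/*
 * A rough worked example of the headroom clamp above, using made-up
 * values written as sizes rather than pages: a child memcg with
 * limit = 1G, high = 512M and usage = 300M contributes a ceiling of 512M
 * and a headroom of 212M; a non-root parent with limit = 2G, high = max
 * and usage = 1.5G contributes 512M. The loop keeps the minimum along the
 * hierarchy, so *pheadroom ends up at 212M worth of pages.
 */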
3668
841710aa
TH
3669#else /* CONFIG_CGROUP_WRITEBACK */
3670
3671static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3672{
3673 return 0;
3674}
3675
3676static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3677{
3678}
3679
2529bb3a
TH
3680static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3681{
3682}
3683
52ebea74
TH
3684#endif /* CONFIG_CGROUP_WRITEBACK */
3685
3bc942f3
TH
3686/*
3687 * DO NOT USE IN NEW FILES.
3688 *
3689 * "cgroup.event_control" implementation.
3690 *
3691 * This is way over-engineered. It tries to support fully configurable
 3692	 * events for each user. Such a level of flexibility is completely
 3693	 * unnecessary, especially in light of the planned unified hierarchy.
3694 *
3695 * Please deprecate this and replace with something simpler if at all
3696 * possible.
3697 */
3698
79bd9814
TH
3699/*
3700 * Unregister event and free resources.
3701 *
3702 * Gets called from workqueue.
3703 */
3bc942f3 3704static void memcg_event_remove(struct work_struct *work)
79bd9814 3705{
3bc942f3
TH
3706 struct mem_cgroup_event *event =
3707 container_of(work, struct mem_cgroup_event, remove);
59b6f873 3708 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
3709
3710 remove_wait_queue(event->wqh, &event->wait);
3711
59b6f873 3712 event->unregister_event(memcg, event->eventfd);
79bd9814
TH
3713
3714 /* Notify userspace the event is going away. */
3715 eventfd_signal(event->eventfd, 1);
3716
3717 eventfd_ctx_put(event->eventfd);
3718 kfree(event);
59b6f873 3719 css_put(&memcg->css);
79bd9814
TH
3720}
3721
3722/*
3723 * Gets called on POLLHUP on eventfd when user closes it.
3724 *
3725 * Called with wqh->lock held and interrupts disabled.
3726 */
3bc942f3
TH
3727static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3728 int sync, void *key)
79bd9814 3729{
3bc942f3
TH
3730 struct mem_cgroup_event *event =
3731 container_of(wait, struct mem_cgroup_event, wait);
59b6f873 3732 struct mem_cgroup *memcg = event->memcg;
79bd9814
TH
3733 unsigned long flags = (unsigned long)key;
3734
3735 if (flags & POLLHUP) {
3736 /*
3737 * If the event has been detached at cgroup removal, we
 3738	 * can simply return knowing the other side will clean up
3739 * for us.
3740 *
3741 * We can't race against event freeing since the other
3742 * side will require wqh->lock via remove_wait_queue(),
3743 * which we hold.
3744 */
fba94807 3745 spin_lock(&memcg->event_list_lock);
79bd9814
TH
3746 if (!list_empty(&event->list)) {
3747 list_del_init(&event->list);
3748 /*
 3749	 * We are in atomic context, but memcg_event_remove()
3750 * may sleep, so we have to call it in workqueue.
3751 */
3752 schedule_work(&event->remove);
3753 }
fba94807 3754 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
3755 }
3756
3757 return 0;
3758}
3759
3bc942f3 3760static void memcg_event_ptable_queue_proc(struct file *file,
79bd9814
TH
3761 wait_queue_head_t *wqh, poll_table *pt)
3762{
3bc942f3
TH
3763 struct mem_cgroup_event *event =
3764 container_of(pt, struct mem_cgroup_event, pt);
79bd9814
TH
3765
3766 event->wqh = wqh;
3767 add_wait_queue(wqh, &event->wait);
3768}
3769
3770/*
3bc942f3
TH
3771 * DO NOT USE IN NEW FILES.
3772 *
79bd9814
TH
3773 * Parse input and register new cgroup event handler.
3774 *
3775 * Input must be in format '<event_fd> <control_fd> <args>'.
3776 * Interpretation of args is defined by control file implementation.
3777 */
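/*
 * A hypothetical cgroup v1 invocation (fd numbers and threshold made up
 * for illustration): with fd 3 referring to an eventfd and fd 4 to an
 * open memory.usage_in_bytes file, writing "3 4 1048576" to
 * cgroup.event_control arms a usage threshold event at 1M; the eventfd
 * is then signalled whenever that threshold is crossed.
 */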
451af504
TH
3778static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3779 char *buf, size_t nbytes, loff_t off)
79bd9814 3780{
451af504 3781 struct cgroup_subsys_state *css = of_css(of);
fba94807 3782 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 3783 struct mem_cgroup_event *event;
79bd9814
TH
3784 struct cgroup_subsys_state *cfile_css;
3785 unsigned int efd, cfd;
3786 struct fd efile;
3787 struct fd cfile;
fba94807 3788 const char *name;
79bd9814
TH
3789 char *endp;
3790 int ret;
3791
451af504
TH
3792 buf = strstrip(buf);
3793
3794 efd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
3795 if (*endp != ' ')
3796 return -EINVAL;
451af504 3797 buf = endp + 1;
79bd9814 3798
451af504 3799 cfd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
3800 if ((*endp != ' ') && (*endp != '\0'))
3801 return -EINVAL;
451af504 3802 buf = endp + 1;
79bd9814
TH
3803
3804 event = kzalloc(sizeof(*event), GFP_KERNEL);
3805 if (!event)
3806 return -ENOMEM;
3807
59b6f873 3808 event->memcg = memcg;
79bd9814 3809 INIT_LIST_HEAD(&event->list);
3bc942f3
TH
3810 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3811 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3812 INIT_WORK(&event->remove, memcg_event_remove);
79bd9814
TH
3813
3814 efile = fdget(efd);
3815 if (!efile.file) {
3816 ret = -EBADF;
3817 goto out_kfree;
3818 }
3819
3820 event->eventfd = eventfd_ctx_fileget(efile.file);
3821 if (IS_ERR(event->eventfd)) {
3822 ret = PTR_ERR(event->eventfd);
3823 goto out_put_efile;
3824 }
3825
3826 cfile = fdget(cfd);
3827 if (!cfile.file) {
3828 ret = -EBADF;
3829 goto out_put_eventfd;
3830 }
3831
 3832	/* the process needs read permission on the control file */
3833 /* AV: shouldn't we check that it's been opened for read instead? */
3834 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3835 if (ret < 0)
3836 goto out_put_cfile;
3837
fba94807
TH
3838 /*
3839 * Determine the event callbacks and set them in @event. This used
3840 * to be done via struct cftype but cgroup core no longer knows
3841 * about these events. The following is crude but the whole thing
3842 * is for compatibility anyway.
3bc942f3
TH
3843 *
3844 * DO NOT ADD NEW FILES.
fba94807 3845 */
b583043e 3846 name = cfile.file->f_path.dentry->d_name.name;
fba94807
TH
3847
3848 if (!strcmp(name, "memory.usage_in_bytes")) {
3849 event->register_event = mem_cgroup_usage_register_event;
3850 event->unregister_event = mem_cgroup_usage_unregister_event;
3851 } else if (!strcmp(name, "memory.oom_control")) {
3852 event->register_event = mem_cgroup_oom_register_event;
3853 event->unregister_event = mem_cgroup_oom_unregister_event;
3854 } else if (!strcmp(name, "memory.pressure_level")) {
3855 event->register_event = vmpressure_register_event;
3856 event->unregister_event = vmpressure_unregister_event;
3857 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
347c4a87
TH
3858 event->register_event = memsw_cgroup_usage_register_event;
3859 event->unregister_event = memsw_cgroup_usage_unregister_event;
fba94807
TH
3860 } else {
3861 ret = -EINVAL;
3862 goto out_put_cfile;
3863 }
3864
79bd9814 3865 /*
b5557c4c
TH
 3866	 * Verify that @cfile belongs to @css. Also, remaining events are
3867 * automatically removed on cgroup destruction but the removal is
3868 * asynchronous, so take an extra ref on @css.
79bd9814 3869 */
b583043e 3870 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
ec903c0c 3871 &memory_cgrp_subsys);
79bd9814 3872 ret = -EINVAL;
5a17f543 3873 if (IS_ERR(cfile_css))
79bd9814 3874 goto out_put_cfile;
5a17f543
TH
3875 if (cfile_css != css) {
3876 css_put(cfile_css);
79bd9814 3877 goto out_put_cfile;
5a17f543 3878 }
79bd9814 3879
451af504 3880 ret = event->register_event(memcg, event->eventfd, buf);
79bd9814
TH
3881 if (ret)
3882 goto out_put_css;
3883
3884 efile.file->f_op->poll(efile.file, &event->pt);
3885
fba94807
TH
3886 spin_lock(&memcg->event_list_lock);
3887 list_add(&event->list, &memcg->event_list);
3888 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
3889
3890 fdput(cfile);
3891 fdput(efile);
3892
451af504 3893 return nbytes;
79bd9814
TH
3894
3895out_put_css:
b5557c4c 3896 css_put(css);
79bd9814
TH
3897out_put_cfile:
3898 fdput(cfile);
3899out_put_eventfd:
3900 eventfd_ctx_put(event->eventfd);
3901out_put_efile:
3902 fdput(efile);
3903out_kfree:
3904 kfree(event);
3905
3906 return ret;
3907}
3908
241994ed 3909static struct cftype mem_cgroup_legacy_files[] = {
8cdea7c0 3910 {
0eea1030 3911 .name = "usage_in_bytes",
8c7c6e34 3912 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
791badbd 3913 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 3914 },
c84872e1
PE
3915 {
3916 .name = "max_usage_in_bytes",
8c7c6e34 3917 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6770c64e 3918 .write = mem_cgroup_reset,
791badbd 3919 .read_u64 = mem_cgroup_read_u64,
c84872e1 3920 },
8cdea7c0 3921 {
0eea1030 3922 .name = "limit_in_bytes",
8c7c6e34 3923 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
451af504 3924 .write = mem_cgroup_write,
791badbd 3925 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 3926 },
296c81d8
BS
3927 {
3928 .name = "soft_limit_in_bytes",
3929 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
451af504 3930 .write = mem_cgroup_write,
791badbd 3931 .read_u64 = mem_cgroup_read_u64,
296c81d8 3932 },
8cdea7c0
BS
3933 {
3934 .name = "failcnt",
8c7c6e34 3935 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6770c64e 3936 .write = mem_cgroup_reset,
791badbd 3937 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 3938 },
d2ceb9b7
KH
3939 {
3940 .name = "stat",
2da8ca82 3941 .seq_show = memcg_stat_show,
d2ceb9b7 3942 },
c1e862c1
KH
3943 {
3944 .name = "force_empty",
6770c64e 3945 .write = mem_cgroup_force_empty_write,
c1e862c1 3946 },
18f59ea7
BS
3947 {
3948 .name = "use_hierarchy",
3949 .write_u64 = mem_cgroup_hierarchy_write,
3950 .read_u64 = mem_cgroup_hierarchy_read,
3951 },
79bd9814 3952 {
3bc942f3 3953 .name = "cgroup.event_control", /* XXX: for compat */
451af504 3954 .write = memcg_write_event_control,
7dbdb199 3955 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
79bd9814 3956 },
a7885eb8
KM
3957 {
3958 .name = "swappiness",
3959 .read_u64 = mem_cgroup_swappiness_read,
3960 .write_u64 = mem_cgroup_swappiness_write,
3961 },
7dc74be0
DN
3962 {
3963 .name = "move_charge_at_immigrate",
3964 .read_u64 = mem_cgroup_move_charge_read,
3965 .write_u64 = mem_cgroup_move_charge_write,
3966 },
9490ff27
KH
3967 {
3968 .name = "oom_control",
2da8ca82 3969 .seq_show = mem_cgroup_oom_control_read,
3c11ecf4 3970 .write_u64 = mem_cgroup_oom_control_write,
9490ff27
KH
3971 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3972 },
70ddf637
AV
3973 {
3974 .name = "pressure_level",
70ddf637 3975 },
406eb0c9
YH
3976#ifdef CONFIG_NUMA
3977 {
3978 .name = "numa_stat",
2da8ca82 3979 .seq_show = memcg_numa_stat_show,
406eb0c9
YH
3980 },
3981#endif
510fc4e1
GC
3982 {
3983 .name = "kmem.limit_in_bytes",
3984 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
451af504 3985 .write = mem_cgroup_write,
791badbd 3986 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
3987 },
3988 {
3989 .name = "kmem.usage_in_bytes",
3990 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
791badbd 3991 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
3992 },
3993 {
3994 .name = "kmem.failcnt",
3995 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6770c64e 3996 .write = mem_cgroup_reset,
791badbd 3997 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
3998 },
3999 {
4000 .name = "kmem.max_usage_in_bytes",
4001 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6770c64e 4002 .write = mem_cgroup_reset,
791badbd 4003 .read_u64 = mem_cgroup_read_u64,
510fc4e1 4004 },
749c5415
GC
4005#ifdef CONFIG_SLABINFO
4006 {
4007 .name = "kmem.slabinfo",
bc2791f8
TH
4008 .seq_start = memcg_slab_start,
4009 .seq_next = memcg_slab_next,
4010 .seq_stop = memcg_slab_stop,
b047501c 4011 .seq_show = memcg_slab_show,
749c5415
GC
4012 },
4013#endif
d55f90bf
VD
4014 {
4015 .name = "kmem.tcp.limit_in_bytes",
4016 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4017 .write = mem_cgroup_write,
4018 .read_u64 = mem_cgroup_read_u64,
4019 },
4020 {
4021 .name = "kmem.tcp.usage_in_bytes",
4022 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4023 .read_u64 = mem_cgroup_read_u64,
4024 },
4025 {
4026 .name = "kmem.tcp.failcnt",
4027 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4028 .write = mem_cgroup_reset,
4029 .read_u64 = mem_cgroup_read_u64,
4030 },
4031 {
4032 .name = "kmem.tcp.max_usage_in_bytes",
4033 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4034 .write = mem_cgroup_reset,
4035 .read_u64 = mem_cgroup_read_u64,
4036 },
6bc10349 4037 { }, /* terminate */
af36f906 4038};
8c7c6e34 4039
73f576c0
JW
4040/*
4041 * Private memory cgroup IDR
4042 *
4043 * Swap-out records and page cache shadow entries need to store memcg
4044 * references in constrained space, so we maintain an ID space that is
 4045	 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
4046 * memory-controlled cgroups to 64k.
4047 *
 4048	 * However, there usually are many references to the offline CSS after
4049 * the cgroup has been destroyed, such as page cache or reclaimable
4050 * slab objects, that don't need to hang on to the ID. We want to keep
4051 * those dead CSS from occupying IDs, or we might quickly exhaust the
4052 * relatively small ID space and prevent the creation of new cgroups
4053 * even when there are much fewer than 64k cgroups - possibly none.
4054 *
4055 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4056 * be freed and recycled when it's no longer needed, which is usually
4057 * when the CSS is offlined.
4058 *
 4059	 * The only exceptions are records of swapped-out tmpfs/shmem
4060 * pages that need to be attributed to live ancestors on swapin. But
4061 * those references are manageable from userspace.
4062 */
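/*
 * In rough terms, users of the ID pin it explicitly: moving N swap
 * records to a memcg, for example, is paired as
 * mem_cgroup_id_get_many(memcg, N) on the way in and
 * mem_cgroup_id_put_many(memcg, N) once the records are released, with
 * the final put recycling the ID and dropping the CSS reference. This is
 * only a summary of the helpers below, not additional behaviour.
 */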
4063
4064static DEFINE_IDR(mem_cgroup_idr);
4065
615d66c3 4066static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
73f576c0 4067{
58fa2a55 4068 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
615d66c3 4069 atomic_add(n, &memcg->id.ref);
73f576c0
JW
4070}
4071
615d66c3 4072static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
73f576c0 4073{
58fa2a55 4074 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
615d66c3 4075 if (atomic_sub_and_test(n, &memcg->id.ref)) {
73f576c0
JW
4076 idr_remove(&mem_cgroup_idr, memcg->id.id);
4077 memcg->id.id = 0;
4078
4079 /* Memcg ID pins CSS */
4080 css_put(&memcg->css);
4081 }
4082}
4083
615d66c3
VD
4084static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4085{
4086 mem_cgroup_id_get_many(memcg, 1);
4087}
4088
4089static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4090{
4091 mem_cgroup_id_put_many(memcg, 1);
4092}
4093
73f576c0
JW
4094/**
4095 * mem_cgroup_from_id - look up a memcg from a memcg id
4096 * @id: the memcg id to look up
4097 *
4098 * Caller must hold rcu_read_lock().
4099 */
4100struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4101{
4102 WARN_ON_ONCE(!rcu_read_lock_held());
4103 return idr_find(&mem_cgroup_idr, id);
4104}
4105
ef8f2327 4106static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
4107{
4108 struct mem_cgroup_per_node *pn;
ef8f2327 4109 int tmp = node;
1ecaab2b
KH
4110 /*
4111 * This routine is called against possible nodes.
 4112	 * But it's a BUG to call kmalloc() against an offline node.
4113 *
4114 * TODO: this routine can waste much memory for nodes which will
 4115	 * never be onlined. It's better to use a memory hotplug callback
4116 * function.
4117 */
41e3355d
KH
4118 if (!node_state(node, N_NORMAL_MEMORY))
4119 tmp = -1;
17295c88 4120 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
4121 if (!pn)
4122 return 1;
1ecaab2b 4123
ef8f2327
MG
4124 lruvec_init(&pn->lruvec);
4125 pn->usage_in_excess = 0;
4126 pn->on_tree = false;
4127 pn->memcg = memcg;
4128
54f72fe0 4129 memcg->nodeinfo[node] = pn;
6d12e2d8
KH
4130 return 0;
4131}
4132
ef8f2327 4133static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
1ecaab2b 4134{
54f72fe0 4135 kfree(memcg->nodeinfo[node]);
1ecaab2b
KH
4136}
4137
0b8f73e1 4138static void mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 4139{
c8b2a36f 4140 int node;
59927fb9 4141
0b8f73e1 4142 memcg_wb_domain_exit(memcg);
c8b2a36f 4143 for_each_node(node)
ef8f2327 4144 free_mem_cgroup_per_node_info(memcg, node);
c8b2a36f 4145 free_percpu(memcg->stat);
8ff69e2c 4146 kfree(memcg);
59927fb9 4147}
3afe36b1 4148
0b8f73e1 4149static struct mem_cgroup *mem_cgroup_alloc(void)
8cdea7c0 4150{
d142e3e6 4151 struct mem_cgroup *memcg;
0b8f73e1 4152 size_t size;
6d12e2d8 4153 int node;
8cdea7c0 4154
0b8f73e1
JW
4155 size = sizeof(struct mem_cgroup);
4156 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4157
4158 memcg = kzalloc(size, GFP_KERNEL);
c0ff4b85 4159 if (!memcg)
0b8f73e1
JW
4160 return NULL;
4161
73f576c0
JW
4162 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4163 1, MEM_CGROUP_ID_MAX,
4164 GFP_KERNEL);
4165 if (memcg->id.id < 0)
4166 goto fail;
4167
0b8f73e1
JW
4168 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4169 if (!memcg->stat)
4170 goto fail;
78fb7466 4171
3ed28fa1 4172 for_each_node(node)
ef8f2327 4173 if (alloc_mem_cgroup_per_node_info(memcg, node))
0b8f73e1 4174 goto fail;
f64c3f54 4175
0b8f73e1
JW
4176 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4177 goto fail;
28dbc4b6 4178
f7e1cb6e 4179 INIT_WORK(&memcg->high_work, high_work_func);
d142e3e6
GC
4180 memcg->last_scanned_node = MAX_NUMNODES;
4181 INIT_LIST_HEAD(&memcg->oom_notify);
d142e3e6
GC
4182 mutex_init(&memcg->thresholds_lock);
4183 spin_lock_init(&memcg->move_lock);
70ddf637 4184 vmpressure_init(&memcg->vmpressure);
fba94807
TH
4185 INIT_LIST_HEAD(&memcg->event_list);
4186 spin_lock_init(&memcg->event_list_lock);
d886f4e4 4187 memcg->socket_pressure = jiffies;
127424c8 4188#ifndef CONFIG_SLOB
900a38f0 4189 memcg->kmemcg_id = -1;
900a38f0 4190#endif
52ebea74
TH
4191#ifdef CONFIG_CGROUP_WRITEBACK
4192 INIT_LIST_HEAD(&memcg->cgwb_list);
4193#endif
73f576c0 4194 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
0b8f73e1
JW
4195 return memcg;
4196fail:
73f576c0
JW
4197 if (memcg->id.id > 0)
4198 idr_remove(&mem_cgroup_idr, memcg->id.id);
0b8f73e1
JW
4199 mem_cgroup_free(memcg);
4200 return NULL;
d142e3e6
GC
4201}
4202
0b8f73e1
JW
4203static struct cgroup_subsys_state * __ref
4204mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
d142e3e6 4205{
0b8f73e1
JW
4206 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4207 struct mem_cgroup *memcg;
4208 long error = -ENOMEM;
d142e3e6 4209
0b8f73e1
JW
4210 memcg = mem_cgroup_alloc();
4211 if (!memcg)
4212 return ERR_PTR(error);
d142e3e6 4213
0b8f73e1
JW
4214 memcg->high = PAGE_COUNTER_MAX;
4215 memcg->soft_limit = PAGE_COUNTER_MAX;
4216 if (parent) {
4217 memcg->swappiness = mem_cgroup_swappiness(parent);
4218 memcg->oom_kill_disable = parent->oom_kill_disable;
4219 }
4220 if (parent && parent->use_hierarchy) {
4221 memcg->use_hierarchy = true;
3e32cb2e 4222 page_counter_init(&memcg->memory, &parent->memory);
37e84351 4223 page_counter_init(&memcg->swap, &parent->swap);
3e32cb2e
JW
4224 page_counter_init(&memcg->memsw, &parent->memsw);
4225 page_counter_init(&memcg->kmem, &parent->kmem);
0db15298 4226 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
18f59ea7 4227 } else {
3e32cb2e 4228 page_counter_init(&memcg->memory, NULL);
37e84351 4229 page_counter_init(&memcg->swap, NULL);
3e32cb2e
JW
4230 page_counter_init(&memcg->memsw, NULL);
4231 page_counter_init(&memcg->kmem, NULL);
0db15298 4232 page_counter_init(&memcg->tcpmem, NULL);
8c7f6edb
TH
4233 /*
 4234	 * Deeper hierarchy with use_hierarchy == false doesn't make
 4235	 * much sense, so let the cgroup subsystem know about this
4236 * unfortunate state in our controller.
4237 */
d142e3e6 4238 if (parent != root_mem_cgroup)
073219e9 4239 memory_cgrp_subsys.broken_hierarchy = true;
18f59ea7 4240 }
d6441637 4241
0b8f73e1
JW
4242 /* The following stuff does not apply to the root */
4243 if (!parent) {
4244 root_mem_cgroup = memcg;
4245 return &memcg->css;
4246 }
4247
b313aeee 4248 error = memcg_online_kmem(memcg);
0b8f73e1
JW
4249 if (error)
4250 goto fail;
127424c8 4251
f7e1cb6e 4252 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 4253 static_branch_inc(&memcg_sockets_enabled_key);
f7e1cb6e 4254
0b8f73e1
JW
4255 return &memcg->css;
4256fail:
4257 mem_cgroup_free(memcg);
ea3a9645 4258 return ERR_PTR(-ENOMEM);
0b8f73e1
JW
4259}
4260
73f576c0 4261static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
0b8f73e1 4262{
58fa2a55
VD
4263 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4264
73f576c0 4265 /* Online state pins memcg ID, memcg ID pins CSS */
58fa2a55 4266 atomic_set(&memcg->id.ref, 1);
73f576c0 4267 css_get(css);
2f7dd7a4 4268 return 0;
8cdea7c0
BS
4269}
4270
eb95419b 4271static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
df878fb0 4272{
eb95419b 4273 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 4274 struct mem_cgroup_event *event, *tmp;
79bd9814
TH
4275
4276 /*
4277 * Unregister events and notify userspace.
4278 * Notify userspace about cgroup removing only after rmdir of cgroup
4279 * directory to avoid race between userspace and kernelspace.
4280 */
fba94807
TH
4281 spin_lock(&memcg->event_list_lock);
4282 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
79bd9814
TH
4283 list_del_init(&event->list);
4284 schedule_work(&event->remove);
4285 }
fba94807 4286 spin_unlock(&memcg->event_list_lock);
ec64f515 4287
567e9ab2 4288 memcg_offline_kmem(memcg);
52ebea74 4289 wb_memcg_offline(memcg);
73f576c0
JW
4290
4291 mem_cgroup_id_put(memcg);
df878fb0
KH
4292}
4293
6df38689
VD
4294static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4295{
4296 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4297
4298 invalidate_reclaim_iterators(memcg);
4299}
4300
eb95419b 4301static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
8cdea7c0 4302{
eb95419b 4303 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
c268e994 4304
f7e1cb6e 4305 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 4306 static_branch_dec(&memcg_sockets_enabled_key);
127424c8 4307
0db15298 4308 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
d55f90bf 4309 static_branch_dec(&memcg_sockets_enabled_key);
3893e302 4310
0b8f73e1
JW
4311 vmpressure_cleanup(&memcg->vmpressure);
4312 cancel_work_sync(&memcg->high_work);
4313 mem_cgroup_remove_from_trees(memcg);
d886f4e4 4314 memcg_free_kmem(memcg);
0b8f73e1 4315 mem_cgroup_free(memcg);
8cdea7c0
BS
4316}
4317
1ced953b
TH
4318/**
4319 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4320 * @css: the target css
4321 *
4322 * Reset the states of the mem_cgroup associated with @css. This is
4323 * invoked when the userland requests disabling on the default hierarchy
4324 * but the memcg is pinned through dependency. The memcg should stop
4325 * applying policies and should revert to the vanilla state as it may be
4326 * made visible again.
4327 *
4328 * The current implementation only resets the essential configurations.
4329 * This needs to be expanded to cover all the visible parts.
4330 */
4331static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4332{
4333 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4334
d334c9bc
VD
4335 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4336 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4337 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4338 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4339 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
241994ed
JW
4340 memcg->low = 0;
4341 memcg->high = PAGE_COUNTER_MAX;
24d404dc 4342 memcg->soft_limit = PAGE_COUNTER_MAX;
2529bb3a 4343 memcg_wb_domain_size_changed(memcg);
1ced953b
TH
4344}
4345
02491447 4346#ifdef CONFIG_MMU
7dc74be0 4347/* Handlers for move charge at task migration. */
854ffa8d 4348static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 4349{
05b84301 4350 int ret;
9476db97 4351
d0164adc
MG
4352 /* Try a single bulk charge without reclaim first, kswapd may wake */
4353 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
9476db97 4354 if (!ret) {
854ffa8d 4355 mc.precharge += count;
854ffa8d
DN
4356 return ret;
4357 }
9476db97 4358
3674534b 4359 /* Try charges one by one with reclaim, but do not retry */
854ffa8d 4360 while (count--) {
3674534b 4361 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
38c5d72f 4362 if (ret)
38c5d72f 4363 return ret;
854ffa8d 4364 mc.precharge++;
9476db97 4365 cond_resched();
854ffa8d 4366 }
9476db97 4367 return 0;
4ffef5fe
DN
4368}
4369
4ffef5fe
DN
4370union mc_target {
4371 struct page *page;
02491447 4372 swp_entry_t ent;
4ffef5fe
DN
4373};
4374
4ffef5fe 4375enum mc_target_type {
8d32ff84 4376 MC_TARGET_NONE = 0,
4ffef5fe 4377 MC_TARGET_PAGE,
02491447 4378 MC_TARGET_SWAP,
4ffef5fe
DN
4379};
4380
90254a65
DN
4381static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4382 unsigned long addr, pte_t ptent)
4ffef5fe 4383{
90254a65 4384 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 4385
90254a65
DN
4386 if (!page || !page_mapped(page))
4387 return NULL;
4388 if (PageAnon(page)) {
1dfab5ab 4389 if (!(mc.flags & MOVE_ANON))
90254a65 4390 return NULL;
1dfab5ab
JW
4391 } else {
4392 if (!(mc.flags & MOVE_FILE))
4393 return NULL;
4394 }
90254a65
DN
4395 if (!get_page_unless_zero(page))
4396 return NULL;
4397
4398 return page;
4399}
4400
4b91355e 4401#ifdef CONFIG_SWAP
90254a65 4402static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 4403 pte_t ptent, swp_entry_t *entry)
90254a65 4404{
90254a65
DN
4405 struct page *page = NULL;
4406 swp_entry_t ent = pte_to_swp_entry(ptent);
4407
1dfab5ab 4408 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
90254a65 4409 return NULL;
4b91355e
KH
4410 /*
4411 * Because lookup_swap_cache() updates some statistics counter,
4412 * we call find_get_page() with swapper_space directly.
4413 */
f6ab1f7f 4414 page = find_get_page(swap_address_space(ent), swp_offset(ent));
7941d214 4415 if (do_memsw_account())
90254a65
DN
4416 entry->val = ent.val;
4417
4418 return page;
4419}
4b91355e
KH
4420#else
4421static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 4422 pte_t ptent, swp_entry_t *entry)
4b91355e
KH
4423{
4424 return NULL;
4425}
4426#endif
90254a65 4427
87946a72
DN
4428static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4429 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4430{
4431 struct page *page = NULL;
87946a72
DN
4432 struct address_space *mapping;
4433 pgoff_t pgoff;
4434
4435 if (!vma->vm_file) /* anonymous vma */
4436 return NULL;
1dfab5ab 4437 if (!(mc.flags & MOVE_FILE))
87946a72
DN
4438 return NULL;
4439
87946a72 4440 mapping = vma->vm_file->f_mapping;
0661a336 4441 pgoff = linear_page_index(vma, addr);
87946a72
DN
4442
 4443	 /* page is moved even if it's not RSS of this task (page-faulted). */
aa3b1895
HD
4444#ifdef CONFIG_SWAP
4445 /* shmem/tmpfs may report page out on swap: account for that too. */
139b6a6f
JW
4446 if (shmem_mapping(mapping)) {
4447 page = find_get_entry(mapping, pgoff);
4448 if (radix_tree_exceptional_entry(page)) {
4449 swp_entry_t swp = radix_to_swp_entry(page);
7941d214 4450 if (do_memsw_account())
139b6a6f 4451 *entry = swp;
f6ab1f7f
HY
4452 page = find_get_page(swap_address_space(swp),
4453 swp_offset(swp));
139b6a6f
JW
4454 }
4455 } else
4456 page = find_get_page(mapping, pgoff);
4457#else
4458 page = find_get_page(mapping, pgoff);
aa3b1895 4459#endif
87946a72
DN
4460 return page;
4461}
4462
b1b0deab
CG
4463/**
4464 * mem_cgroup_move_account - move account of the page
4465 * @page: the page
25843c2b 4466 * @compound: charge the page as compound or small page
b1b0deab
CG
4467 * @from: mem_cgroup which the page is moved from.
4468 * @to: mem_cgroup which the page is moved to. @from != @to.
4469 *
3ac808fd 4470 * The caller must make sure the page is not on LRU (isolate_lru_page() is useful).
b1b0deab
CG
4471 *
4472 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4473 * from old cgroup.
4474 */
4475static int mem_cgroup_move_account(struct page *page,
f627c2f5 4476 bool compound,
b1b0deab
CG
4477 struct mem_cgroup *from,
4478 struct mem_cgroup *to)
4479{
4480 unsigned long flags;
f627c2f5 4481 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
b1b0deab 4482 int ret;
c4843a75 4483 bool anon;
b1b0deab
CG
4484
4485 VM_BUG_ON(from == to);
4486 VM_BUG_ON_PAGE(PageLRU(page), page);
f627c2f5 4487 VM_BUG_ON(compound && !PageTransHuge(page));
b1b0deab
CG
4488
4489 /*
6a93ca8f 4490 * Prevent mem_cgroup_migrate() from looking at
45637bab 4491 * page->mem_cgroup of its source page while we change it.
b1b0deab 4492 */
f627c2f5 4493 ret = -EBUSY;
b1b0deab
CG
4494 if (!trylock_page(page))
4495 goto out;
4496
4497 ret = -EINVAL;
4498 if (page->mem_cgroup != from)
4499 goto out_unlock;
4500
c4843a75
GT
4501 anon = PageAnon(page);
4502
b1b0deab
CG
4503 spin_lock_irqsave(&from->move_lock, flags);
4504
c4843a75 4505 if (!anon && page_mapped(page)) {
b1b0deab
CG
4506 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4507 nr_pages);
4508 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4509 nr_pages);
4510 }
4511
c4843a75
GT
4512 /*
 4513	 * move_lock is grabbed above and the caller set from->moving_account, so
 4514	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
 4515	 * The mapping should therefore be stable for dirty pages.
4516 */
4517 if (!anon && PageDirty(page)) {
4518 struct address_space *mapping = page_mapping(page);
4519
4520 if (mapping_cap_account_dirty(mapping)) {
4521 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4522 nr_pages);
4523 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4524 nr_pages);
4525 }
4526 }
4527
b1b0deab
CG
4528 if (PageWriteback(page)) {
4529 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4530 nr_pages);
4531 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4532 nr_pages);
4533 }
4534
4535 /*
4536 * It is safe to change page->mem_cgroup here because the page
4537 * is referenced, charged, and isolated - we can't race with
4538 * uncharging, charging, migration, or LRU putback.
4539 */
4540
4541 /* caller should have done css_get */
4542 page->mem_cgroup = to;
4543 spin_unlock_irqrestore(&from->move_lock, flags);
4544
4545 ret = 0;
4546
4547 local_irq_disable();
f627c2f5 4548 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
b1b0deab 4549 memcg_check_events(to, page);
f627c2f5 4550 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
b1b0deab
CG
4551 memcg_check_events(from, page);
4552 local_irq_enable();
4553out_unlock:
4554 unlock_page(page);
4555out:
4556 return ret;
4557}
4558
7cf7806c
LR
4559/**
4560 * get_mctgt_type - get target type of moving charge
4561 * @vma: the vma the pte to be checked belongs
4562 * @addr: the address corresponding to the pte to be checked
4563 * @ptent: the pte to be checked
 4564	 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4565 *
4566 * Returns
4567 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4568 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 4569	 * move charge. If @target is not NULL, the page is stored in target->page
 4570	 * with an extra refcount taken (callers should handle it).
4571 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 4572	 * target for charge migration. If @target is not NULL, the entry is stored
4573 * in target->ent.
4574 *
4575 * Called with pte lock held.
4576 */
4577
8d32ff84 4578static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
4579 unsigned long addr, pte_t ptent, union mc_target *target)
4580{
4581 struct page *page = NULL;
8d32ff84 4582 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
4583 swp_entry_t ent = { .val = 0 };
4584
4585 if (pte_present(ptent))
4586 page = mc_handle_present_pte(vma, addr, ptent);
4587 else if (is_swap_pte(ptent))
48406ef8 4588 page = mc_handle_swap_pte(vma, ptent, &ent);
0661a336 4589 else if (pte_none(ptent))
87946a72 4590 page = mc_handle_file_pte(vma, addr, ptent, &ent);
90254a65
DN
4591
4592 if (!page && !ent.val)
8d32ff84 4593 return ret;
02491447 4594 if (page) {
02491447 4595 /*
0a31bc97 4596 * Do only loose check w/o serialization.
1306a85a 4597 * mem_cgroup_move_account() checks the page is valid or
0a31bc97 4598 * not under LRU exclusion.
02491447 4599 */
1306a85a 4600 if (page->mem_cgroup == mc.from) {
02491447
DN
4601 ret = MC_TARGET_PAGE;
4602 if (target)
4603 target->page = page;
4604 }
4605 if (!ret || !target)
4606 put_page(page);
4607 }
90254a65
DN
4608 /* There is a swap entry and a page doesn't exist or isn't charged */
4609 if (ent.val && !ret &&
34c00c31 4610 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
4611 ret = MC_TARGET_SWAP;
4612 if (target)
4613 target->ent = ent;
4ffef5fe 4614 }
4ffef5fe
DN
4615 return ret;
4616}
4617
12724850
NH
4618#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4619/*
4620 * We don't consider swapping or file mapped pages because THP does not
4621 * support them for now.
4622 * Caller should make sure that pmd_trans_huge(pmd) is true.
4623 */
4624static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4625 unsigned long addr, pmd_t pmd, union mc_target *target)
4626{
4627 struct page *page = NULL;
12724850
NH
4628 enum mc_target_type ret = MC_TARGET_NONE;
4629
4630 page = pmd_page(pmd);
309381fe 4631 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
1dfab5ab 4632 if (!(mc.flags & MOVE_ANON))
12724850 4633 return ret;
1306a85a 4634 if (page->mem_cgroup == mc.from) {
12724850
NH
4635 ret = MC_TARGET_PAGE;
4636 if (target) {
4637 get_page(page);
4638 target->page = page;
4639 }
4640 }
4641 return ret;
4642}
4643#else
4644static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4645 unsigned long addr, pmd_t pmd, union mc_target *target)
4646{
4647 return MC_TARGET_NONE;
4648}
4649#endif
4650
4ffef5fe
DN
4651static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4652 unsigned long addr, unsigned long end,
4653 struct mm_walk *walk)
4654{
26bcd64a 4655 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
4656 pte_t *pte;
4657 spinlock_t *ptl;
4658
b6ec57f4
KS
4659 ptl = pmd_trans_huge_lock(pmd, vma);
4660 if (ptl) {
12724850
NH
4661 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4662 mc.precharge += HPAGE_PMD_NR;
bf929152 4663 spin_unlock(ptl);
1a5a9906 4664 return 0;
12724850 4665 }
03319327 4666
45f83cef
AA
4667 if (pmd_trans_unstable(pmd))
4668 return 0;
4ffef5fe
DN
4669 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4670 for (; addr != end; pte++, addr += PAGE_SIZE)
8d32ff84 4671 if (get_mctgt_type(vma, addr, *pte, NULL))
4ffef5fe
DN
4672 mc.precharge++; /* increment precharge temporarily */
4673 pte_unmap_unlock(pte - 1, ptl);
4674 cond_resched();
4675
7dc74be0
DN
4676 return 0;
4677}
4678
4ffef5fe
DN
4679static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4680{
4681 unsigned long precharge;
4ffef5fe 4682
26bcd64a
NH
4683 struct mm_walk mem_cgroup_count_precharge_walk = {
4684 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4685 .mm = mm,
4686 };
dfe076b0 4687 down_read(&mm->mmap_sem);
0247f3f4
JM
4688 walk_page_range(0, mm->highest_vm_end,
4689 &mem_cgroup_count_precharge_walk);
dfe076b0 4690 up_read(&mm->mmap_sem);
4ffef5fe
DN
4691
4692 precharge = mc.precharge;
4693 mc.precharge = 0;
4694
4695 return precharge;
4696}
4697
4ffef5fe
DN
4698static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4699{
dfe076b0
DN
4700 unsigned long precharge = mem_cgroup_count_precharge(mm);
4701
4702 VM_BUG_ON(mc.moving_task);
4703 mc.moving_task = current;
4704 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
4705}
4706
dfe076b0
DN
4707/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4708static void __mem_cgroup_clear_mc(void)
4ffef5fe 4709{
2bd9bb20
KH
4710 struct mem_cgroup *from = mc.from;
4711 struct mem_cgroup *to = mc.to;
4712
4ffef5fe 4713 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d 4714 if (mc.precharge) {
00501b53 4715 cancel_charge(mc.to, mc.precharge);
854ffa8d
DN
4716 mc.precharge = 0;
4717 }
4718 /*
4719 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4720 * we must uncharge here.
4721 */
4722 if (mc.moved_charge) {
00501b53 4723 cancel_charge(mc.from, mc.moved_charge);
854ffa8d 4724 mc.moved_charge = 0;
4ffef5fe 4725 }
483c30b5
DN
4726 /* we must fixup refcnts and charges */
4727 if (mc.moved_swap) {
483c30b5 4728 /* uncharge swap account from the old cgroup */
ce00a967 4729 if (!mem_cgroup_is_root(mc.from))
3e32cb2e 4730 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
483c30b5 4731
615d66c3
VD
4732 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4733
05b84301 4734 /*
3e32cb2e
JW
4735 * we charged both to->memory and to->memsw, so we
4736 * should uncharge to->memory.
05b84301 4737 */
ce00a967 4738 if (!mem_cgroup_is_root(mc.to))
3e32cb2e
JW
4739 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4740
615d66c3
VD
4741 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4742 css_put_many(&mc.to->css, mc.moved_swap);
3e32cb2e 4743
483c30b5
DN
4744 mc.moved_swap = 0;
4745 }
dfe076b0
DN
4746 memcg_oom_recover(from);
4747 memcg_oom_recover(to);
4748 wake_up_all(&mc.waitq);
4749}
4750
4751static void mem_cgroup_clear_mc(void)
4752{
264a0ae1
TH
4753 struct mm_struct *mm = mc.mm;
4754
dfe076b0
DN
4755 /*
4756 * we must clear moving_task before waking up waiters at the end of
4757 * task migration.
4758 */
4759 mc.moving_task = NULL;
4760 __mem_cgroup_clear_mc();
2bd9bb20 4761 spin_lock(&mc.lock);
4ffef5fe
DN
4762 mc.from = NULL;
4763 mc.to = NULL;
264a0ae1 4764 mc.mm = NULL;
2bd9bb20 4765 spin_unlock(&mc.lock);
264a0ae1
TH
4766
4767 mmput(mm);
4ffef5fe
DN
4768}
4769
1f7dd3e5 4770static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
7dc74be0 4771{
1f7dd3e5 4772 struct cgroup_subsys_state *css;
eed67d75 4773 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
9f2115f9 4774 struct mem_cgroup *from;
4530eddb 4775 struct task_struct *leader, *p;
9f2115f9 4776 struct mm_struct *mm;
1dfab5ab 4777 unsigned long move_flags;
9f2115f9 4778 int ret = 0;
7dc74be0 4779
1f7dd3e5
TH
4780 /* charge immigration isn't supported on the default hierarchy */
4781 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
9f2115f9
TH
4782 return 0;
4783
4530eddb
TH
4784 /*
4785 * Multi-process migrations only happen on the default hierarchy
4786 * where charge immigration is not used. Perform charge
4787 * immigration if @tset contains a leader and whine if there are
4788 * multiple.
4789 */
4790 p = NULL;
1f7dd3e5 4791 cgroup_taskset_for_each_leader(leader, css, tset) {
4530eddb
TH
4792 WARN_ON_ONCE(p);
4793 p = leader;
1f7dd3e5 4794 memcg = mem_cgroup_from_css(css);
4530eddb
TH
4795 }
4796 if (!p)
4797 return 0;
4798
1f7dd3e5
TH
4799 /*
 4800	 * We are now committed to this value whatever it is. Changes in this
4801 * tunable will only affect upcoming migrations, not the current one.
4802 * So we need to save it, and keep it going.
4803 */
4804 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4805 if (!move_flags)
4806 return 0;
4807
9f2115f9
TH
4808 from = mem_cgroup_from_task(p);
4809
4810 VM_BUG_ON(from == memcg);
4811
4812 mm = get_task_mm(p);
4813 if (!mm)
4814 return 0;
 4815	 /* We move charges only when we move an owner of the mm */
4816 if (mm->owner == p) {
4817 VM_BUG_ON(mc.from);
4818 VM_BUG_ON(mc.to);
4819 VM_BUG_ON(mc.precharge);
4820 VM_BUG_ON(mc.moved_charge);
4821 VM_BUG_ON(mc.moved_swap);
4822
4823 spin_lock(&mc.lock);
264a0ae1 4824 mc.mm = mm;
9f2115f9
TH
4825 mc.from = from;
4826 mc.to = memcg;
4827 mc.flags = move_flags;
4828 spin_unlock(&mc.lock);
4829 /* We set mc.moving_task later */
4830
4831 ret = mem_cgroup_precharge_mc(mm);
4832 if (ret)
4833 mem_cgroup_clear_mc();
264a0ae1
TH
4834 } else {
4835 mmput(mm);
7dc74be0
DN
4836 }
4837 return ret;
4838}
4839
1f7dd3e5 4840static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
7dc74be0 4841{
4e2f245d
JW
4842 if (mc.to)
4843 mem_cgroup_clear_mc();
7dc74be0
DN
4844}
4845
4ffef5fe
DN
4846static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4847 unsigned long addr, unsigned long end,
4848 struct mm_walk *walk)
7dc74be0 4849{
4ffef5fe 4850 int ret = 0;
26bcd64a 4851 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
4852 pte_t *pte;
4853 spinlock_t *ptl;
12724850
NH
4854 enum mc_target_type target_type;
4855 union mc_target target;
4856 struct page *page;
4ffef5fe 4857
b6ec57f4
KS
4858 ptl = pmd_trans_huge_lock(pmd, vma);
4859 if (ptl) {
62ade86a 4860 if (mc.precharge < HPAGE_PMD_NR) {
bf929152 4861 spin_unlock(ptl);
12724850
NH
4862 return 0;
4863 }
4864 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4865 if (target_type == MC_TARGET_PAGE) {
4866 page = target.page;
4867 if (!isolate_lru_page(page)) {
f627c2f5 4868 if (!mem_cgroup_move_account(page, true,
1306a85a 4869 mc.from, mc.to)) {
12724850
NH
4870 mc.precharge -= HPAGE_PMD_NR;
4871 mc.moved_charge += HPAGE_PMD_NR;
4872 }
4873 putback_lru_page(page);
4874 }
4875 put_page(page);
4876 }
bf929152 4877 spin_unlock(ptl);
1a5a9906 4878 return 0;
12724850
NH
4879 }
4880
45f83cef
AA
4881 if (pmd_trans_unstable(pmd))
4882 return 0;
4ffef5fe
DN
4883retry:
4884 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4885 for (; addr != end; addr += PAGE_SIZE) {
4886 pte_t ptent = *(pte++);
02491447 4887 swp_entry_t ent;
4ffef5fe
DN
4888
4889 if (!mc.precharge)
4890 break;
4891
8d32ff84 4892 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4ffef5fe
DN
4893 case MC_TARGET_PAGE:
4894 page = target.page;
53f9263b
KS
4895 /*
4896 * We can have a part of the split pmd here. Moving it
4897 * can be done but it would be too convoluted so simply
 4898	 * ignore such a partial THP and keep it in the original
4899 * memcg. There should be somebody mapping the head.
4900 */
4901 if (PageTransCompound(page))
4902 goto put;
4ffef5fe
DN
4903 if (isolate_lru_page(page))
4904 goto put;
f627c2f5
KS
4905 if (!mem_cgroup_move_account(page, false,
4906 mc.from, mc.to)) {
4ffef5fe 4907 mc.precharge--;
854ffa8d
DN
4908 /* we uncharge from mc.from later. */
4909 mc.moved_charge++;
4ffef5fe
DN
4910 }
4911 putback_lru_page(page);
8d32ff84 4912put: /* get_mctgt_type() gets the page */
4ffef5fe
DN
4913 put_page(page);
4914 break;
02491447
DN
4915 case MC_TARGET_SWAP:
4916 ent = target.ent;
e91cbb42 4917 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 4918 mc.precharge--;
483c30b5
DN
4919 /* we fixup refcnts and charges later. */
4920 mc.moved_swap++;
4921 }
02491447 4922 break;
4ffef5fe
DN
4923 default:
4924 break;
4925 }
4926 }
4927 pte_unmap_unlock(pte - 1, ptl);
4928 cond_resched();
4929
4930 if (addr != end) {
4931 /*
4932 * We have consumed all precharges we got in can_attach().
 4933	 * We try charging one by one, but don't do any additional
 4934	 * charges to mc.to if we have already failed to charge once in the attach()
4935 * phase.
4936 */
854ffa8d 4937 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
4938 if (!ret)
4939 goto retry;
4940 }
4941
4942 return ret;
4943}
4944
264a0ae1 4945static void mem_cgroup_move_charge(void)
4ffef5fe 4946{
26bcd64a
NH
4947 struct mm_walk mem_cgroup_move_charge_walk = {
4948 .pmd_entry = mem_cgroup_move_charge_pte_range,
264a0ae1 4949 .mm = mc.mm,
26bcd64a 4950 };
4ffef5fe
DN
4951
4952 lru_add_drain_all();
312722cb 4953 /*
81f8c3a4
JW
4954 * Signal lock_page_memcg() to take the memcg's move_lock
4955 * while we're moving its pages to another memcg. Then wait
4956 * for already started RCU-only updates to finish.
312722cb
JW
4957 */
4958 atomic_inc(&mc.from->moving_account);
4959 synchronize_rcu();
dfe076b0 4960retry:
264a0ae1 4961 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
dfe076b0
DN
4962 /*
 4963	 * Someone holding the mmap_sem might be waiting in the
4964 * waitq. So we cancel all extra charges, wake up all waiters,
4965 * and retry. Because we cancel precharges, we might not be able
4966 * to move enough charges, but moving charge is a best-effort
4967 * feature anyway, so it wouldn't be a big problem.
4968 */
4969 __mem_cgroup_clear_mc();
4970 cond_resched();
4971 goto retry;
4972 }
26bcd64a
NH
4973 /*
4974 * When we have consumed all precharges and failed in doing
4975 * additional charge, the page walk just aborts.
4976 */
0247f3f4
JM
4977 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
4978
264a0ae1 4979 up_read(&mc.mm->mmap_sem);
312722cb 4980 atomic_dec(&mc.from->moving_account);
7dc74be0
DN
4981}
4982
264a0ae1 4983static void mem_cgroup_move_task(void)
67e465a7 4984{
264a0ae1
TH
4985 if (mc.to) {
4986 mem_cgroup_move_charge();
a433658c 4987 mem_cgroup_clear_mc();
264a0ae1 4988 }
67e465a7 4989}
5cfb80a7 4990#else /* !CONFIG_MMU */
1f7dd3e5 4991static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
4992{
4993 return 0;
4994}
1f7dd3e5 4995static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
4996{
4997}
264a0ae1 4998static void mem_cgroup_move_task(void)
5cfb80a7
DN
4999{
5000}
5001#endif
67e465a7 5002
f00baae7
TH
5003/*
5004 * Cgroup retains root cgroups across [un]mount cycles making it necessary
aa6ec29b
TH
5005 * to verify whether we're attached to the default hierarchy on each mount
5006 * attempt.
f00baae7 5007 */
eb95419b 5008static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
f00baae7
TH
5009{
5010 /*
aa6ec29b 5011 * use_hierarchy is forced on the default hierarchy. cgroup core
f00baae7
TH
5012 * guarantees that @root doesn't have any children, so turning it
5013 * on for the root memcg is enough.
5014 */
9e10a130 5015 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7feee590
VD
5016 root_mem_cgroup->use_hierarchy = true;
5017 else
5018 root_mem_cgroup->use_hierarchy = false;
f00baae7
TH
5019}
5020
241994ed
JW
5021static u64 memory_current_read(struct cgroup_subsys_state *css,
5022 struct cftype *cft)
5023{
f5fc3c5d
JW
5024 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5025
5026 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
241994ed
JW
5027}
5028
5029static int memory_low_show(struct seq_file *m, void *v)
5030{
5031 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4db0c3c2 5032 unsigned long low = READ_ONCE(memcg->low);
241994ed
JW
5033
5034 if (low == PAGE_COUNTER_MAX)
d2973697 5035 seq_puts(m, "max\n");
241994ed
JW
5036 else
5037 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5038
5039 return 0;
5040}
5041
5042static ssize_t memory_low_write(struct kernfs_open_file *of,
5043 char *buf, size_t nbytes, loff_t off)
5044{
5045 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5046 unsigned long low;
5047 int err;
5048
5049 buf = strstrip(buf);
d2973697 5050 err = page_counter_memparse(buf, "max", &low);
241994ed
JW
5051 if (err)
5052 return err;
5053
5054 memcg->low = low;
5055
5056 return nbytes;
5057}
5058
5059static int memory_high_show(struct seq_file *m, void *v)
5060{
5061 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4db0c3c2 5062 unsigned long high = READ_ONCE(memcg->high);
241994ed
JW
5063
5064 if (high == PAGE_COUNTER_MAX)
d2973697 5065 seq_puts(m, "max\n");
241994ed
JW
5066 else
5067 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5068
5069 return 0;
5070}
5071
5072static ssize_t memory_high_write(struct kernfs_open_file *of,
5073 char *buf, size_t nbytes, loff_t off)
5074{
5075 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
588083bb 5076 unsigned long nr_pages;
241994ed
JW
5077 unsigned long high;
5078 int err;
5079
5080 buf = strstrip(buf);
d2973697 5081 err = page_counter_memparse(buf, "max", &high);
241994ed
JW
5082 if (err)
5083 return err;
5084
5085 memcg->high = high;
5086
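 /*
 * memory.high is not a hard limit: after updating it, make one
 * synchronous reclaim pass to push usage back below the new threshold.
 */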
588083bb
JW
5087 nr_pages = page_counter_read(&memcg->memory);
5088 if (nr_pages > high)
5089 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5090 GFP_KERNEL, true);
5091
2529bb3a 5092 memcg_wb_domain_size_changed(memcg);
241994ed
JW
5093 return nbytes;
5094}
5095
5096static int memory_max_show(struct seq_file *m, void *v)
5097{
5098 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4db0c3c2 5099 unsigned long max = READ_ONCE(memcg->memory.limit);
241994ed
JW
5100
5101 if (max == PAGE_COUNTER_MAX)
d2973697 5102 seq_puts(m, "max\n");
241994ed
JW
5103 else
5104 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5105
5106 return 0;
5107}
5108
5109static ssize_t memory_max_write(struct kernfs_open_file *of,
5110 char *buf, size_t nbytes, loff_t off)
5111{
5112 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
b6e6edcf
JW
5113 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5114 bool drained = false;
241994ed
JW
5115 unsigned long max;
5116 int err;
5117
5118 buf = strstrip(buf);
d2973697 5119 err = page_counter_memparse(buf, "max", &max);
241994ed
JW
5120 if (err)
5121 return err;
5122
b6e6edcf
JW
5123 xchg(&memcg->memory.limit, max);
5124
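 /*
 * Enforce the new hard limit: drain the per-cpu charge caches first,
 * retry direct reclaim a bounded number of times, and only then fall
 * back to the memcg OOM killer. A pending signal aborts the loop
 * with -EINTR.
 */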
5125 for (;;) {
5126 unsigned long nr_pages = page_counter_read(&memcg->memory);
5127
5128 if (nr_pages <= max)
5129 break;
5130
5131 if (signal_pending(current)) {
5132 err = -EINTR;
5133 break;
5134 }
5135
5136 if (!drained) {
5137 drain_all_stock(memcg);
5138 drained = true;
5139 continue;
5140 }
5141
5142 if (nr_reclaims) {
5143 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5144 GFP_KERNEL, true))
5145 nr_reclaims--;
5146 continue;
5147 }
5148
5149 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5150 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5151 break;
5152 }
241994ed 5153
2529bb3a 5154 memcg_wb_domain_size_changed(memcg);
241994ed
JW
5155 return nbytes;
5156}
5157
5158static int memory_events_show(struct seq_file *m, void *v)
5159{
5160 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5161
5162 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5163 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5164 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5165 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5166
5167 return 0;
5168}
5169
587d9f72
JW
5170static int memory_stat_show(struct seq_file *m, void *v)
5171{
5172 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
72b54e73
VD
5173 unsigned long stat[MEMCG_NR_STAT];
5174 unsigned long events[MEMCG_NR_EVENTS];
587d9f72
JW
5175 int i;
5176
5177 /*
5178 * Provide statistics on the state of the memory subsystem as
5179 * well as cumulative event counters that show past behavior.
5180 *
5181 * This list is ordered following a combination of these gradients:
5182 * 1) generic big picture -> specifics and details
5183 * 2) reflecting userspace activity -> reflecting kernel heuristics
5184 *
5185 * Current memory state:
5186 */
5187
72b54e73
VD
5188 tree_stat(memcg, stat);
5189 tree_events(memcg, events);
5190
587d9f72 5191 seq_printf(m, "anon %llu\n",
72b54e73 5192 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
587d9f72 5193 seq_printf(m, "file %llu\n",
72b54e73 5194 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
12580e4b 5195 seq_printf(m, "kernel_stack %llu\n",
efdc9490 5196 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
27ee57c9
VD
5197 seq_printf(m, "slab %llu\n",
5198 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5199 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
b2807f07 5200 seq_printf(m, "sock %llu\n",
72b54e73 5201 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
587d9f72
JW
5202
5203 seq_printf(m, "file_mapped %llu\n",
72b54e73 5204 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
587d9f72 5205 seq_printf(m, "file_dirty %llu\n",
72b54e73 5206 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
587d9f72 5207 seq_printf(m, "file_writeback %llu\n",
72b54e73 5208 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
587d9f72
JW
5209
5210 for (i = 0; i < NR_LRU_LISTS; i++) {
5211 struct mem_cgroup *mi;
5212 unsigned long val = 0;
5213
5214 for_each_mem_cgroup_tree(mi, memcg)
5215 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5216 seq_printf(m, "%s %llu\n",
5217 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5218 }
5219
27ee57c9
VD
5220 seq_printf(m, "slab_reclaimable %llu\n",
5221 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5222 seq_printf(m, "slab_unreclaimable %llu\n",
5223 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5224
587d9f72
JW
5225 /* Accumulated memory events */
5226
5227 seq_printf(m, "pgfault %lu\n",
72b54e73 5228 events[MEM_CGROUP_EVENTS_PGFAULT]);
587d9f72 5229 seq_printf(m, "pgmajfault %lu\n",
72b54e73 5230 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
587d9f72
JW
5231
5232 return 0;
5233}
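/*
 * The resulting memory.stat file contains one "<key> <value>" pair per
 * line, in the order printed above: anon, file, kernel_stack, slab, sock,
 * file_mapped, file_dirty, file_writeback, the five LRU list sizes
 * (inactive_anon, active_anon, inactive_file, active_file, unevictable),
 * slab_reclaimable, slab_unreclaimable, and finally the pgfault and
 * pgmajfault event counters. All sizes are reported in bytes.
 */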
5234
241994ed
JW
5235static struct cftype memory_files[] = {
5236 {
5237 .name = "current",
f5fc3c5d 5238 .flags = CFTYPE_NOT_ON_ROOT,
241994ed
JW
5239 .read_u64 = memory_current_read,
5240 },
5241 {
5242 .name = "low",
5243 .flags = CFTYPE_NOT_ON_ROOT,
5244 .seq_show = memory_low_show,
5245 .write = memory_low_write,
5246 },
5247 {
5248 .name = "high",
5249 .flags = CFTYPE_NOT_ON_ROOT,
5250 .seq_show = memory_high_show,
5251 .write = memory_high_write,
5252 },
5253 {
5254 .name = "max",
5255 .flags = CFTYPE_NOT_ON_ROOT,
5256 .seq_show = memory_max_show,
5257 .write = memory_max_write,
5258 },
5259 {
5260 .name = "events",
5261 .flags = CFTYPE_NOT_ON_ROOT,
472912a2 5262 .file_offset = offsetof(struct mem_cgroup, events_file),
241994ed
JW
5263 .seq_show = memory_events_show,
5264 },
587d9f72
JW
5265 {
5266 .name = "stat",
5267 .flags = CFTYPE_NOT_ON_ROOT,
5268 .seq_show = memory_stat_show,
5269 },
241994ed
JW
5270 { } /* terminate */
5271};
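/*
 * Hedged userspace sketch (not kernel code) of driving the files declared
 * above; the v2 mount point and the group name "example" are assumptions.
 * Accepted values are a byte count with an optional K/M/G suffix, or the
 * literal "max", as parsed by page_counter_memparse().
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_memory_max(const char *value)	/* e.g. "512M" or "max" */
{
	int fd = open("/sys/fs/cgroup/example/memory.max", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, value, strlen(value));	/* handled by memory_max_write() */
	close(fd);
	return ret < 0 ? -1 : 0;
}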
5272
073219e9 5273struct cgroup_subsys memory_cgrp_subsys = {
92fb9748 5274 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 5275 .css_online = mem_cgroup_css_online,
92fb9748 5276 .css_offline = mem_cgroup_css_offline,
6df38689 5277 .css_released = mem_cgroup_css_released,
92fb9748 5278 .css_free = mem_cgroup_css_free,
1ced953b 5279 .css_reset = mem_cgroup_css_reset,
7dc74be0
DN
5280 .can_attach = mem_cgroup_can_attach,
5281 .cancel_attach = mem_cgroup_cancel_attach,
264a0ae1 5282 .post_attach = mem_cgroup_move_task,
f00baae7 5283 .bind = mem_cgroup_bind,
241994ed
JW
5284 .dfl_cftypes = memory_files,
5285 .legacy_cftypes = mem_cgroup_legacy_files,
6d12e2d8 5286 .early_init = 0,
8cdea7c0 5287};
c077719b 5288
241994ed
JW
5289/**
5290 * mem_cgroup_low - check if memory consumption is below the normal range
5291 * @root: the highest ancestor to consider
5292 * @memcg: the memory cgroup to check
5293 *
5294 * Returns %true if memory consumption of @memcg, and that of all
5295 * configurable ancestors up to @root, is below the normal range.
5296 */
5297bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5298{
5299 if (mem_cgroup_disabled())
5300 return false;
5301
5302 /*
5303 * The toplevel group doesn't have a configurable range, so
5304 * it's never low when looked at directly, and it is not
5305 * considered an ancestor when assessing the hierarchy.
5306 */
5307
5308 if (memcg == root_mem_cgroup)
5309 return false;
5310
4e54dede 5311 if (page_counter_read(&memcg->memory) >= memcg->low)
241994ed
JW
5312 return false;
5313
5314 while (memcg != root) {
5315 memcg = parent_mem_cgroup(memcg);
5316
5317 if (memcg == root_mem_cgroup)
5318 break;
5319
4e54dede 5320 if (page_counter_read(&memcg->memory) >= memcg->low)
241994ed
JW
5321 return false;
5322 }
5323 return true;
5324}
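/*
 * Hedged sketch of a reclaim-side consumer of mem_cgroup_low(); the helper
 * name and the "desperate" flag are assumptions, not the actual vmscan.c
 * logic.
 */
static bool example_skip_protected_memcg(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 bool desperate)
{
	/* Leave groups below their memory.low protection alone unless
	 * reclaim has already failed everywhere else. */
	return mem_cgroup_low(root, memcg) && !desperate;
}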
5325
00501b53
JW
5326/**
5327 * mem_cgroup_try_charge - try charging a page
5328 * @page: page to charge
5329 * @mm: mm context of the victim
5330 * @gfp_mask: reclaim mode
5331 * @memcgp: charged memcg return
25843c2b 5332 * @compound: charge the page as compound or small page
00501b53
JW
5333 *
5334 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5335 * pages according to @gfp_mask if necessary.
5336 *
5337 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5338 * Otherwise, an error code is returned.
5339 *
5340 * After page->mapping has been set up, the caller must finalize the
5341 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5342 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5343 */
5344int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
f627c2f5
KS
5345 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5346 bool compound)
00501b53
JW
5347{
5348 struct mem_cgroup *memcg = NULL;
f627c2f5 5349 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
00501b53
JW
5350 int ret = 0;
5351
5352 if (mem_cgroup_disabled())
5353 goto out;
5354
5355 if (PageSwapCache(page)) {
00501b53
JW
5356 /*
5357 * Every swap fault against a single page tries to charge the
5358 * page, bail as early as possible. shmem_unuse() encounters
5359 * already charged pages, too. The USED bit is protected by
5360 * the page lock, which serializes swap cache removal, which
5361 * in turn serializes uncharging.
5362 */
e993d905 5363 VM_BUG_ON_PAGE(!PageLocked(page), page);
1306a85a 5364 if (page->mem_cgroup)
00501b53 5365 goto out;
e993d905 5366
37e84351 5367 if (do_swap_account) {
e993d905
VD
5368 swp_entry_t ent = { .val = page_private(page), };
5369 unsigned short id = lookup_swap_cgroup_id(ent);
5370
5371 rcu_read_lock();
5372 memcg = mem_cgroup_from_id(id);
5373 if (memcg && !css_tryget_online(&memcg->css))
5374 memcg = NULL;
5375 rcu_read_unlock();
5376 }
00501b53
JW
5377 }
5378
00501b53
JW
5379 if (!memcg)
5380 memcg = get_mem_cgroup_from_mm(mm);
5381
5382 ret = try_charge(memcg, gfp_mask, nr_pages);
5383
5384 css_put(&memcg->css);
00501b53
JW
5385out:
5386 *memcgp = memcg;
5387 return ret;
5388}
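/*
 * Illustrative sketch, not taken from this file: the typical shape of a
 * caller following the try/commit/cancel protocol documented above. The
 * function name and the elided mapping-setup step are assumptions; only
 * the mem_cgroup_*_charge() calls reflect the API as defined here.
 */
static int example_charge_and_map(struct page *page, struct mm_struct *mm,
				  gfp_t gfp_mask)
{
	struct mem_cgroup *memcg;
	int err;

	err = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
	if (err)
		return err;

	err = 0;	/* ... set up page->mapping / the page tables here ... */
	if (err) {
		/* Instantiation failed: roll the charge back. */
		mem_cgroup_cancel_charge(page, memcg, false);
		return err;
	}

	/* page->mapping is set up: make the charge permanent. */
	mem_cgroup_commit_charge(page, memcg, false, false);
	return 0;
}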
5389
5390/**
5391 * mem_cgroup_commit_charge - commit a page charge
5392 * @page: page to charge
5393 * @memcg: memcg to charge the page to
5394 * @lrucare: page might be on LRU already
25843c2b 5395 * @compound: charge the page as compound or small page
00501b53
JW
5396 *
5397 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5398 * after page->mapping has been set up. This must happen atomically
5399 * as part of the page instantiation, i.e. under the page table lock
5400 * for anonymous pages, and under the page lock for page and swap cache.
5401 *
5402 * In addition, the page must not be on the LRU during the commit, to
5403 * prevent racing with task migration. If it might be, use @lrucare.
5404 *
5405 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5406 */
5407void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
f627c2f5 5408 bool lrucare, bool compound)
00501b53 5409{
f627c2f5 5410 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
00501b53
JW
5411
5412 VM_BUG_ON_PAGE(!page->mapping, page);
5413 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5414
5415 if (mem_cgroup_disabled())
5416 return;
5417 /*
5418 * Swap faults will attempt to charge the same page multiple
5419 * times. But reuse_swap_page() might have removed the page
5420 * from swapcache already, so we can't check PageSwapCache().
5421 */
5422 if (!memcg)
5423 return;
5424
6abb5a86
JW
5425 commit_charge(page, memcg, lrucare);
5426
6abb5a86 5427 local_irq_disable();
f627c2f5 5428 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
6abb5a86
JW
5429 memcg_check_events(memcg, page);
5430 local_irq_enable();
00501b53 5431
7941d214 5432 if (do_memsw_account() && PageSwapCache(page)) {
00501b53
JW
5433 swp_entry_t entry = { .val = page_private(page) };
5434 /*
5435 * The swap entry might not get freed for a long time,
5436 * so let's not wait for it. The page already received a
5437 * memory+swap charge, drop the swap entry duplicate.
5438 */
5439 mem_cgroup_uncharge_swap(entry);
5440 }
5441}
5442
5443/**
5444 * mem_cgroup_cancel_charge - cancel a page charge
5445 * @page: page to charge
5446 * @memcg: memcg to charge the page to
25843c2b 5447 * @compound: charge the page as compound or small page
00501b53
JW
5448 *
5449 * Cancel a charge transaction started by mem_cgroup_try_charge().
5450 */
f627c2f5
KS
5451void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5452 bool compound)
00501b53 5453{
f627c2f5 5454 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
00501b53
JW
5455
5456 if (mem_cgroup_disabled())
5457 return;
5458 /*
5459 * Swap faults will attempt to charge the same page multiple
5460 * times. But reuse_swap_page() might have removed the page
5461 * from swapcache already, so we can't check PageSwapCache().
5462 */
5463 if (!memcg)
5464 return;
5465
00501b53
JW
5466 cancel_charge(memcg, nr_pages);
5467}
5468
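/*
 * Flush one batch of uncharges accumulated by uncharge_list(): release the
 * page_counter charges, update the per-memcg statistics with interrupts
 * disabled, and drop the css references taken at charge time.
 */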
747db954 5469static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
747db954 5470 unsigned long nr_anon, unsigned long nr_file,
5e8d35f8
VD
5471 unsigned long nr_huge, unsigned long nr_kmem,
5472 struct page *dummy_page)
747db954 5473{
5e8d35f8 5474 unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
747db954
JW
5475 unsigned long flags;
5476
ce00a967 5477 if (!mem_cgroup_is_root(memcg)) {
18eca2e6 5478 page_counter_uncharge(&memcg->memory, nr_pages);
7941d214 5479 if (do_memsw_account())
18eca2e6 5480 page_counter_uncharge(&memcg->memsw, nr_pages);
5e8d35f8
VD
5481 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
5482 page_counter_uncharge(&memcg->kmem, nr_kmem);
ce00a967
JW
5483 memcg_oom_recover(memcg);
5484 }
747db954
JW
5485
5486 local_irq_save(flags);
5487 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5488 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5489 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5490 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
18eca2e6 5491 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
747db954
JW
5492 memcg_check_events(memcg, dummy_page);
5493 local_irq_restore(flags);
e8ea14cc
JW
5494
5495 if (!mem_cgroup_is_root(memcg))
18eca2e6 5496 css_put_many(&memcg->css, nr_pages);
747db954
JW
5497}
5498
5499static void uncharge_list(struct list_head *page_list)
5500{
5501 struct mem_cgroup *memcg = NULL;
747db954
JW
5502 unsigned long nr_anon = 0;
5503 unsigned long nr_file = 0;
5504 unsigned long nr_huge = 0;
5e8d35f8 5505 unsigned long nr_kmem = 0;
747db954 5506 unsigned long pgpgout = 0;
747db954
JW
5507 struct list_head *next;
5508 struct page *page;
5509
8b592656
JW
5510 /*
5511 * Note that the list can be a single page->lru; hence the
5512 * do-while loop instead of a simple list_for_each_entry().
5513 */
747db954
JW
5514 next = page_list->next;
5515 do {
747db954
JW
5516 page = list_entry(next, struct page, lru);
5517 next = page->lru.next;
5518
5519 VM_BUG_ON_PAGE(PageLRU(page), page);
5520 VM_BUG_ON_PAGE(page_count(page), page);
5521
1306a85a 5522 if (!page->mem_cgroup)
747db954
JW
5523 continue;
5524
5525 /*
5526 * Nobody should be changing or seriously looking at
1306a85a 5527 * page->mem_cgroup at this point; we have fully
29833315 5528 * exclusive access to the page.
747db954
JW
5529 */
5530
1306a85a 5531 if (memcg != page->mem_cgroup) {
747db954 5532 if (memcg) {
18eca2e6 5533 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5e8d35f8
VD
5534 nr_huge, nr_kmem, page);
5535 pgpgout = nr_anon = nr_file =
5536 nr_huge = nr_kmem = 0;
747db954 5537 }
1306a85a 5538 memcg = page->mem_cgroup;
747db954
JW
5539 }
5540
5e8d35f8
VD
5541 if (!PageKmemcg(page)) {
5542 unsigned int nr_pages = 1;
747db954 5543
5e8d35f8
VD
5544 if (PageTransHuge(page)) {
5545 nr_pages <<= compound_order(page);
5e8d35f8
VD
5546 nr_huge += nr_pages;
5547 }
5548 if (PageAnon(page))
5549 nr_anon += nr_pages;
5550 else
5551 nr_file += nr_pages;
5552 pgpgout++;
c4159a75 5553 } else {
5e8d35f8 5554 nr_kmem += 1 << compound_order(page);
c4159a75
VD
5555 __ClearPageKmemcg(page);
5556 }
747db954 5557
1306a85a 5558 page->mem_cgroup = NULL;
747db954
JW
5559 } while (next != page_list);
5560
5561 if (memcg)
18eca2e6 5562 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5e8d35f8 5563 nr_huge, nr_kmem, page);
747db954
JW
5564}
5565
0a31bc97
JW
5566/**
5567 * mem_cgroup_uncharge - uncharge a page
5568 * @page: page to uncharge
5569 *
5570 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5571 * mem_cgroup_commit_charge().
5572 */
5573void mem_cgroup_uncharge(struct page *page)
5574{
0a31bc97
JW
5575 if (mem_cgroup_disabled())
5576 return;
5577
747db954 5578 /* Don't touch page->lru of any random page, pre-check: */
1306a85a 5579 if (!page->mem_cgroup)
0a31bc97
JW
5580 return;
5581
747db954
JW
5582 INIT_LIST_HEAD(&page->lru);
5583 uncharge_list(&page->lru);
5584}
0a31bc97 5585
747db954
JW
5586/**
5587 * mem_cgroup_uncharge_list - uncharge a list of pages
5588 * @page_list: list of pages to uncharge
5589 *
5590 * Uncharge a list of pages previously charged with
5591 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5592 */
5593void mem_cgroup_uncharge_list(struct list_head *page_list)
5594{
5595 if (mem_cgroup_disabled())
5596 return;
0a31bc97 5597
747db954
JW
5598 if (!list_empty(page_list))
5599 uncharge_list(page_list);
0a31bc97
JW
5600}
5601
5602/**
6a93ca8f
JW
5603 * mem_cgroup_migrate - charge a page's replacement
5604 * @oldpage: currently circulating page
5605 * @newpage: replacement page
0a31bc97 5606 *
6a93ca8f
JW
5607 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5608 * be uncharged upon free.
0a31bc97
JW
5609 *
5610 * Both pages must be locked, @newpage->mapping must be set up.
5611 */
6a93ca8f 5612void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
0a31bc97 5613{
29833315 5614 struct mem_cgroup *memcg;
44b7a8d3
JW
5615 unsigned int nr_pages;
5616 bool compound;
d93c4130 5617 unsigned long flags;
0a31bc97
JW
5618
5619 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5620 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
0a31bc97 5621 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6abb5a86
JW
5622 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5623 newpage);
0a31bc97
JW
5624
5625 if (mem_cgroup_disabled())
5626 return;
5627
5628 /* Page cache replacement: new page already charged? */
1306a85a 5629 if (newpage->mem_cgroup)
0a31bc97
JW
5630 return;
5631
45637bab 5632 /* Swapcache readahead pages can get replaced before being charged */
1306a85a 5633 memcg = oldpage->mem_cgroup;
29833315 5634 if (!memcg)
0a31bc97
JW
5635 return;
5636
44b7a8d3
JW
5637 /* Force-charge the new page. The old one will be freed soon */
5638 compound = PageTransHuge(newpage);
5639 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5640
5641 page_counter_charge(&memcg->memory, nr_pages);
5642 if (do_memsw_account())
5643 page_counter_charge(&memcg->memsw, nr_pages);
5644 css_get_many(&memcg->css, nr_pages);
0a31bc97 5645
9cf7666a 5646 commit_charge(newpage, memcg, false);
44b7a8d3 5647
d93c4130 5648 local_irq_save(flags);
44b7a8d3
JW
5649 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5650 memcg_check_events(memcg, newpage);
d93c4130 5651 local_irq_restore(flags);
0a31bc97
JW
5652}
5653
ef12947c 5654DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
11092087
JW
5655EXPORT_SYMBOL(memcg_sockets_enabled_key);
5656
2d758073 5657void mem_cgroup_sk_alloc(struct sock *sk)
11092087
JW
5658{
5659 struct mem_cgroup *memcg;
5660
2d758073
JW
5661 if (!mem_cgroup_sockets_enabled)
5662 return;
5663
5664 /*
5665 * Socket cloning can throw us here with sk_memcg already
11092087
JW
5666 * filled. It won't, however, necessarily happen from
5667 * process context. So the test for root memcg given
5668 * the current task's memcg won't help us in this case.
5669 *
5670 * Respecting the original socket's memcg is a better
5671 * decision in this case.
5672 */
5673 if (sk->sk_memcg) {
5674 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5675 css_get(&sk->sk_memcg->css);
5676 return;
5677 }
5678
5679 rcu_read_lock();
5680 memcg = mem_cgroup_from_task(current);
f7e1cb6e
JW
5681 if (memcg == root_mem_cgroup)
5682 goto out;
0db15298 5683 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
f7e1cb6e 5684 goto out;
f7e1cb6e 5685 if (css_tryget_online(&memcg->css))
11092087 5686 sk->sk_memcg = memcg;
f7e1cb6e 5687out:
11092087
JW
5688 rcu_read_unlock();
5689}
11092087 5690
2d758073 5691void mem_cgroup_sk_free(struct sock *sk)
11092087 5692{
2d758073
JW
5693 if (sk->sk_memcg)
5694 css_put(&sk->sk_memcg->css);
11092087
JW
5695}
5696
5697/**
5698 * mem_cgroup_charge_skmem - charge socket memory
5699 * @memcg: memcg to charge
5700 * @nr_pages: number of pages to charge
5701 *
5702 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5703 * @memcg's configured limit, %false if the charge had to be forced.
5704 */
5705bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5706{
f7e1cb6e 5707 gfp_t gfp_mask = GFP_KERNEL;
11092087 5708
f7e1cb6e 5709 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 5710 struct page_counter *fail;
f7e1cb6e 5711
0db15298
JW
5712 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5713 memcg->tcpmem_pressure = 0;
f7e1cb6e
JW
5714 return true;
5715 }
0db15298
JW
5716 page_counter_charge(&memcg->tcpmem, nr_pages);
5717 memcg->tcpmem_pressure = 1;
f7e1cb6e 5718 return false;
11092087 5719 }
d886f4e4 5720
f7e1cb6e
JW
5721 /* Don't block in the packet receive path */
5722 if (in_softirq())
5723 gfp_mask = GFP_NOWAIT;
5724
b2807f07
JW
5725 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5726
f7e1cb6e
JW
5727 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5728 return true;
5729
5730 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
11092087
JW
5731 return false;
5732}
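/*
 * Hedged sketch of a caller honouring the return value documented above;
 * example_sk_reserve_pages() is an assumption, not an existing helper.
 */
static bool example_sk_reserve_pages(struct sock *sk, unsigned int nr_pages)
{
	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
		return true;

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;

	/* The charge was forced past the limit: treat it as memcg pressure. */
	return false;
}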
5733
5734/**
5735 * mem_cgroup_uncharge_skmem - uncharge socket memory
5736 * @memcg: memcg to uncharge
5737 * @nr_pages: number of pages to uncharge
5738 */
5739void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5740{
f7e1cb6e 5741 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 5742 page_counter_uncharge(&memcg->tcpmem, nr_pages);
f7e1cb6e
JW
5743 return;
5744 }
d886f4e4 5745
b2807f07
JW
5746 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5747
f7e1cb6e
JW
5748 page_counter_uncharge(&memcg->memory, nr_pages);
5749 css_put_many(&memcg->css, nr_pages);
11092087
JW
5750}
5751
f7e1cb6e
JW
5752static int __init cgroup_memory(char *s)
5753{
5754 char *token;
5755
5756 while ((token = strsep(&s, ",")) != NULL) {
5757 if (!*token)
5758 continue;
5759 if (!strcmp(token, "nosocket"))
5760 cgroup_memory_nosocket = true;
04823c83
VD
5761 if (!strcmp(token, "nokmem"))
5762 cgroup_memory_nokmem = true;
f7e1cb6e
JW
5763 }
5764 return 0;
5765}
5766__setup("cgroup.memory=", cgroup_memory);
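/* Example: booting with "cgroup.memory=nosocket,nokmem" disables both socket
 * memory accounting and kernel memory accounting. */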
11092087 5767
2d11085e 5768/*
1081312f
MH
5769 * subsys_initcall() for memory controller.
5770 *
308167fc
SAS
5771 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5772 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5773 * basically everything that doesn't depend on a specific mem_cgroup structure
5774 * should be initialized from here.
2d11085e
MH
5775 */
5776static int __init mem_cgroup_init(void)
5777{
95a045f6
JW
5778 int cpu, node;
5779
13583c3d
VD
5780#ifndef CONFIG_SLOB
5781 /*
5782 * Kmem cache creation is mostly done with the slab_mutex held,
17cc4dfe
TH
5783 * so use a workqueue with limited concurrency to avoid stalling
5784 * all worker threads in case lots of cgroups are created and
5785 * destroyed simultaneously.
13583c3d 5786 */
17cc4dfe
TH
5787 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
5788 BUG_ON(!memcg_kmem_cache_wq);
13583c3d
VD
5789#endif
5790
308167fc
SAS
5791 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5792 memcg_hotplug_cpu_dead);
95a045f6
JW
5793
5794 for_each_possible_cpu(cpu)
5795 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5796 drain_local_stock);
5797
5798 for_each_node(node) {
5799 struct mem_cgroup_tree_per_node *rtpn;
95a045f6
JW
5800
5801 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5802 node_online(node) ? node : NUMA_NO_NODE);
5803
ef8f2327
MG
5804 rtpn->rb_root = RB_ROOT;
5805 spin_lock_init(&rtpn->lock);
95a045f6
JW
5806 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5807 }
5808
2d11085e
MH
5809 return 0;
5810}
5811subsys_initcall(mem_cgroup_init);
21afa38e
JW
5812
5813#ifdef CONFIG_MEMCG_SWAP
358c07fc
AB
5814static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5815{
5816 while (!atomic_inc_not_zero(&memcg->id.ref)) {
5817 /*
5818 * The root cgroup cannot be destroyed, so its refcount must
5819 * always be >= 1.
5820 */
5821 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5822 VM_BUG_ON(1);
5823 break;
5824 }
5825 memcg = parent_mem_cgroup(memcg);
5826 if (!memcg)
5827 memcg = root_mem_cgroup;
5828 }
5829 return memcg;
5830}
5831
21afa38e
JW
5832/**
5833 * mem_cgroup_swapout - transfer a memsw charge to swap
5834 * @page: page whose memsw charge to transfer
5835 * @entry: swap entry to move the charge to
5836 *
5837 * Transfer the memsw charge of @page to @entry.
5838 */
5839void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5840{
1f47b61f 5841 struct mem_cgroup *memcg, *swap_memcg;
21afa38e
JW
5842 unsigned short oldid;
5843
5844 VM_BUG_ON_PAGE(PageLRU(page), page);
5845 VM_BUG_ON_PAGE(page_count(page), page);
5846
7941d214 5847 if (!do_memsw_account())
21afa38e
JW
5848 return;
5849
5850 memcg = page->mem_cgroup;
5851
5852 /* Readahead page, never charged */
5853 if (!memcg)
5854 return;
5855
1f47b61f
VD
5856 /*
5857 * In case the memcg owning these pages has been offlined and doesn't
5858 * have an ID allocated to it anymore, charge the closest online
5859 * ancestor for the swap instead and transfer the memory+swap charge.
5860 */
5861 swap_memcg = mem_cgroup_id_get_online(memcg);
5862 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
21afa38e 5863 VM_BUG_ON_PAGE(oldid, page);
1f47b61f 5864 mem_cgroup_swap_statistics(swap_memcg, true);
21afa38e
JW
5865
5866 page->mem_cgroup = NULL;
5867
5868 if (!mem_cgroup_is_root(memcg))
5869 page_counter_uncharge(&memcg->memory, 1);
5870
1f47b61f
VD
5871 if (memcg != swap_memcg) {
5872 if (!mem_cgroup_is_root(swap_memcg))
5873 page_counter_charge(&swap_memcg->memsw, 1);
5874 page_counter_uncharge(&memcg->memsw, 1);
5875 }
5876
ce9ce665
SAS
5877 /*
5878 * Interrupts should be disabled here because the caller holds the
5879 * mapping->tree_lock, which is taken with interrupts off. It is
5880 * important here to have the interrupts disabled because it is the
5881 * only synchronisation we have for updating the per-CPU variables.
5882 */
5883 VM_BUG_ON(!irqs_disabled());
f627c2f5 5884 mem_cgroup_charge_statistics(memcg, page, false, -1);
21afa38e 5885 memcg_check_events(memcg, page);
73f576c0
JW
5886
5887 if (!mem_cgroup_is_root(memcg))
5888 css_put(&memcg->css);
21afa38e
JW
5889}
5890
37e84351
VD
5891/**
5892 * mem_cgroup_try_charge_swap - try charging a swap entry
5893 * @page: page being added to swap
5894 * @entry: swap entry to charge
5895 *
5896 * Try to charge @entry to the memcg that @page belongs to.
5897 *
5898 * Returns 0 on success, -ENOMEM on failure.
5899 */
5900int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5901{
5902 struct mem_cgroup *memcg;
5903 struct page_counter *counter;
5904 unsigned short oldid;
5905
5906 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5907 return 0;
5908
5909 memcg = page->mem_cgroup;
5910
5911 /* Readahead page, never charged */
5912 if (!memcg)
5913 return 0;
5914
1f47b61f
VD
5915 memcg = mem_cgroup_id_get_online(memcg);
5916
37e84351 5917 if (!mem_cgroup_is_root(memcg) &&
1f47b61f
VD
5918 !page_counter_try_charge(&memcg->swap, 1, &counter)) {
5919 mem_cgroup_id_put(memcg);
37e84351 5920 return -ENOMEM;
1f47b61f 5921 }
37e84351
VD
5922
5923 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5924 VM_BUG_ON_PAGE(oldid, page);
5925 mem_cgroup_swap_statistics(memcg, true);
5926
37e84351
VD
5927 return 0;
5928}
5929
21afa38e
JW
5930/**
5931 * mem_cgroup_uncharge_swap - uncharge a swap entry
5932 * @entry: swap entry to uncharge
5933 *
37e84351 5934 * Drop the swap charge associated with @entry.
21afa38e
JW
5935 */
5936void mem_cgroup_uncharge_swap(swp_entry_t entry)
5937{
5938 struct mem_cgroup *memcg;
5939 unsigned short id;
5940
37e84351 5941 if (!do_swap_account)
21afa38e
JW
5942 return;
5943
5944 id = swap_cgroup_record(entry, 0);
5945 rcu_read_lock();
adbe427b 5946 memcg = mem_cgroup_from_id(id);
21afa38e 5947 if (memcg) {
37e84351
VD
5948 if (!mem_cgroup_is_root(memcg)) {
5949 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5950 page_counter_uncharge(&memcg->swap, 1);
5951 else
5952 page_counter_uncharge(&memcg->memsw, 1);
5953 }
21afa38e 5954 mem_cgroup_swap_statistics(memcg, false);
73f576c0 5955 mem_cgroup_id_put(memcg);
21afa38e
JW
5956 }
5957 rcu_read_unlock();
5958}
5959
d8b38438
VD
5960long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5961{
5962 long nr_swap_pages = get_nr_swap_pages();
5963
5964 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5965 return nr_swap_pages;
5966 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5967 nr_swap_pages = min_t(long, nr_swap_pages,
5968 READ_ONCE(memcg->swap.limit) -
5969 page_counter_read(&memcg->swap));
5970 return nr_swap_pages;
5971}
5972
5ccc5aba
VD
5973bool mem_cgroup_swap_full(struct page *page)
5974{
5975 struct mem_cgroup *memcg;
5976
5977 VM_BUG_ON_PAGE(!PageLocked(page), page);
5978
5979 if (vm_swap_full())
5980 return true;
5981 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5982 return false;
5983
5984 memcg = page->mem_cgroup;
5985 if (!memcg)
5986 return false;
5987
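 /*
 * Per-cgroup analogue of vm_swap_full(): consider swap "full" once any
 * cgroup in the hierarchy has used half of its swap.max.
 */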
5988 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5989 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5990 return true;
5991
5992 return false;
5993}
5994
21afa38e
JW
5995 /* used to remember the boot option */
5996#ifdef CONFIG_MEMCG_SWAP_ENABLED
5997static int really_do_swap_account __initdata = 1;
5998#else
5999static int really_do_swap_account __initdata;
6000#endif
6001
6002static int __init enable_swap_account(char *s)
6003{
6004 if (!strcmp(s, "1"))
6005 really_do_swap_account = 1;
6006 else if (!strcmp(s, "0"))
6007 really_do_swap_account = 0;
6008 return 1;
6009}
6010__setup("swapaccount=", enable_swap_account);
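/* Example: booting with "swapaccount=0" keeps the swap controller disabled
 * even when CONFIG_MEMCG_SWAP_ENABLED is set; "swapaccount=1" enables it. */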
6011
37e84351
VD
6012static u64 swap_current_read(struct cgroup_subsys_state *css,
6013 struct cftype *cft)
6014{
6015 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6016
6017 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6018}
6019
6020static int swap_max_show(struct seq_file *m, void *v)
6021{
6022 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
6023 unsigned long max = READ_ONCE(memcg->swap.limit);
6024
6025 if (max == PAGE_COUNTER_MAX)
6026 seq_puts(m, "max\n");
6027 else
6028 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
6029
6030 return 0;
6031}
6032
6033static ssize_t swap_max_write(struct kernfs_open_file *of,
6034 char *buf, size_t nbytes, loff_t off)
6035{
6036 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6037 unsigned long max;
6038 int err;
6039
6040 buf = strstrip(buf);
6041 err = page_counter_memparse(buf, "max", &max);
6042 if (err)
6043 return err;
6044
6045 mutex_lock(&memcg_limit_mutex);
6046 err = page_counter_limit(&memcg->swap, max);
6047 mutex_unlock(&memcg_limit_mutex);
6048 if (err)
6049 return err;
6050
6051 return nbytes;
6052}
6053
6054static struct cftype swap_files[] = {
6055 {
6056 .name = "swap.current",
6057 .flags = CFTYPE_NOT_ON_ROOT,
6058 .read_u64 = swap_current_read,
6059 },
6060 {
6061 .name = "swap.max",
6062 .flags = CFTYPE_NOT_ON_ROOT,
6063 .seq_show = swap_max_show,
6064 .write = swap_max_write,
6065 },
6066 { } /* terminate */
6067};
6068
21afa38e
JW
6069static struct cftype memsw_cgroup_files[] = {
6070 {
6071 .name = "memsw.usage_in_bytes",
6072 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6073 .read_u64 = mem_cgroup_read_u64,
6074 },
6075 {
6076 .name = "memsw.max_usage_in_bytes",
6077 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6078 .write = mem_cgroup_reset,
6079 .read_u64 = mem_cgroup_read_u64,
6080 },
6081 {
6082 .name = "memsw.limit_in_bytes",
6083 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6084 .write = mem_cgroup_write,
6085 .read_u64 = mem_cgroup_read_u64,
6086 },
6087 {
6088 .name = "memsw.failcnt",
6089 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6090 .write = mem_cgroup_reset,
6091 .read_u64 = mem_cgroup_read_u64,
6092 },
6093 { }, /* terminate */
6094};
6095
6096static int __init mem_cgroup_swap_init(void)
6097{
6098 if (!mem_cgroup_disabled() && really_do_swap_account) {
6099 do_swap_account = 1;
37e84351
VD
6100 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6101 swap_files));
21afa38e
JW
6102 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6103 memsw_cgroup_files));
6104 }
6105 return 0;
6106}
6107subsys_initcall(mem_cgroup_swap_init);
6108
6109#endif /* CONFIG_MEMCG_SWAP */