// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

static int sysctl_panic_on_oom;
static int sysctl_oom_kill_allocating_task;
static int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overeager oom killing (e.g. when the oom killer is invoked from
 * different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context; 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_in_oom_domain(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Check whether the amount of unreclaimable slab is greater than
 * all user memory (LRU pages).
 * dump_unreclaimable_slab() helps when the oom is due to too much
 * unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the
	 * middle of vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}

static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used. So we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Hopefully CONSTRAINT_THISNODE would fit, but there is no way to
	 * handle it, for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

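/*
 * Callback for mem_cgroup_scan_tasks() and the global tasklist walk in
 * select_bad_process().  Scores @task and remembers the highest scorer so
 * far in oc->chosen.  Returns 1 to abort the scan (an existing OOM victim
 * is still exiting, so a new kill is unnecessary) and 0 to continue.
 */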
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

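/*
 * Print one row of the task dump for @p: pid, uid, memory footprint and
 * oom_score_adj.  Used as a callback from dump_tasks() for both the global
 * and the memcg OOM paths; always returns 0 so the scan visits every task.
 */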
static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * All of p's threads have already detached their mm's. There's
		 * no need to report them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

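/*
 * Print the full OOM report: the allocation context that invoked the killer,
 * a stack trace, the global or memcg memory state, optionally the per-task
 * dump and, if a victim @p has been chosen, the one-line summary above.
 */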
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		__show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask));
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

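/* Convert a number of pages to kilobytes for the kill/reap log lines. */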
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

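/*
 * Tear down the private anonymous mappings of @mm to free memory on the
 * victim's behalf.  The caller must hold mmap_read_lock(mm).  Returns false
 * if a VMA was skipped because a non-blocking mmu notifier invalidation
 * could not be started, in which case the caller should retry later.
 */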
static bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for_each_vma(vmi, vma) {
		if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
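/*
 * Retry oom_reap_task_mm() up to MAX_OOM_REAP_RETRIES times.  Whether or not
 * reaping succeeded, MMF_OOM_SKIP is set at the end so the OOM killer stops
 * waiting on this victim and can pick another task if memory is still short.
 */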
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by queue_oom_reaper */
	put_task_struct(tsk);
}

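/*
 * Main loop of the oom_reaper kernel thread: sleep until a victim is queued
 * on oom_reaper_list, then pop and reap entries one at a time.
 */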
static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock_irq(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock_irq(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

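/*
 * Timer callback armed by queue_oom_reaper(): the victim has not exited
 * within OOM_REAPER_DELAY, so push it onto oom_reaper_list and wake the
 * reaper thread.
 */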
static void wake_oom_reaper(struct timer_list *timer)
{
	struct task_struct *tsk = container_of(timer, struct task_struct,
			oom_reaper_timer);
	struct mm_struct *mm = tsk->signal->oom_mm;
	unsigned long flags;

	/* The victim managed to terminate on its own - see exit_mmap */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		put_task_struct(tsk);
		return;
	}

	spin_lock_irqsave(&oom_reaper_lock, flags);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock_irqrestore(&oom_reaper_lock, flags);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

/*
 * Give the OOM victim time to exit naturally before invoking the oom_reaping.
 * The timer's timeout is arbitrary... the longer it is, the longer the worst
 * case scenario for the OOM can take. If it is too small, the oom_reaper can
 * get in the way and release resources needed by the process exit path.
 * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
 * before the exit path is able to wake the futex waiters.
 */
#define OOM_REAPER_DELAY (2*HZ)
static void queue_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);
	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
	add_timer(&tsk->oom_reaper_timer);
}

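/* Knobs exposed under /proc/sys/vm/, registered below from oom_init(). */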
#ifdef CONFIG_SYSCTL
static struct ctl_table vm_oom_kill_table[] = {
	{
		.procname	= "panic_on_oom",
		.data		= &sysctl_panic_on_oom,
		.maxlen		= sizeof(sysctl_panic_on_oom),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "oom_kill_allocating_task",
		.data		= &sysctl_oom_kill_allocating_task,
		.maxlen		= sizeof(sysctl_oom_kill_allocating_task),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "oom_dump_tasks",
		.data		= &sysctl_oom_dump_tasks,
		.maxlen		= sizeof(sysctl_oom_dump_tasks),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};
#endif

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
#ifdef CONFIG_SYSCTL
	register_sysctl_init("vm", vm_oom_kill_table);
#endif
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void queue_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		mmgrab(tsk->signal->oom_mm);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in
	 * coredump_task_exit(), so the oom killer cannot assume that
	 * the process will promptly exit and release memory.
	 */
	if (sig->core_state)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

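/*
 * Do the actual kill: send SIGKILL to the victim, mark it as an OOM victim,
 * log its memory footprint, and SIGKILL every other process sharing its mm
 * so the address space can be reaped.  Consumes the caller's reference on
 * @victim.
 */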
static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
				task_pid_nr(victim), victim->comm,
				task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		queue_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill provided task unless it's secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

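/*
 * Kill oc->chosen.  If the victim is already exiting, just give it access
 * to memory reserves and queue the reaper instead of killing anything.
 * When a cgroup oom group policy applies, every task in the victim's oom
 * group is killed as well.
 */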
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		queue_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		memcg_memory_event(oom_group, MEMCG_OOM_GROUP_KILL);
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0 && !is_sysrq_oom(oc))
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		queue_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to make
	 * sure to exclude the 0 mask - all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
	 * invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because some allocation has failed. We
 * have to take care of the memcg OOM here because this is the only safe
 * context without any locks held, but we let the oom killer triggered from
 * the allocation context care about the global OOM.
 */
void pagefault_out_of_memory(void)
{
	static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (fatal_signal_pending(current))
		return;

	if (__ratelimit(&pfoom_rs))
		pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
}

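/*
 * The process_mrelease() system call: release the address space of a dying
 * process identified by @pidfd, much like the oom_reaper does for OOM
 * victims.  Fails with -EINVAL if the process is not exiting and its memory
 * has not already been reaped.
 */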
SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
{
#ifdef CONFIG_MMU
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	struct task_struct *p;
	unsigned int f_flags;
	bool reap = false;
	long ret = 0;

	if (flags)
		return -EINVAL;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/*
	 * Make sure to choose a thread which still has a reference to mm
	 * during the group exit
	 */
	p = find_lock_task_mm(task);
	if (!p) {
		ret = -ESRCH;
		goto put_task;
	}

	mm = p->mm;
	mmgrab(mm);

	if (task_will_free_mem(p))
		reap = true;
	else {
		/* Error only if the work has not been done already */
		if (!test_bit(MMF_OOM_SKIP, &mm->flags))
			ret = -EINVAL;
	}
	task_unlock(p);

	if (!reap)
		goto drop_mm;

	if (mmap_read_lock_killable(mm)) {
		ret = -EINTR;
		goto drop_mm;
	}
	/*
	 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
	 * possible change in exit_mmap is seen
	 */
	if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm))
		ret = -EAGAIN;
	mmap_read_unlock(mm);

drop_mm:
	mmdrop(mm);
put_task:
	put_task_struct(task);
	return ret;
#else
	return -ENOSYS;
#endif /* CONFIG_MMU */
}