// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support multiple memory policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
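
/*
 * Illustrative userspace usage of the policies above (a sketch only,
 * using the libnuma <numaif.h> wrappers; error handling omitted and
 * addr/len assumed to describe an existing mapping):
 *
 *	unsigned long mask = 0x3;	(nodes 0 and 1)
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *	unsigned long node0 = 0x1;
 *	mbind(addr, len, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 *
 * The first call interleaves all future allocations of the calling task
 * over nodes 0-1; the second restricts one mapping to node 0 only.
 */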

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

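/*
 * Worked example (illustrative numbers): if node 2 is offline and
 * node_distance(2, 0) == 30 while node_distance(2, 1) == 20, then
 * numa_map_to_online_node(2) walks the online nodes and returns 1,
 * the closest one by distance.
 */
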
struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

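/*
 * Worked example (illustrative): for a user-supplied relative mask
 * *orig = {0,2} and an allowed set *rel = {4,5,6}, nodes_fold() wraps
 * the relative bits into the 3-node space (here {0,2} is unchanged) and
 * nodes_onto() maps them onto the 0th and 2nd allowed nodes, so
 * *ret = {4,6}.
 */
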
static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_lock during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

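/*
 * Example (illustrative): with qp->nmask = {0,1} and MPOL_MF_INVERT
 * clear, a page on node 0 satisfies the check while a page on node 2
 * does not; with MPOL_MF_INVERT set, the outcomes are reversed.
 */
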
/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the PMD is a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that does
 *        not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		walk->action = ACTION_CONTINUE;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range. Still
			 * need to migrate the other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced page and no
		 * need to further check other vma.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page, but still allow migrating
		 * pages which have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	if (flags & MPOL_F_NUMA_BALANCING) {
		if (new && new->mode == MPOL_BIND) {
			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
		} else {
			ret = -EINVAL;
			mpol_put(new);
			goto out;
		}
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
		*nodes = p->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;
	int locked = 1;

	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL. We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here. And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment. It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration. Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same. If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient. As we go, we remember the
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship. In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */
			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct vm_area_struct *vma;
	unsigned long address;

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	int ret;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_disable();

	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			mmap_write_lock(mm);
			err = mpol_set_nodemask(new, nmask, scratch);
			if (err)
				mmap_write_unlock(mm);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	ret = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	if (ret < 0) {
		err = ret;
		goto up_out;
	}

	err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
			err = -EIO;
	} else {
up_out:
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
	}

	mmap_write_unlock(mm);
mpol_out:
	mpol_put(new);
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_enable();
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long t;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/*
	 * When the user specified more nodes than supported just check
	 * if the non supported part is all zero.
	 *
	 * If maxnode covers more longs than MAX_NUMNODES, check the bits
	 * in that area first, and then go on to check the remaining bits
	 * at or above MAX_NUMNODES. Otherwise, just check bits
	 * [MAX_NUMNODES, maxnode).
	 */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
		unsigned long valid_mask = endmask;

		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		if (get_user(t, nmask + nlongs - 1))
			return -EFAULT;
		if (t & valid_mask)
			return -EINVAL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

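/*
 * Example (illustrative, for a 64-bit kernel with MAX_NUMNODES == 1024):
 * a caller passing maxnode == 2048 is accepted as long as every
 * addressable bit at or above MAX_NUMNODES in the user bitmap is zero;
 * get_nodes() verifies the high words, then copies only
 * BITS_TO_LONGS(1024) words and masks the final word with endmask.
 */
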
/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
{
	*flags = *mode & MPOL_MODE_FLAGS;
	*mode &= ~MPOL_MODE_FLAGS;

	if ((unsigned int)(*mode) >= MPOL_MAX)
		return -EINVAL;
	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;

	return 0;
}

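/*
 * Example (illustrative): userspace ORs the mode flags into the mode
 * argument, e.g. set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES, ...);
 * sanitize_mpol_flags() then splits this into *mode == MPOL_INTERLEAVE
 * and *flags == MPOL_F_STATIC_NODES, rejecting out-of-range modes and
 * the mutually exclusive STATIC/RELATIVE flag pair.
 */
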
static long kernel_mbind(unsigned long start, unsigned long len,
			 unsigned long mode, const unsigned long __user *nmask,
			 unsigned long maxnode, unsigned int flags)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	start = untagged_addr(start);
	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned int, flags)
{
	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_set_mempolicy(lmode, mode_flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
				const unsigned long __user *old_nodes,
				const unsigned long __user *new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified process.
	 * Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}

/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr,
				unsigned long flags)
{
	int err;
	int pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < nr_node_ids)
		return -EINVAL;

	addr = untagged_addr(addr);

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		       compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode,
		       compat_ulong_t, addr, compat_ulong_t, flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;
		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(bm, nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, bm, alloc_size))
			return -EFAULT;
	}

	return kernel_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
			return -EFAULT;
	}

	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
}

COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
		       compat_ulong_t, maxnode,
		       const compat_ulong_t __user *, old_nodes,
		       const compat_ulong_t __user *, new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
}

#endif /* CONFIG_COMPAT */

20ca87f2
LX
1755bool vma_migratable(struct vm_area_struct *vma)
1756{
1757 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1758 return false;
1759
1760 /*
1761 * DAX device mappings require predictable access latency, so avoid
1762 * incurring periodic faults.
1763 */
1764 if (vma_is_dax(vma))
1765 return false;
1766
1767 if (is_vm_hugetlb_page(vma) &&
1768 !hugepage_migration_supported(hstate_vma(vma)))
1769 return false;
1770
1771 /*
1772 * Migration allocates pages in the highest zone. If we cannot
1773 * do so then migration (at least from node to node) is not
1774 * possible.
1775 */
1776 if (vma->vm_file &&
1777 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1778 < policy_zone)
1779 return false;
1780 return true;
1781}
1782
74d2c3a0
ON
1783struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1784 unsigned long addr)
1da177e4 1785{
8d90274b 1786 struct mempolicy *pol = NULL;
1da177e4
LT
1787
1788 if (vma) {
480eccf9 1789 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1790 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1791 } else if (vma->vm_policy) {
1da177e4 1792 pol = vma->vm_policy;
00442ad0
MG
1793
1794 /*
1795 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1796 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1797 * count on these policies which will be dropped by
1798 * mpol_cond_put() later
1799 */
1800 if (mpol_needs_cond_ref(pol))
1801 mpol_get(pol);
1802 }
1da177e4 1803 }
f15ca78e 1804
74d2c3a0
ON
1805 return pol;
1806}
1807
1808/*
dd6eecb9 1809 * get_vma_policy(@vma, @addr)
74d2c3a0
ON
1810 * @vma: virtual memory area whose policy is sought
1811 * @addr: address in @vma for shared policy lookup
1812 *
1813 * Returns effective policy for a VMA at specified address.
dd6eecb9 1814 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1815 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1816 * count--added by the get_policy() vm_op, as appropriate--to protect against
1817 * freeing by another task. It is the caller's responsibility to free the
1818 * extra reference for shared policies.
1819 */
ac79f78d 1820static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
dd6eecb9 1821 unsigned long addr)
74d2c3a0
ON
1822{
1823 struct mempolicy *pol = __get_vma_policy(vma, addr);
1824
8d90274b 1825 if (!pol)
dd6eecb9 1826 pol = get_task_policy(current);
8d90274b 1827
1da177e4
LT
1828 return pol;
1829}
1830
6b6482bb 1831bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1832{
6b6482bb 1833 struct mempolicy *pol;
fc314724 1834
6b6482bb
ON
1835 if (vma->vm_ops && vma->vm_ops->get_policy) {
1836 bool ret = false;
fc314724 1837
6b6482bb
ON
1838 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1839 if (pol && (pol->flags & MPOL_F_MOF))
1840 ret = true;
1841 mpol_cond_put(pol);
8d90274b 1842
6b6482bb 1843 return ret;
fc314724
MG
1844 }
1845
6b6482bb 1846 pol = vma->vm_policy;
8d90274b 1847 if (!pol)
6b6482bb 1848 pol = get_task_policy(current);
8d90274b 1849
fc314724
MG
1850 return pol->flags & MPOL_F_MOF;
1851}
1852
d3eb1570
LJ
1853static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1854{
1855 enum zone_type dynamic_policy_zone = policy_zone;
1856
1857 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1858
1859 /*
269fbe72 1860 * if policy->nodes has movable memory only,
d3eb1570
LJ
1861 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1862 *
269fbe72 1863 * policy->nodes is intersect with node_states[N_MEMORY].
f0953a1b 1864 * so if the following test fails, it implies
269fbe72 1865 * policy->nodes has movable memory only.
d3eb1570 1866 */
269fbe72 1867 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
d3eb1570
LJ
1868 dynamic_policy_zone = ZONE_MOVABLE;
1869
1870 return zone >= dynamic_policy_zone;
1871}
1872
52cd3b07
LS
1873/*
1874 * Return a nodemask representing a mempolicy for filtering nodes for
1875 * page allocation
1876 */
8ca39e68 1877nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32 1878{
b27abacc
DH
1879 int mode = policy->mode;
1880
19770b32 1881 /* Lower zones don't get a nodemask applied for MPOL_BIND */
b27abacc
DH
1882 if (unlikely(mode == MPOL_BIND) &&
1883 apply_policy_zone(policy, gfp_zone(gfp)) &&
1884 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1885 return &policy->nodes;
1886
1887 if (mode == MPOL_PREFERRED_MANY)
269fbe72 1888 return &policy->nodes;
19770b32
MG
1889
1890 return NULL;
1891}
1892
b27abacc
DH
1893/*
1894 * Return the preferred node id for 'prefer' mempolicy, and return
1895 * the given id for all other policies.
1896 *
1897 * policy_node() is always coupled with policy_nodemask(), which
1898 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1899 */
f8fd5253 1900static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1da177e4 1901{
7858d7bc 1902 if (policy->mode == MPOL_PREFERRED) {
269fbe72 1903 nd = first_node(policy->nodes);
7858d7bc 1904 } else {
19770b32 1905 /*
6d840958
MH
1906 * __GFP_THISNODE shouldn't even be used with the bind policy
1907 * because we might easily break the expectation to stay on the
1908 * requested node and not break the policy.
19770b32 1909 */
6d840958 1910 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1911 }
6d840958 1912
04ec6264 1913 return nd;
1da177e4
LT
1914}
1915
1916/* Do dynamic interleaving for a process */
1917static unsigned interleave_nodes(struct mempolicy *policy)
1918{
45816682 1919 unsigned next;
1da177e4
LT
1920 struct task_struct *me = current;
1921
269fbe72 1922 next = next_node_in(me->il_prev, policy->nodes);
f5b087b5 1923 if (next < MAX_NUMNODES)
45816682
VB
1924 me->il_prev = next;
1925 return next;
1da177e4
LT
1926}
1927
dc85da15
CL
1928/*
1929 * Depending on the memory policy provide a node from which to allocate the
1930 * next slab entry.
1931 */
2a389610 1932unsigned int mempolicy_slab_node(void)
dc85da15 1933{
e7b691b0 1934 struct mempolicy *policy;
2a389610 1935 int node = numa_mem_id();
e7b691b0
AK
1936
1937 if (in_interrupt())
2a389610 1938 return node;
e7b691b0
AK
1939
1940 policy = current->mempolicy;
7858d7bc 1941 if (!policy)
2a389610 1942 return node;
bea904d5
LS
1943
1944 switch (policy->mode) {
1945 case MPOL_PREFERRED:
269fbe72 1946 return first_node(policy->nodes);
765c4507 1947
dc85da15
CL
1948 case MPOL_INTERLEAVE:
1949 return interleave_nodes(policy);
1950
b27abacc
DH
1951 case MPOL_BIND:
1952 case MPOL_PREFERRED_MANY:
1953 {
c33d6c06
MG
1954 struct zoneref *z;
1955
dc85da15
CL
1956 /*
1957 * Follow bind policy behavior and start allocation at the
1958 * first node.
1959 */
19770b32 1960 struct zonelist *zonelist;
19770b32 1961 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1962 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06 1963 z = first_zones_zonelist(zonelist, highest_zoneidx,
269fbe72 1964 &policy->nodes);
c1093b74 1965 return z->zone ? zone_to_nid(z->zone) : node;
dd1a239f 1966 }
7858d7bc
FT
1967 case MPOL_LOCAL:
1968 return node;
dc85da15 1969
dc85da15 1970 default:
bea904d5 1971 BUG();
dc85da15
CL
1972 }
1973}
1974
fee83b3a
AM
1975/*
1976 * Do static interleaving for a VMA with known offset @n. Returns the n'th
269fbe72 1977 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
fee83b3a
AM
1978 * number of present nodes.
1979 */
98c70baa 1980static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1da177e4 1981{
269fbe72 1982 unsigned nnodes = nodes_weight(pol->nodes);
f5b087b5 1983 unsigned target;
fee83b3a
AM
1984 int i;
1985 int nid;
1da177e4 1986
f5b087b5
DR
1987 if (!nnodes)
1988 return numa_node_id();
fee83b3a 1989 target = (unsigned int)n % nnodes;
269fbe72 1990 nid = first_node(pol->nodes);
fee83b3a 1991 for (i = 0; i < target; i++)
269fbe72 1992 nid = next_node(nid, pol->nodes);
1da177e4
LT
1993 return nid;
1994}
1995
5da7ca86
CL
1996/* Determine a node number for interleave */
1997static inline unsigned interleave_nid(struct mempolicy *pol,
1998 struct vm_area_struct *vma, unsigned long addr, int shift)
1999{
2000 if (vma) {
2001 unsigned long off;
2002
3b98b087
NA
2003 /*
2004 * for small pages, there is no difference between
2005 * shift and PAGE_SHIFT, so the bit-shift is safe.
2006 * for huge pages, since vm_pgoff is in units of small
2007 * pages, we need to shift off the always 0 bits to get
2008 * a useful offset.
2009 */
2010 BUG_ON(shift < PAGE_SHIFT);
2011 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86 2012 off += (addr - vma->vm_start) >> shift;
98c70baa 2013 return offset_il_node(pol, off);
5da7ca86
CL
2014 } else
2015 return interleave_nodes(pol);
2016}
2017
00ac59ad 2018#ifdef CONFIG_HUGETLBFS
480eccf9 2019/*
04ec6264 2020 * huge_node(@vma, @addr, @gfp_flags, @mpol)
b46e14ac
FF
2021 * @vma: virtual memory area whose policy is sought
2022 * @addr: address in @vma for shared policy lookup and interleave policy
2023 * @gfp_flags: for requested zone
2024 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
b27abacc 2025 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
480eccf9 2026 *
04ec6264 2027 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07 2028 * to the struct mempolicy for conditional unref after allocation.
b27abacc
DH
2029 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2030 * to the mempolicy's @nodemask for filtering the zonelist.
c0ff7453 2031 *
d26914d1 2032 * Must be protected by read_mems_allowed_begin()
480eccf9 2033 */
04ec6264
VB
2034int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2035 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 2036{
04ec6264 2037 int nid;
b27abacc 2038 int mode;
5da7ca86 2039
dd6eecb9 2040 *mpol = get_vma_policy(vma, addr);
b27abacc
DH
2041 *nodemask = NULL;
2042 mode = (*mpol)->mode;
5da7ca86 2043
b27abacc 2044 if (unlikely(mode == MPOL_INTERLEAVE)) {
04ec6264
VB
2045 nid = interleave_nid(*mpol, vma, addr,
2046 huge_page_shift(hstate_vma(vma)));
52cd3b07 2047 } else {
04ec6264 2048 nid = policy_node(gfp_flags, *mpol, numa_node_id());
b27abacc 2049 if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
269fbe72 2050 *nodemask = &(*mpol)->nodes;
480eccf9 2051 }
04ec6264 2052 return nid;
5da7ca86 2053}
06808b08
LS
2054
2055/*
2056 * init_nodemask_of_mempolicy
2057 *
2058 * If the current task's mempolicy is "default" [NULL], return 'false'
2059 * to indicate default policy. Otherwise, extract the policy nodemask
2060 * for 'bind' or 'interleave' policy into the argument nodemask, or
2061 * initialize the argument nodemask to contain the single node for
2062 * 'preferred' or 'local' policy and return 'true' to indicate presence
2063 * of non-default mempolicy.
2064 *
2065 * We don't bother with reference counting the mempolicy [mpol_get/put]
2066 * because the current task is examining it's own mempolicy and a task's
2067 * mempolicy is only ever changed by the task itself.
2068 *
2069 * N.B., it is the caller's responsibility to free a returned nodemask.
2070 */
2071bool init_nodemask_of_mempolicy(nodemask_t *mask)
2072{
2073 struct mempolicy *mempolicy;
06808b08
LS
2074
2075 if (!(mask && current->mempolicy))
2076 return false;
2077
c0ff7453 2078 task_lock(current);
06808b08
LS
2079 mempolicy = current->mempolicy;
2080 switch (mempolicy->mode) {
2081 case MPOL_PREFERRED:
b27abacc 2082 case MPOL_PREFERRED_MANY:
06808b08 2083 case MPOL_BIND:
06808b08 2084 case MPOL_INTERLEAVE:
269fbe72 2085 *mask = mempolicy->nodes;
7858d7bc
FT
2086 break;
2087
2088 case MPOL_LOCAL:
269fbe72 2089 init_nodemask_of_node(mask, numa_node_id());
06808b08
LS
2090 break;
2091
2092 default:
2093 BUG();
2094 }
c0ff7453 2095 task_unlock(current);
06808b08
LS
2096
2097 return true;
2098}
00ac59ad 2099#endif
5da7ca86 2100
6f48d0eb 2101/*
b26e517a 2102 * mempolicy_in_oom_domain
6f48d0eb 2103 *
b26e517a
FT
2104 * If tsk's mempolicy is "bind", check for intersection between mask and
2105 * the policy nodemask. Otherwise, return true for all other policies
2106 * including "interleave", as a tsk with "interleave" policy may have
2107 * memory allocated from all nodes in system.
6f48d0eb
DR
2108 *
2109 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2110 */
b26e517a 2111bool mempolicy_in_oom_domain(struct task_struct *tsk,
6f48d0eb
DR
2112 const nodemask_t *mask)
2113{
2114 struct mempolicy *mempolicy;
2115 bool ret = true;
2116
2117 if (!mask)
2118 return ret;
b26e517a 2119
6f48d0eb
DR
2120 task_lock(tsk);
2121 mempolicy = tsk->mempolicy;
b26e517a 2122 if (mempolicy && mempolicy->mode == MPOL_BIND)
269fbe72 2123 ret = nodes_intersects(mempolicy->nodes, *mask);
6f48d0eb 2124 task_unlock(tsk);
b26e517a 2125
6f48d0eb
DR
2126 return ret;
2127}
2128
1da177e4
LT
2129/* Allocate a page in interleaved policy.
2130 Own path because it needs to do special accounting. */
662f3a0b
AK
2131static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2132 unsigned nid)
1da177e4 2133{
1da177e4
LT
2134 struct page *page;
2135
84172f4b 2136 page = __alloc_pages(gfp, order, nid, NULL);
4518085e
KW
2137 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2138 if (!static_branch_likely(&vm_numa_stat_key))
2139 return page;
de55c8b2
AR
2140 if (page && page_to_nid(page) == nid) {
2141 preempt_disable();
f19298b9 2142 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
de55c8b2
AR
2143 preempt_enable();
2144 }
1da177e4
LT
2145 return page;
2146}
2147
4c54d949
FT
2148static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2149 int nid, struct mempolicy *pol)
2150{
2151 struct page *page;
2152 gfp_t preferred_gfp;
2153
2154 /*
2155 * This is a two pass approach. The first pass will only try the
2156 * preferred nodes but skip the direct reclaim and allow the
2157 * allocation to fail, while the second pass will try all the
2158 * nodes in system.
2159 */
2160 preferred_gfp = gfp | __GFP_NOWARN;
2161 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2162 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2163 if (!page)
2164 page = __alloc_pages(gfp, order, numa_node_id(), NULL);
2165
2166 return page;
2167}
2168
1da177e4 2169/**
eb350739
MWO
2170 * alloc_pages_vma - Allocate a page for a VMA.
2171 * @gfp: GFP flags.
2172 * @order: Order of the GFP allocation.
2173 * @vma: Pointer to VMA or NULL if not available.
2174 * @addr: Virtual address of the allocation. Must be inside @vma.
2175 * @node: Which node to prefer for allocation (modulo policy).
2176 * @hugepage: For hugepages try only the preferred node if possible.
1da177e4 2177 *
eb350739
MWO
2178 * Allocate a page for a specific address in @vma, using the appropriate
2179 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2180 * of the mm_struct of the VMA to prevent it from going away. Should be
2181 * used for all allocations for pages that will be mapped into user space.
1da177e4 2182 *
eb350739 2183 * Return: The page on success or NULL if allocation fails.
1da177e4 2184 */
eb350739 2185struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
19deb769 2186 unsigned long addr, int node, bool hugepage)
1da177e4 2187{
cc9a6c87 2188 struct mempolicy *pol;
c0ff7453 2189 struct page *page;
04ec6264 2190 int preferred_nid;
be97a41b 2191 nodemask_t *nmask;
cc9a6c87 2192
dd6eecb9 2193 pol = get_vma_policy(vma, addr);
1da177e4 2194
0867a57c
VB
2195 if (pol->mode == MPOL_INTERLEAVE) {
2196 unsigned nid;
2197
2198 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2199 mpol_cond_put(pol);
2200 page = alloc_page_interleave(gfp, order, nid);
2201 goto out;
19deb769
DR
2202 }
2203
4c54d949
FT
2204 if (pol->mode == MPOL_PREFERRED_MANY) {
2205 page = alloc_pages_preferred_many(gfp, order, node, pol);
2206 mpol_cond_put(pol);
2207 goto out;
2208 }
2209
19deb769
DR
2210 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2211 int hpage_node = node;
2212
2213 /*
2214 * For hugepage allocation and non-interleave policy which
2215 * allows the current node (or other explicitly preferred
2216 * node) we only try to allocate from the current/preferred
2217 * node and don't fall back to other nodes, as the cost of
2218 * remote accesses would likely offset THP benefits.
2219 *
b27abacc 2220 * If the policy is interleave or does not allow the current
19deb769
DR
2221 * node in its nodemask, we allocate the standard way.
2222 */
7858d7bc 2223 if (pol->mode == MPOL_PREFERRED)
269fbe72 2224 hpage_node = first_node(pol->nodes);
19deb769
DR
2225
2226 nmask = policy_nodemask(gfp, pol);
2227 if (!nmask || node_isset(hpage_node, *nmask)) {
2228 mpol_cond_put(pol);
cc638f32
VB
2229 /*
2230 * First, try to allocate THP only on local node, but
2231 * don't reclaim unnecessarily, just compact.
2232 */
19deb769 2233 page = __alloc_pages_node(hpage_node,
cc638f32 2234 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
76e654cc
DR
2235
2236 /*
2237 * If hugepage allocations are configured to always
2238 * synchronous compact or the vma has been madvised
2239 * to prefer hugepage backing, retry allowing remote
cc638f32 2240 * memory with both reclaim and compact as well.
76e654cc
DR
2241 */
2242 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2243 page = __alloc_pages_node(hpage_node,
cc638f32 2244 gfp, order);
76e654cc 2245
19deb769
DR
2246 goto out;
2247 }
356ff8a9
DR
2248 }
2249
be97a41b 2250 nmask = policy_nodemask(gfp, pol);
04ec6264 2251 preferred_nid = policy_node(gfp, pol, node);
84172f4b 2252 page = __alloc_pages(gfp, order, preferred_nid, nmask);
d51e9894 2253 mpol_cond_put(pol);
be97a41b 2254out:
c0ff7453 2255 return page;
1da177e4 2256}
69262215 2257EXPORT_SYMBOL(alloc_pages_vma);
1da177e4
LT
2258
2259/**
6421ec76
MWO
2260 * alloc_pages - Allocate pages.
2261 * @gfp: GFP flags.
2262 * @order: Power of two of number of pages to allocate.
1da177e4 2263 *
6421ec76
MWO
2264 * Allocate 1 << @order contiguous pages. The physical address of the
2265 * first page is naturally aligned (eg an order-3 allocation will be aligned
2266 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2267 * process is honoured when in process context.
1da177e4 2268 *
6421ec76
MWO
2269 * Context: Can be called from any context, providing the appropriate GFP
2270 * flags are used.
2271 * Return: The page on success or NULL if allocation fails.
1da177e4 2272 */
d7f946d0 2273struct page *alloc_pages(gfp_t gfp, unsigned order)
1da177e4 2274{
8d90274b 2275 struct mempolicy *pol = &default_policy;
c0ff7453 2276 struct page *page;
1da177e4 2277
8d90274b
ON
2278 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2279 pol = get_task_policy(current);
52cd3b07
LS
2280
2281 /*
2282 * No reference counting needed for current->mempolicy
2283 * nor system default_policy
2284 */
45c4745a 2285 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453 2286 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
4c54d949
FT
2287 else if (pol->mode == MPOL_PREFERRED_MANY)
2288 page = alloc_pages_preferred_many(gfp, order,
2289 numa_node_id(), pol);
c0ff7453 2290 else
84172f4b 2291 page = __alloc_pages(gfp, order,
04ec6264 2292 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 2293 policy_nodemask(gfp, pol));
cc9a6c87 2294
c0ff7453 2295 return page;
1da177e4 2296}
d7f946d0 2297EXPORT_SYMBOL(alloc_pages);
1da177e4 2298
ef0855d3
ON
2299int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2300{
2301 struct mempolicy *pol = mpol_dup(vma_policy(src));
2302
2303 if (IS_ERR(pol))
2304 return PTR_ERR(pol);
2305 dst->vm_policy = pol;
2306 return 0;
2307}
2308
4225399a 2309/*
846a16bf 2310 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2311 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2312 * with the mems_allowed returned by cpuset_mems_allowed(). This
2313 * keeps mempolicies cpuset relative after its cpuset moves. See
2314 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2315 *
2316 * current's mempolicy may be rebinded by the other task(the task that changes
2317 * cpuset's mems), so we needn't do rebind work for current task.
4225399a 2318 */
4225399a 2319
846a16bf
LS
2320/* Slow path of a mempolicy duplicate */
2321struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2322{
2323 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2324
2325 if (!new)
2326 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2327
2328 /* task's mempolicy is protected by alloc_lock */
2329 if (old == current->mempolicy) {
2330 task_lock(current);
2331 *new = *old;
2332 task_unlock(current);
2333 } else
2334 *new = *old;
2335
4225399a
PJ
2336 if (current_cpuset_is_being_rebound()) {
2337 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2338 mpol_rebind_policy(new, &mems);
4225399a 2339 }
1da177e4 2340 atomic_set(&new->refcnt, 1);
1da177e4
LT
2341 return new;
2342}
2343
2344/* Slow path of a mempolicy comparison */
fcfb4dcc 2345bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2346{
2347 if (!a || !b)
fcfb4dcc 2348 return false;
45c4745a 2349 if (a->mode != b->mode)
fcfb4dcc 2350 return false;
19800502 2351 if (a->flags != b->flags)
fcfb4dcc 2352 return false;
19800502
BL
2353 if (mpol_store_user_nodemask(a))
2354 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2355 return false;
19800502 2356
45c4745a 2357 switch (a->mode) {
19770b32 2358 case MPOL_BIND:
1da177e4 2359 case MPOL_INTERLEAVE:
1da177e4 2360 case MPOL_PREFERRED:
b27abacc 2361 case MPOL_PREFERRED_MANY:
269fbe72 2362 return !!nodes_equal(a->nodes, b->nodes);
7858d7bc
FT
2363 case MPOL_LOCAL:
2364 return true;
1da177e4
LT
2365 default:
2366 BUG();
fcfb4dcc 2367 return false;
1da177e4
LT
2368 }
2369}
2370
1da177e4
LT
2371/*
2372 * Shared memory backing store policy support.
2373 *
2374 * Remember policies even when nobody has shared memory mapped.
2375 * The policies are kept in Red-Black tree linked from the inode.
4a8c7bb5 2376 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2377 * for any accesses to the tree.
2378 */
2379
4a8c7bb5
NZ
2380/*
2381 * lookup first element intersecting start-end. Caller holds sp->lock for
2382 * reading or for writing
2383 */
1da177e4
LT
2384static struct sp_node *
2385sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2386{
2387 struct rb_node *n = sp->root.rb_node;
2388
2389 while (n) {
2390 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2391
2392 if (start >= p->end)
2393 n = n->rb_right;
2394 else if (end <= p->start)
2395 n = n->rb_left;
2396 else
2397 break;
2398 }
2399 if (!n)
2400 return NULL;
2401 for (;;) {
2402 struct sp_node *w = NULL;
2403 struct rb_node *prev = rb_prev(n);
2404 if (!prev)
2405 break;
2406 w = rb_entry(prev, struct sp_node, nd);
2407 if (w->end <= start)
2408 break;
2409 n = prev;
2410 }
2411 return rb_entry(n, struct sp_node, nd);
2412}
2413
4a8c7bb5
NZ
2414/*
2415 * Insert a new shared policy into the list. Caller holds sp->lock for
2416 * writing.
2417 */
1da177e4
LT
2418static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2419{
2420 struct rb_node **p = &sp->root.rb_node;
2421 struct rb_node *parent = NULL;
2422 struct sp_node *nd;
2423
2424 while (*p) {
2425 parent = *p;
2426 nd = rb_entry(parent, struct sp_node, nd);
2427 if (new->start < nd->start)
2428 p = &(*p)->rb_left;
2429 else if (new->end > nd->end)
2430 p = &(*p)->rb_right;
2431 else
2432 BUG();
2433 }
2434 rb_link_node(&new->nd, parent, p);
2435 rb_insert_color(&new->nd, &sp->root);
140d5a49 2436 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2437 new->policy ? new->policy->mode : 0);
1da177e4
LT
2438}
2439
2440/* Find shared policy intersecting idx */
2441struct mempolicy *
2442mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2443{
2444 struct mempolicy *pol = NULL;
2445 struct sp_node *sn;
2446
2447 if (!sp->root.rb_node)
2448 return NULL;
4a8c7bb5 2449 read_lock(&sp->lock);
1da177e4
LT
2450 sn = sp_lookup(sp, idx, idx+1);
2451 if (sn) {
2452 mpol_get(sn->policy);
2453 pol = sn->policy;
2454 }
4a8c7bb5 2455 read_unlock(&sp->lock);
1da177e4
LT
2456 return pol;
2457}
2458
63f74ca2
KM
2459static void sp_free(struct sp_node *n)
2460{
2461 mpol_put(n->policy);
2462 kmem_cache_free(sn_cache, n);
2463}
2464
771fb4d8
LS
2465/**
2466 * mpol_misplaced - check whether current page node is valid in policy
2467 *
b46e14ac
FF
2468 * @page: page to be checked
2469 * @vma: vm area where page mapped
2470 * @addr: virtual address where page mapped
771fb4d8
LS
2471 *
2472 * Lookup current policy node id for vma,addr and "compare to" page's
5f076944 2473 * node id. Policy determination "mimics" alloc_page_vma().
771fb4d8 2474 * Called from fault path where we know the vma and faulting address.
5f076944 2475 *
062db293
BW
2476 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2477 * policy, or a suitable node ID to allocate a replacement page from.
771fb4d8
LS
2478 */
2479int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2480{
2481 struct mempolicy *pol;
c33d6c06 2482 struct zoneref *z;
771fb4d8
LS
2483 int curnid = page_to_nid(page);
2484 unsigned long pgoff;
90572890
PZ
2485 int thiscpu = raw_smp_processor_id();
2486 int thisnid = cpu_to_node(thiscpu);
98fa15f3 2487 int polnid = NUMA_NO_NODE;
062db293 2488 int ret = NUMA_NO_NODE;
771fb4d8 2489
dd6eecb9 2490 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2491 if (!(pol->flags & MPOL_F_MOF))
2492 goto out;
2493
2494 switch (pol->mode) {
2495 case MPOL_INTERLEAVE:
771fb4d8
LS
2496 pgoff = vma->vm_pgoff;
2497 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
98c70baa 2498 polnid = offset_il_node(pol, pgoff);
771fb4d8
LS
2499 break;
2500
2501 case MPOL_PREFERRED:
b27abacc
DH
2502 if (node_isset(curnid, pol->nodes))
2503 goto out;
269fbe72 2504 polnid = first_node(pol->nodes);
7858d7bc
FT
2505 break;
2506
2507 case MPOL_LOCAL:
2508 polnid = numa_node_id();
771fb4d8
LS
2509 break;
2510
2511 case MPOL_BIND:
bda420b9
HY
2512 /* Optimize placement among multiple nodes via NUMA balancing */
2513 if (pol->flags & MPOL_F_MORON) {
269fbe72 2514 if (node_isset(thisnid, pol->nodes))
bda420b9
HY
2515 break;
2516 goto out;
2517 }
b27abacc 2518 fallthrough;
c33d6c06 2519
b27abacc 2520 case MPOL_PREFERRED_MANY:
771fb4d8 2521 /*
771fb4d8
LS
2522 * use current page if in policy nodemask,
2523 * else select nearest allowed node, if any.
2524 * If no allowed nodes, use current [!misplaced].
2525 */
269fbe72 2526 if (node_isset(curnid, pol->nodes))
771fb4d8 2527 goto out;
c33d6c06 2528 z = first_zones_zonelist(
771fb4d8
LS
2529 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2530 gfp_zone(GFP_HIGHUSER),
269fbe72 2531 &pol->nodes);
c1093b74 2532 polnid = zone_to_nid(z->zone);
771fb4d8
LS
2533 break;
2534
2535 default:
2536 BUG();
2537 }
5606e387
MG
2538
2539 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2540 if (pol->flags & MPOL_F_MORON) {
90572890 2541 polnid = thisnid;
5606e387 2542
10f39042 2543 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2544 goto out;
e42c8ff2
MG
2545 }
2546
771fb4d8
LS
2547 if (curnid != polnid)
2548 ret = polnid;
2549out:
2550 mpol_cond_put(pol);
2551
2552 return ret;
2553}
2554
c11600e4
DR
2555/*
2556 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2557 * dropped after task->mempolicy is set to NULL so that any allocation done as
2558 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2559 * policy.
2560 */
2561void mpol_put_task_policy(struct task_struct *task)
2562{
2563 struct mempolicy *pol;
2564
2565 task_lock(task);
2566 pol = task->mempolicy;
2567 task->mempolicy = NULL;
2568 task_unlock(task);
2569 mpol_put(pol);
2570}
2571
1da177e4
LT
2572static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2573{
140d5a49 2574 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
1da177e4 2575 rb_erase(&n->nd, &sp->root);
63f74ca2 2576 sp_free(n);
1da177e4
LT
2577}
2578
42288fe3
MG
2579static void sp_node_init(struct sp_node *node, unsigned long start,
2580 unsigned long end, struct mempolicy *pol)
2581{
2582 node->start = start;
2583 node->end = end;
2584 node->policy = pol;
2585}
2586
dbcb0f19
AB
2587static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2588 struct mempolicy *pol)
1da177e4 2589{
869833f2
KM
2590 struct sp_node *n;
2591 struct mempolicy *newpol;
1da177e4 2592
869833f2 2593 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2594 if (!n)
2595 return NULL;
869833f2
KM
2596
2597 newpol = mpol_dup(pol);
2598 if (IS_ERR(newpol)) {
2599 kmem_cache_free(sn_cache, n);
2600 return NULL;
2601 }
2602 newpol->flags |= MPOL_F_SHARED;
42288fe3 2603 sp_node_init(n, start, end, newpol);
869833f2 2604
1da177e4
LT
2605 return n;
2606}
2607
2608/* Replace a policy range. */
2609static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2610 unsigned long end, struct sp_node *new)
2611{
b22d127a 2612 struct sp_node *n;
42288fe3
MG
2613 struct sp_node *n_new = NULL;
2614 struct mempolicy *mpol_new = NULL;
b22d127a 2615 int ret = 0;
1da177e4 2616
42288fe3 2617restart:
4a8c7bb5 2618 write_lock(&sp->lock);
1da177e4
LT
2619 n = sp_lookup(sp, start, end);
2620 /* Take care of old policies in the same range. */
2621 while (n && n->start < end) {
2622 struct rb_node *next = rb_next(&n->nd);
2623 if (n->start >= start) {
2624 if (n->end <= end)
2625 sp_delete(sp, n);
2626 else
2627 n->start = end;
2628 } else {
2629 /* Old policy spanning whole new range. */
2630 if (n->end > end) {
42288fe3
MG
2631 if (!n_new)
2632 goto alloc_new;
2633
2634 *mpol_new = *n->policy;
2635 atomic_set(&mpol_new->refcnt, 1);
7880639c 2636 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2637 n->end = start;
5ca39575 2638 sp_insert(sp, n_new);
42288fe3
MG
2639 n_new = NULL;
2640 mpol_new = NULL;
1da177e4
LT
2641 break;
2642 } else
2643 n->end = start;
2644 }
2645 if (!next)
2646 break;
2647 n = rb_entry(next, struct sp_node, nd);
2648 }
2649 if (new)
2650 sp_insert(sp, new);
4a8c7bb5 2651 write_unlock(&sp->lock);
42288fe3
MG
2652 ret = 0;
2653
2654err_out:
2655 if (mpol_new)
2656 mpol_put(mpol_new);
2657 if (n_new)
2658 kmem_cache_free(sn_cache, n_new);
2659
b22d127a 2660 return ret;
42288fe3
MG
2661
2662alloc_new:
4a8c7bb5 2663 write_unlock(&sp->lock);
42288fe3
MG
2664 ret = -ENOMEM;
2665 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2666 if (!n_new)
2667 goto err_out;
2668 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2669 if (!mpol_new)
2670 goto err_out;
2671 goto restart;
1da177e4
LT
2672}
2673
71fe804b
LS
2674/**
2675 * mpol_shared_policy_init - initialize shared policy for inode
2676 * @sp: pointer to inode shared policy
2677 * @mpol: struct mempolicy to install
2678 *
2679 * Install non-NULL @mpol in inode's shared policy rb-tree.
2680 * On entry, the current task has a reference on a non-NULL @mpol.
2681 * This must be released on exit.
4bfc4495 2682 * This is called at get_inode() calls and we can use GFP_KERNEL.
71fe804b
LS
2683 */
2684void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2685{
58568d2a
MX
2686 int ret;
2687
71fe804b 2688 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2689 rwlock_init(&sp->lock);
71fe804b
LS
2690
2691 if (mpol) {
2692 struct vm_area_struct pvma;
2693 struct mempolicy *new;
4bfc4495 2694 NODEMASK_SCRATCH(scratch);
71fe804b 2695
4bfc4495 2696 if (!scratch)
5c0c1654 2697 goto put_mpol;
71fe804b
LS
2698 /* contextualize the tmpfs mount point mempolicy */
2699 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2700 if (IS_ERR(new))
0cae3457 2701 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2702
2703 task_lock(current);
4bfc4495 2704 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2705 task_unlock(current);
15d77835 2706 if (ret)
5c0c1654 2707 goto put_new;
71fe804b
LS
2708
2709 /* Create pseudo-vma that contains just the policy */
2c4541e2 2710 vma_init(&pvma, NULL);
71fe804b
LS
2711 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2712 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2713
5c0c1654 2714put_new:
71fe804b 2715 mpol_put(new); /* drop initial ref */
0cae3457 2716free_scratch:
4bfc4495 2717 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2718put_mpol:
2719 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2720 }
2721}
2722
1da177e4
LT
2723int mpol_set_shared_policy(struct shared_policy *info,
2724 struct vm_area_struct *vma, struct mempolicy *npol)
2725{
2726 int err;
2727 struct sp_node *new = NULL;
2728 unsigned long sz = vma_pages(vma);
2729
028fec41 2730 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2731 vma->vm_pgoff,
45c4745a 2732 sz, npol ? npol->mode : -1,
028fec41 2733 npol ? npol->flags : -1,
269fbe72 2734 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2735
2736 if (npol) {
2737 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2738 if (!new)
2739 return -ENOMEM;
2740 }
2741 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2742 if (err && new)
63f74ca2 2743 sp_free(new);
1da177e4
LT
2744 return err;
2745}
2746
2747/* Free a backing policy store on inode delete. */
2748void mpol_free_shared_policy(struct shared_policy *p)
2749{
2750 struct sp_node *n;
2751 struct rb_node *next;
2752
2753 if (!p->root.rb_node)
2754 return;
4a8c7bb5 2755 write_lock(&p->lock);
1da177e4
LT
2756 next = rb_first(&p->root);
2757 while (next) {
2758 n = rb_entry(next, struct sp_node, nd);
2759 next = rb_next(&n->nd);
63f74ca2 2760 sp_delete(p, n);
1da177e4 2761 }
4a8c7bb5 2762 write_unlock(&p->lock);
1da177e4
LT
2763}
2764
1a687c2e 2765#ifdef CONFIG_NUMA_BALANCING
c297663c 2766static int __initdata numabalancing_override;
1a687c2e
MG
2767
2768static void __init check_numabalancing_enable(void)
2769{
2770 bool numabalancing_default = false;
2771
2772 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2773 numabalancing_default = true;
2774
c297663c
MG
2775 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2776 if (numabalancing_override)
2777 set_numabalancing_state(numabalancing_override == 1);
2778
b0dc2b9b 2779 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2780 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2781 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2782 set_numabalancing_state(numabalancing_default);
2783 }
2784}
2785
2786static int __init setup_numabalancing(char *str)
2787{
2788 int ret = 0;
2789 if (!str)
2790 goto out;
1a687c2e
MG
2791
2792 if (!strcmp(str, "enable")) {
c297663c 2793 numabalancing_override = 1;
1a687c2e
MG
2794 ret = 1;
2795 } else if (!strcmp(str, "disable")) {
c297663c 2796 numabalancing_override = -1;
1a687c2e
MG
2797 ret = 1;
2798 }
2799out:
2800 if (!ret)
4a404bea 2801 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2802
2803 return ret;
2804}
2805__setup("numa_balancing=", setup_numabalancing);
2806#else
2807static inline void __init check_numabalancing_enable(void)
2808{
2809}
2810#endif /* CONFIG_NUMA_BALANCING */
2811
1da177e4
LT
2812/* assumes fs == KERNEL_DS */
2813void __init numa_policy_init(void)
2814{
b71636e2
PM
2815 nodemask_t interleave_nodes;
2816 unsigned long largest = 0;
2817 int nid, prefer = 0;
2818
1da177e4
LT
2819 policy_cache = kmem_cache_create("numa_policy",
2820 sizeof(struct mempolicy),
20c2df83 2821 0, SLAB_PANIC, NULL);
1da177e4
LT
2822
2823 sn_cache = kmem_cache_create("shared_policy_node",
2824 sizeof(struct sp_node),
20c2df83 2825 0, SLAB_PANIC, NULL);
1da177e4 2826
5606e387
MG
2827 for_each_node(nid) {
2828 preferred_node_policy[nid] = (struct mempolicy) {
2829 .refcnt = ATOMIC_INIT(1),
2830 .mode = MPOL_PREFERRED,
2831 .flags = MPOL_F_MOF | MPOL_F_MORON,
269fbe72 2832 .nodes = nodemask_of_node(nid),
5606e387
MG
2833 };
2834 }
2835
b71636e2
PM
2836 /*
2837 * Set interleaving policy for system init. Interleaving is only
2838 * enabled across suitably sized nodes (default is >= 16MB), or
2839 * fall back to the largest node if they're all smaller.
2840 */
2841 nodes_clear(interleave_nodes);
01f13bd6 2842 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2843 unsigned long total_pages = node_present_pages(nid);
2844
2845 /* Preserve the largest node */
2846 if (largest < total_pages) {
2847 largest = total_pages;
2848 prefer = nid;
2849 }
2850
2851 /* Interleave this node? */
2852 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2853 node_set(nid, interleave_nodes);
2854 }
2855
2856 /* All too small, use the largest */
2857 if (unlikely(nodes_empty(interleave_nodes)))
2858 node_set(prefer, interleave_nodes);
1da177e4 2859
028fec41 2860 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2861 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2862
2863 check_numabalancing_enable();
1da177e4
LT
2864}
2865
8bccd85f 2866/* Reset policy of current process to default */
1da177e4
LT
2867void numa_default_policy(void)
2868{
028fec41 2869 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2870}
68860ec1 2871
095f1fc4
LS
2872/*
2873 * Parse and format mempolicy from/to strings
2874 */
2875
345ace9c
LS
2876static const char * const policy_modes[] =
2877{
2878 [MPOL_DEFAULT] = "default",
2879 [MPOL_PREFERRED] = "prefer",
2880 [MPOL_BIND] = "bind",
2881 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2882 [MPOL_LOCAL] = "local",
b27abacc 2883 [MPOL_PREFERRED_MANY] = "prefer (many)",
345ace9c 2884};
1a75a6c8 2885
095f1fc4
LS
2886
2887#ifdef CONFIG_TMPFS
2888/**
f2a07f40 2889 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2890 * @str: string containing mempolicy to parse
71fe804b 2891 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2892 *
2893 * Format of input:
2894 * <mode>[=<flags>][:<nodelist>]
2895 *
71fe804b 2896 * On success, returns 0, else 1
095f1fc4 2897 */
a7a88b23 2898int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2899{
71fe804b 2900 struct mempolicy *new = NULL;
f2a07f40 2901 unsigned short mode_flags;
71fe804b 2902 nodemask_t nodes;
095f1fc4
LS
2903 char *nodelist = strchr(str, ':');
2904 char *flags = strchr(str, '=');
dedf2c73 2905 int err = 1, mode;
095f1fc4 2906
c7a91bc7
DC
2907 if (flags)
2908 *flags++ = '\0'; /* terminate mode string */
2909
095f1fc4
LS
2910 if (nodelist) {
2911 /* NUL-terminate mode or flags string */
2912 *nodelist++ = '\0';
71fe804b 2913 if (nodelist_parse(nodelist, nodes))
095f1fc4 2914 goto out;
01f13bd6 2915 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2916 goto out;
71fe804b
LS
2917 } else
2918 nodes_clear(nodes);
2919
dedf2c73 2920 mode = match_string(policy_modes, MPOL_MAX, str);
2921 if (mode < 0)
095f1fc4
LS
2922 goto out;
2923
71fe804b 2924 switch (mode) {
095f1fc4 2925 case MPOL_PREFERRED:
71fe804b 2926 /*
aa9f7d51
RD
2927 * Insist on a nodelist of one node only, although later
2928 * we use first_node(nodes) to grab a single node, so here
2929 * nodelist (or nodes) cannot be empty.
71fe804b 2930 */
095f1fc4
LS
2931 if (nodelist) {
2932 char *rest = nodelist;
2933 while (isdigit(*rest))
2934 rest++;
926f2ae0
KM
2935 if (*rest)
2936 goto out;
aa9f7d51
RD
2937 if (nodes_empty(nodes))
2938 goto out;
095f1fc4
LS
2939 }
2940 break;
095f1fc4
LS
2941 case MPOL_INTERLEAVE:
2942 /*
2943 * Default to online nodes with memory if no nodelist
2944 */
2945 if (!nodelist)
01f13bd6 2946 nodes = node_states[N_MEMORY];
3f226aa1 2947 break;
71fe804b 2948 case MPOL_LOCAL:
3f226aa1 2949 /*
71fe804b 2950 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2951 */
71fe804b 2952 if (nodelist)
3f226aa1 2953 goto out;
3f226aa1 2954 break;
413b43de
RT
2955 case MPOL_DEFAULT:
2956 /*
2957 * Insist on a empty nodelist
2958 */
2959 if (!nodelist)
2960 err = 0;
2961 goto out;
b27abacc 2962 case MPOL_PREFERRED_MANY:
d69b2e63
KM
2963 case MPOL_BIND:
2964 /*
2965 * Insist on a nodelist
2966 */
2967 if (!nodelist)
2968 goto out;
095f1fc4
LS
2969 }
2970
71fe804b 2971 mode_flags = 0;
095f1fc4
LS
2972 if (flags) {
2973 /*
2974 * Currently, we only support two mutually exclusive
2975 * mode flags.
2976 */
2977 if (!strcmp(flags, "static"))
71fe804b 2978 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2979 else if (!strcmp(flags, "relative"))
71fe804b 2980 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2981 else
926f2ae0 2982 goto out;
095f1fc4 2983 }
71fe804b
LS
2984
2985 new = mpol_new(mode, mode_flags, &nodes);
2986 if (IS_ERR(new))
926f2ae0
KM
2987 goto out;
2988
f2a07f40
HD
2989 /*
2990 * Save nodes for mpol_to_str() to show the tmpfs mount options
2991 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2992 */
269fbe72
BW
2993 if (mode != MPOL_PREFERRED) {
2994 new->nodes = nodes;
2995 } else if (nodelist) {
2996 nodes_clear(new->nodes);
2997 node_set(first_node(nodes), new->nodes);
2998 } else {
7858d7bc 2999 new->mode = MPOL_LOCAL;
269fbe72 3000 }
f2a07f40
HD
3001
3002 /*
3003 * Save nodes for contextualization: this will be used to "clone"
3004 * the mempolicy in a specific context [cpuset] at a later time.
3005 */
3006 new->w.user_nodemask = nodes;
3007
926f2ae0 3008 err = 0;
71fe804b 3009
095f1fc4
LS
3010out:
3011 /* Restore string for error message */
3012 if (nodelist)
3013 *--nodelist = ':';
3014 if (flags)
3015 *--flags = '=';
71fe804b
LS
3016 if (!err)
3017 *mpol = new;
095f1fc4
LS
3018 return err;
3019}
3020#endif /* CONFIG_TMPFS */
3021
71fe804b
LS
3022/**
3023 * mpol_to_str - format a mempolicy structure for printing
3024 * @buffer: to contain formatted mempolicy string
3025 * @maxlen: length of @buffer
3026 * @pol: pointer to mempolicy to be formatted
71fe804b 3027 *
948927ee
DR
3028 * Convert @pol into a string. If @buffer is too short, truncate the string.
3029 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3030 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 3031 */
948927ee 3032void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
3033{
3034 char *p = buffer;
948927ee
DR
3035 nodemask_t nodes = NODE_MASK_NONE;
3036 unsigned short mode = MPOL_DEFAULT;
3037 unsigned short flags = 0;
2291990a 3038
8790c71a 3039 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 3040 mode = pol->mode;
948927ee
DR
3041 flags = pol->flags;
3042 }
bea904d5 3043
1a75a6c8
CL
3044 switch (mode) {
3045 case MPOL_DEFAULT:
7858d7bc 3046 case MPOL_LOCAL:
1a75a6c8 3047 break;
1a75a6c8 3048 case MPOL_PREFERRED:
b27abacc 3049 case MPOL_PREFERRED_MANY:
1a75a6c8 3050 case MPOL_BIND:
1a75a6c8 3051 case MPOL_INTERLEAVE:
269fbe72 3052 nodes = pol->nodes;
1a75a6c8 3053 break;
1a75a6c8 3054 default:
948927ee
DR
3055 WARN_ON_ONCE(1);
3056 snprintf(p, maxlen, "unknown");
3057 return;
1a75a6c8
CL
3058 }
3059
b7a9f420 3060 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 3061
fc36b8d3 3062 if (flags & MPOL_MODE_FLAGS) {
948927ee 3063 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 3064
2291990a
LS
3065 /*
3066 * Currently, the only defined flags are mutually exclusive
3067 */
f5b087b5 3068 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
3069 p += snprintf(p, buffer + maxlen - p, "static");
3070 else if (flags & MPOL_F_RELATIVE_NODES)
3071 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
3072 }
3073
9e763e0f
TH
3074 if (!nodes_empty(nodes))
3075 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3076 nodemask_pr_args(&nodes));
1a75a6c8 3077}
20b51af1
HY
3078
3079bool numa_demotion_enabled = false;
3080
3081#ifdef CONFIG_SYSFS
3082static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
3083 struct kobj_attribute *attr, char *buf)
3084{
3085 return sysfs_emit(buf, "%s\n",
3086 numa_demotion_enabled? "true" : "false");
3087}
3088
3089static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
3090 struct kobj_attribute *attr,
3091 const char *buf, size_t count)
3092{
3093 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
3094 numa_demotion_enabled = true;
3095 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
3096 numa_demotion_enabled = false;
3097 else
3098 return -EINVAL;
3099
3100 return count;
3101}
3102
3103static struct kobj_attribute numa_demotion_enabled_attr =
3104 __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3105 numa_demotion_enabled_store);
3106
3107static struct attribute *numa_attrs[] = {
3108 &numa_demotion_enabled_attr.attr,
3109 NULL,
3110};
3111
3112static const struct attribute_group numa_attr_group = {
3113 .attrs = numa_attrs,
3114};
3115
3116static int __init numa_init_sysfs(void)
3117{
3118 int err;
3119 struct kobject *numa_kobj;
3120
3121 numa_kobj = kobject_create_and_add("numa", mm_kobj);
3122 if (!numa_kobj) {
3123 pr_err("failed to create numa kobject\n");
3124 return -ENOMEM;
3125 }
3126 err = sysfs_create_group(numa_kobj, &numa_attr_group);
3127 if (err) {
3128 pr_err("failed to register numa group\n");
3129 goto delete_obj;
3130 }
3131 return 0;
3132
3133delete_obj:
3134 kobject_put(numa_kobj);
3135 return err;
3136}
3137subsys_initcall(numa_init_sysfs);
3138#endif