/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
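/*
 * Illustrative userspace sketch (an assumption for exposition, not part of
 * the original file): the policies above are normally selected via the
 * set_mempolicy()/mbind() system calls exposed through <numaif.h>.
 * addr/length below are placeholders and error handling is omitted.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// interleave this task's future allocations across nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	// bind an existing mapping to node 0 only, moving its pages there
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, MPOL_MF_MOVE);
 */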

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the newly allowed nodes, and the second
	 * step clears all the disallowed nodes.  This avoids a window in
	 * which no node at all is allowed for an allocation.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

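/*
 * Worked example (illustrative): for a relative nodemask of {0,2} and an
 * allowed set of {4,5,6}, nodes_fold() folds {0,2} into 3 bits, still
 * {0,2}, and nodes_onto() maps those bit positions onto the allowed set,
 * yielding {4,6}.
 */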
37012946
DR
183static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
184{
185 if (nodes_empty(*nodes))
186 return -EINVAL;
187 pol->v.nodes = *nodes;
188 return 0;
189}
190
191static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
192{
193 if (!nodes)
fc36b8d3 194 pol->flags |= MPOL_F_LOCAL; /* local allocation */
37012946
DR
195 else if (nodes_empty(*nodes))
196 return -EINVAL; /* no allowed nodes */
197 else
198 pol->v.preferred_node = first_node(*nodes);
199 return 0;
200}
201
202static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
203{
204 if (!is_valid_nodemask(nodes))
205 return -EINVAL;
206 pol->v.nodes = *nodes;
207 return 0;
208}
209
/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_sem for write.
 */
4bfc4495
KH
219static int mpol_set_nodemask(struct mempolicy *pol,
220 const nodemask_t *nodes, struct nodemask_scratch *nsc)
58568d2a 221{
58568d2a
MX
222 int ret;
223
224 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
225 if (pol == NULL)
226 return 0;
01f13bd6 227 /* Check N_MEMORY */
4bfc4495 228 nodes_and(nsc->mask1,
01f13bd6 229 cpuset_current_mems_allowed, node_states[N_MEMORY]);
58568d2a
MX
230
231 VM_BUG_ON(!nodes);
232 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
233 nodes = NULL; /* explicit local allocation */
234 else {
235 if (pol->flags & MPOL_F_RELATIVE_NODES)
4bfc4495 236 mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
58568d2a 237 else
4bfc4495
KH
238 nodes_and(nsc->mask2, *nodes, nsc->mask1);
239
58568d2a
MX
240 if (mpol_store_user_nodemask(pol))
241 pol->w.user_nodemask = *nodes;
242 else
243 pol->w.cpuset_mems_allowed =
244 cpuset_current_mems_allowed;
245 }
246
4bfc4495
KH
247 if (nodes)
248 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
249 else
250 ret = mpol_ops[pol->mode].create(pol, NULL);
58568d2a
MX
251 return ret;
252}
253
/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
028fec41
DR
258static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
259 nodemask_t *nodes)
1da177e4
LT
260{
261 struct mempolicy *policy;
262
028fec41 263 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
00ef2d2f 264 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
140d5a49 265
3e1f0645
DR
266 if (mode == MPOL_DEFAULT) {
267 if (nodes && !nodes_empty(*nodes))
37012946 268 return ERR_PTR(-EINVAL);
d3a71033 269 return NULL;
37012946 270 }
3e1f0645
DR
271 VM_BUG_ON(!nodes);
272
273 /*
274 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
275 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
276 * All other modes require a valid pointer to a non-empty nodemask.
277 */
278 if (mode == MPOL_PREFERRED) {
279 if (nodes_empty(*nodes)) {
280 if (((flags & MPOL_F_STATIC_NODES) ||
281 (flags & MPOL_F_RELATIVE_NODES)))
282 return ERR_PTR(-EINVAL);
3e1f0645 283 }
479e2802
PZ
284 } else if (mode == MPOL_LOCAL) {
285 if (!nodes_empty(*nodes))
286 return ERR_PTR(-EINVAL);
287 mode = MPOL_PREFERRED;
3e1f0645
DR
288 } else if (nodes_empty(*nodes))
289 return ERR_PTR(-EINVAL);
1da177e4
LT
290 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
291 if (!policy)
292 return ERR_PTR(-ENOMEM);
293 atomic_set(&policy->refcnt, 1);
45c4745a 294 policy->mode = mode;
3e1f0645 295 policy->flags = flags;
37012946 296
1da177e4 297 return policy;
37012946
DR
298}
299
52cd3b07
LS
300/* Slow path of a mpol destructor. */
301void __mpol_put(struct mempolicy *p)
302{
303 if (!atomic_dec_and_test(&p->refcnt))
304 return;
52cd3b07
LS
305 kmem_cache_free(policy_cache, p);
306}
307
708c1bbc
MX
308static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
309 enum mpol_rebind_step step)
37012946
DR
310{
311}
312
708c1bbc
MX
313/*
314 * step:
315 * MPOL_REBIND_ONCE - do rebind work at once
316 * MPOL_REBIND_STEP1 - set all the newly nodes
317 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
318 */
319static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
320 enum mpol_rebind_step step)
37012946
DR
321{
322 nodemask_t tmp;
323
324 if (pol->flags & MPOL_F_STATIC_NODES)
325 nodes_and(tmp, pol->w.user_nodemask, *nodes);
326 else if (pol->flags & MPOL_F_RELATIVE_NODES)
327 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
328 else {
708c1bbc
MX
329 /*
330 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
331 * result
332 */
333 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
334 nodes_remap(tmp, pol->v.nodes,
335 pol->w.cpuset_mems_allowed, *nodes);
336 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
337 } else if (step == MPOL_REBIND_STEP2) {
338 tmp = pol->w.cpuset_mems_allowed;
339 pol->w.cpuset_mems_allowed = *nodes;
340 } else
341 BUG();
37012946 342 }
f5b087b5 343
708c1bbc
MX
344 if (nodes_empty(tmp))
345 tmp = *nodes;
346
347 if (step == MPOL_REBIND_STEP1)
348 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
349 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
350 pol->v.nodes = tmp;
351 else
352 BUG();
353
37012946
DR
354 if (!node_isset(current->il_next, tmp)) {
355 current->il_next = next_node(current->il_next, tmp);
356 if (current->il_next >= MAX_NUMNODES)
357 current->il_next = first_node(tmp);
358 if (current->il_next >= MAX_NUMNODES)
359 current->il_next = numa_node_id();
360 }
361}
362
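/*
 * Example for mpol_rebind_nodemask() above (MPOL_REBIND_ONCE): an
 * interleave policy created over nodes {1,2} with MPOL_F_STATIC_NODES,
 * rebound to a new allowed set {2,3}, keeps only the intersection {2}.
 * With MPOL_F_RELATIVE_NODES the user mask is instead remapped onto the
 * new set, so {1,2} relative to {2,3} becomes {2,3}.
 */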
363static void mpol_rebind_preferred(struct mempolicy *pol,
708c1bbc
MX
364 const nodemask_t *nodes,
365 enum mpol_rebind_step step)
37012946
DR
366{
367 nodemask_t tmp;
368
37012946
DR
369 if (pol->flags & MPOL_F_STATIC_NODES) {
370 int node = first_node(pol->w.user_nodemask);
371
fc36b8d3 372 if (node_isset(node, *nodes)) {
37012946 373 pol->v.preferred_node = node;
fc36b8d3
LS
374 pol->flags &= ~MPOL_F_LOCAL;
375 } else
376 pol->flags |= MPOL_F_LOCAL;
37012946
DR
377 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
378 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
379 pol->v.preferred_node = first_node(tmp);
fc36b8d3 380 } else if (!(pol->flags & MPOL_F_LOCAL)) {
37012946
DR
381 pol->v.preferred_node = node_remap(pol->v.preferred_node,
382 pol->w.cpuset_mems_allowed,
383 *nodes);
384 pol->w.cpuset_mems_allowed = *nodes;
385 }
1da177e4
LT
386}
387
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps.  The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes.  This avoids a window in which no node at all is
 * allowed for an allocation.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
404static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
405 enum mpol_rebind_step step)
1d0d2680 406{
1d0d2680
DR
407 if (!pol)
408 return;
89c522c7 409 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
1d0d2680
DR
410 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
411 return;
708c1bbc
MX
412
413 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
414 return;
415
416 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
417 BUG();
418
419 if (step == MPOL_REBIND_STEP1)
420 pol->flags |= MPOL_F_REBINDING;
421 else if (step == MPOL_REBIND_STEP2)
422 pol->flags &= ~MPOL_F_REBINDING;
423 else if (step >= MPOL_REBIND_NSTEP)
424 BUG();
425
426 mpol_ops[pol->mode].rebind(pol, newmask, step);
1d0d2680
DR
427}
428
429/*
430 * Wrapper for mpol_rebind_policy() that just requires task
431 * pointer, and updates task mempolicy.
58568d2a
MX
432 *
433 * Called with task's alloc_lock held.
1d0d2680
DR
434 */
435
708c1bbc
MX
436void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
437 enum mpol_rebind_step step)
1d0d2680 438{
708c1bbc 439 mpol_rebind_policy(tsk->mempolicy, new, step);
1d0d2680
DR
440}
441
442/*
443 * Rebind each vma in mm to new nodemask.
444 *
445 * Call holding a reference to mm. Takes mm->mmap_sem during call.
446 */
447
448void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
449{
450 struct vm_area_struct *vma;
451
452 down_write(&mm->mmap_sem);
453 for (vma = mm->mmap; vma; vma = vma->vm_next)
708c1bbc 454 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
1d0d2680
DR
455 up_write(&mm->mmap_sem);
456}
457
37012946
DR
458static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
459 [MPOL_DEFAULT] = {
460 .rebind = mpol_rebind_default,
461 },
462 [MPOL_INTERLEAVE] = {
463 .create = mpol_new_interleave,
464 .rebind = mpol_rebind_nodemask,
465 },
466 [MPOL_PREFERRED] = {
467 .create = mpol_new_preferred,
468 .rebind = mpol_rebind_preferred,
469 },
470 [MPOL_BIND] = {
471 .create = mpol_new_bind,
472 .rebind = mpol_rebind_nodemask,
473 },
474};
475
fc301289
CL
476static void migrate_page_add(struct page *page, struct list_head *pagelist,
477 unsigned long flags);
1a75a6c8 478
/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
483static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
dc9aa5b9
CL
484 unsigned long addr, unsigned long end,
485 const nodemask_t *nodes, unsigned long flags,
38e35860 486 void *private)
1da177e4 487{
91612e0d
HD
488 pte_t *orig_pte;
489 pte_t *pte;
705e87c0 490 spinlock_t *ptl;
941150a3 491
705e87c0 492 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
91612e0d 493 do {
6aab341e 494 struct page *page;
25ba77c1 495 int nid;
91612e0d
HD
496
497 if (!pte_present(*pte))
1da177e4 498 continue;
6aab341e
LT
499 page = vm_normal_page(vma, addr, *pte);
500 if (!page)
1da177e4 501 continue;
053837fc 502 /*
62b61f61
HD
503 * vm_normal_page() filters out zero pages, but there might
504 * still be PageReserved pages to skip, perhaps in a VDSO.
053837fc 505 */
b79bc0a0 506 if (PageReserved(page))
f4598c8b 507 continue;
6aab341e 508 nid = page_to_nid(page);
38e35860
CL
509 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
510 continue;
511
b1f72d18 512 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
fc301289 513 migrate_page_add(page, private, flags);
38e35860
CL
514 else
515 break;
91612e0d 516 } while (pte++, addr += PAGE_SIZE, addr != end);
705e87c0 517 pte_unmap_unlock(orig_pte, ptl);
91612e0d
HD
518 return addr != end;
519}
520
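/*
 * The queue_pages_*_range() helpers below walk the page tables top-down
 * (pgd -> pud -> pmd -> pte) over [addr, end).  Each level skips holes,
 * hugetlb entries are handled at the pmd level, and the pte-level walker
 * above does the actual node check and page isolation.
 */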
98094945
NH
521static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
522 pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
e2d8cf40
NH
523 void *private)
524{
525#ifdef CONFIG_HUGETLB_PAGE
526 int nid;
527 struct page *page;
cb900f41 528 spinlock_t *ptl;
e2d8cf40 529
cb900f41 530 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
e2d8cf40
NH
531 page = pte_page(huge_ptep_get((pte_t *)pmd));
532 nid = page_to_nid(page);
533 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
534 goto unlock;
535 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
536 if (flags & (MPOL_MF_MOVE_ALL) ||
537 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
538 isolate_huge_page(page, private);
539unlock:
cb900f41 540 spin_unlock(ptl);
e2d8cf40
NH
541#else
542 BUG();
543#endif
544}
545
98094945 546static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
dc9aa5b9
CL
547 unsigned long addr, unsigned long end,
548 const nodemask_t *nodes, unsigned long flags,
38e35860 549 void *private)
91612e0d
HD
550{
551 pmd_t *pmd;
552 unsigned long next;
553
554 pmd = pmd_offset(pud, addr);
555 do {
556 next = pmd_addr_end(addr, end);
e2d8cf40
NH
557 if (!pmd_present(*pmd))
558 continue;
559 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
98094945 560 queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
e2d8cf40
NH
561 flags, private);
562 continue;
563 }
e180377f 564 split_huge_page_pmd(vma, addr, pmd);
1a5a9906 565 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
91612e0d 566 continue;
98094945 567 if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
38e35860 568 flags, private))
91612e0d
HD
569 return -EIO;
570 } while (pmd++, addr = next, addr != end);
571 return 0;
572}
573
98094945 574static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
dc9aa5b9
CL
575 unsigned long addr, unsigned long end,
576 const nodemask_t *nodes, unsigned long flags,
38e35860 577 void *private)
91612e0d
HD
578{
579 pud_t *pud;
580 unsigned long next;
581
582 pud = pud_offset(pgd, addr);
583 do {
584 next = pud_addr_end(addr, end);
e2d8cf40
NH
585 if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
586 continue;
91612e0d
HD
587 if (pud_none_or_clear_bad(pud))
588 continue;
98094945 589 if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
38e35860 590 flags, private))
91612e0d
HD
591 return -EIO;
592 } while (pud++, addr = next, addr != end);
593 return 0;
594}
595
98094945 596static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
dc9aa5b9
CL
597 unsigned long addr, unsigned long end,
598 const nodemask_t *nodes, unsigned long flags,
38e35860 599 void *private)
91612e0d
HD
600{
601 pgd_t *pgd;
602 unsigned long next;
603
b5810039 604 pgd = pgd_offset(vma->vm_mm, addr);
91612e0d
HD
605 do {
606 next = pgd_addr_end(addr, end);
607 if (pgd_none_or_clear_bad(pgd))
608 continue;
98094945 609 if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
38e35860 610 flags, private))
91612e0d
HD
611 return -EIO;
612 } while (pgd++, addr = next, addr != end);
613 return 0;
1da177e4
LT
614}
615
#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
4b10e7d5
MG
626unsigned long change_prot_numa(struct vm_area_struct *vma,
627 unsigned long addr, unsigned long end)
b24f53a0 628{
4b10e7d5
MG
629 int nr_updated;
630 BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
b24f53a0 631
4b10e7d5 632 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
03c5a6e1
MG
633 if (nr_updated)
634 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 635
4b10e7d5 636 return nr_updated;
b24f53a0
LS
637}
638#else
639static unsigned long change_prot_numa(struct vm_area_struct *vma,
640 unsigned long addr, unsigned long end)
641{
642 return 0;
643}
644#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
645
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on the set of nodes determined by
 * @nodes and @flags, they are isolated and queued onto the pagelist
 * passed in via @private.
 */
1da177e4 653static struct vm_area_struct *
98094945 654queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
38e35860 655 const nodemask_t *nodes, unsigned long flags, void *private)
1da177e4
LT
656{
657 int err;
658 struct vm_area_struct *first, *vma, *prev;
659
053837fc 660
1da177e4
LT
661 first = find_vma(mm, start);
662 if (!first)
663 return ERR_PTR(-EFAULT);
664 prev = NULL;
665 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
b24f53a0
LS
666 unsigned long endvma = vma->vm_end;
667
668 if (endvma > end)
669 endvma = end;
670 if (vma->vm_start > start)
671 start = vma->vm_start;
672
dc9aa5b9
CL
673 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
674 if (!vma->vm_next && vma->vm_end < end)
675 return ERR_PTR(-EFAULT);
676 if (prev && prev->vm_end < vma->vm_start)
677 return ERR_PTR(-EFAULT);
678 }
b24f53a0 679
b24f53a0
LS
680 if (flags & MPOL_MF_LAZY) {
681 change_prot_numa(vma, start, endvma);
682 goto next;
683 }
684
685 if ((flags & MPOL_MF_STRICT) ||
dc9aa5b9 686 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
b24f53a0 687 vma_migratable(vma))) {
dc9aa5b9 688
98094945 689 err = queue_pages_pgd_range(vma, start, endvma, nodes,
38e35860 690 flags, private);
1da177e4
LT
691 if (err) {
692 first = ERR_PTR(err);
693 break;
694 }
695 }
b24f53a0 696next:
1da177e4
LT
697 prev = vma;
698 }
699 return first;
700}
701
/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
706static int vma_replace_policy(struct vm_area_struct *vma,
707 struct mempolicy *pol)
8d34694c 708{
869833f2
KM
709 int err;
710 struct mempolicy *old;
711 struct mempolicy *new;
8d34694c
KM
712
713 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
714 vma->vm_start, vma->vm_end, vma->vm_pgoff,
715 vma->vm_ops, vma->vm_file,
716 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
717
869833f2
KM
718 new = mpol_dup(pol);
719 if (IS_ERR(new))
720 return PTR_ERR(new);
721
722 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 723 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
724 if (err)
725 goto err_out;
8d34694c 726 }
869833f2
KM
727
728 old = vma->vm_policy;
729 vma->vm_policy = new; /* protected by mmap_sem */
730 mpol_put(old);
731
732 return 0;
733 err_out:
734 mpol_put(new);
8d34694c
KM
735 return err;
736}
737
1da177e4 738/* Step 2: apply policy to a range and do splits. */
9d8cebd4
KM
739static int mbind_range(struct mm_struct *mm, unsigned long start,
740 unsigned long end, struct mempolicy *new_pol)
1da177e4
LT
741{
742 struct vm_area_struct *next;
9d8cebd4
KM
743 struct vm_area_struct *prev;
744 struct vm_area_struct *vma;
745 int err = 0;
e26a5114 746 pgoff_t pgoff;
9d8cebd4
KM
747 unsigned long vmstart;
748 unsigned long vmend;
1da177e4 749
097d5910 750 vma = find_vma(mm, start);
9d8cebd4
KM
751 if (!vma || vma->vm_start > start)
752 return -EFAULT;
753
097d5910 754 prev = vma->vm_prev;
e26a5114
KM
755 if (start > vma->vm_start)
756 prev = vma;
757
9d8cebd4 758 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
1da177e4 759 next = vma->vm_next;
9d8cebd4
KM
760 vmstart = max(start, vma->vm_start);
761 vmend = min(end, vma->vm_end);
762
e26a5114
KM
763 if (mpol_equal(vma_policy(vma), new_pol))
764 continue;
765
766 pgoff = vma->vm_pgoff +
767 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
9d8cebd4 768 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
e26a5114 769 vma->anon_vma, vma->vm_file, pgoff,
8aacc9f5 770 new_pol);
9d8cebd4
KM
771 if (prev) {
772 vma = prev;
773 next = vma->vm_next;
3964acd0
ON
774 if (mpol_equal(vma_policy(vma), new_pol))
775 continue;
776 /* vma_merge() joined vma && vma->next, case 8 */
777 goto replace;
9d8cebd4
KM
778 }
779 if (vma->vm_start != vmstart) {
780 err = split_vma(vma->vm_mm, vma, vmstart, 1);
781 if (err)
782 goto out;
783 }
784 if (vma->vm_end != vmend) {
785 err = split_vma(vma->vm_mm, vma, vmend, 0);
786 if (err)
787 goto out;
788 }
3964acd0 789 replace:
869833f2 790 err = vma_replace_policy(vma, new_pol);
8d34694c
KM
791 if (err)
792 goto out;
1da177e4 793 }
9d8cebd4
KM
794
795 out:
1da177e4
LT
796 return err;
797}
798
c61afb18
PJ
799/*
800 * Update task->flags PF_MEMPOLICY bit: set iff non-default
801 * mempolicy. Allows more rapid checking of this (combined perhaps
802 * with other PF_* flag bits) on memory allocation hot code paths.
803 *
804 * If called from outside this file, the task 'p' should -only- be
805 * a newly forked child not yet visible on the task list, because
806 * manipulating the task flags of a visible task is not safe.
807 *
808 * The above limitation is why this routine has the funny name
809 * mpol_fix_fork_child_flag().
810 *
811 * It is also safe to call this with a task pointer of current,
812 * which the static wrapper mpol_set_task_struct_flag() does,
813 * for use within this file.
814 */
815
816void mpol_fix_fork_child_flag(struct task_struct *p)
817{
818 if (p->mempolicy)
819 p->flags |= PF_MEMPOLICY;
820 else
821 p->flags &= ~PF_MEMPOLICY;
822}
823
824static void mpol_set_task_struct_flag(void)
825{
826 mpol_fix_fork_child_flag(current);
827}
828
1da177e4 829/* Set the process memory policy */
028fec41
DR
830static long do_set_mempolicy(unsigned short mode, unsigned short flags,
831 nodemask_t *nodes)
1da177e4 832{
58568d2a 833 struct mempolicy *new, *old;
f4e53d91 834 struct mm_struct *mm = current->mm;
4bfc4495 835 NODEMASK_SCRATCH(scratch);
58568d2a 836 int ret;
1da177e4 837
4bfc4495
KH
838 if (!scratch)
839 return -ENOMEM;
f4e53d91 840
4bfc4495
KH
841 new = mpol_new(mode, flags, nodes);
842 if (IS_ERR(new)) {
843 ret = PTR_ERR(new);
844 goto out;
845 }
f4e53d91
LS
846 /*
847 * prevent changing our mempolicy while show_numa_maps()
848 * is using it.
849 * Note: do_set_mempolicy() can be called at init time
850 * with no 'mm'.
851 */
852 if (mm)
853 down_write(&mm->mmap_sem);
58568d2a 854 task_lock(current);
4bfc4495 855 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a
MX
856 if (ret) {
857 task_unlock(current);
858 if (mm)
859 up_write(&mm->mmap_sem);
860 mpol_put(new);
4bfc4495 861 goto out;
58568d2a
MX
862 }
863 old = current->mempolicy;
1da177e4 864 current->mempolicy = new;
c61afb18 865 mpol_set_task_struct_flag();
45c4745a 866 if (new && new->mode == MPOL_INTERLEAVE &&
f5b087b5 867 nodes_weight(new->v.nodes))
dfcd3c0d 868 current->il_next = first_node(new->v.nodes);
58568d2a 869 task_unlock(current);
f4e53d91
LS
870 if (mm)
871 up_write(&mm->mmap_sem);
872
58568d2a 873 mpol_put(old);
4bfc4495
KH
874 ret = 0;
875out:
876 NODEMASK_SCRATCH_FREE(scratch);
877 return ret;
1da177e4
LT
878}
879
bea904d5
LS
880/*
881 * Return nodemask for policy for get_mempolicy() query
58568d2a
MX
882 *
883 * Called with task's alloc_lock held
bea904d5
LS
884 */
885static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 886{
dfcd3c0d 887 nodes_clear(*nodes);
bea904d5
LS
888 if (p == &default_policy)
889 return;
890
45c4745a 891 switch (p->mode) {
19770b32
MG
892 case MPOL_BIND:
893 /* Fall through */
1da177e4 894 case MPOL_INTERLEAVE:
dfcd3c0d 895 *nodes = p->v.nodes;
1da177e4
LT
896 break;
897 case MPOL_PREFERRED:
fc36b8d3 898 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 899 node_set(p->v.preferred_node, *nodes);
53f2556b 900 /* else return empty node mask for local allocation */
1da177e4
LT
901 break;
902 default:
903 BUG();
904 }
905}
906
907static int lookup_node(struct mm_struct *mm, unsigned long addr)
908{
909 struct page *p;
910 int err;
911
912 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
913 if (err >= 0) {
914 err = page_to_nid(p);
915 put_page(p);
916 }
917 return err;
918}
919
1da177e4 920/* Retrieve NUMA policy */
dbcb0f19
AB
921static long do_get_mempolicy(int *policy, nodemask_t *nmask,
922 unsigned long addr, unsigned long flags)
1da177e4 923{
8bccd85f 924 int err;
1da177e4
LT
925 struct mm_struct *mm = current->mm;
926 struct vm_area_struct *vma = NULL;
927 struct mempolicy *pol = current->mempolicy;
928
754af6f5
LS
929 if (flags &
930 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 931 return -EINVAL;
754af6f5
LS
932
933 if (flags & MPOL_F_MEMS_ALLOWED) {
934 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
935 return -EINVAL;
936 *policy = 0; /* just so it's initialized */
58568d2a 937 task_lock(current);
754af6f5 938 *nmask = cpuset_current_mems_allowed;
58568d2a 939 task_unlock(current);
754af6f5
LS
940 return 0;
941 }
942
1da177e4 943 if (flags & MPOL_F_ADDR) {
bea904d5
LS
944 /*
945 * Do NOT fall back to task policy if the
946 * vma/shared policy at addr is NULL. We
947 * want to return MPOL_DEFAULT in this case.
948 */
1da177e4
LT
949 down_read(&mm->mmap_sem);
950 vma = find_vma_intersection(mm, addr, addr+1);
951 if (!vma) {
952 up_read(&mm->mmap_sem);
953 return -EFAULT;
954 }
955 if (vma->vm_ops && vma->vm_ops->get_policy)
956 pol = vma->vm_ops->get_policy(vma, addr);
957 else
958 pol = vma->vm_policy;
959 } else if (addr)
960 return -EINVAL;
961
962 if (!pol)
bea904d5 963 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
964
965 if (flags & MPOL_F_NODE) {
966 if (flags & MPOL_F_ADDR) {
967 err = lookup_node(mm, addr);
968 if (err < 0)
969 goto out;
8bccd85f 970 *policy = err;
1da177e4 971 } else if (pol == current->mempolicy &&
45c4745a 972 pol->mode == MPOL_INTERLEAVE) {
8bccd85f 973 *policy = current->il_next;
1da177e4
LT
974 } else {
975 err = -EINVAL;
976 goto out;
977 }
bea904d5
LS
978 } else {
979 *policy = pol == &default_policy ? MPOL_DEFAULT :
980 pol->mode;
d79df630
DR
981 /*
982 * Internal mempolicy flags must be masked off before exposing
983 * the policy to userspace.
984 */
985 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 986 }
1da177e4
LT
987
988 if (vma) {
989 up_read(&current->mm->mmap_sem);
990 vma = NULL;
991 }
992
1da177e4 993 err = 0;
58568d2a 994 if (nmask) {
c6b6ef8b
LS
995 if (mpol_store_user_nodemask(pol)) {
996 *nmask = pol->w.user_nodemask;
997 } else {
998 task_lock(current);
999 get_policy_nodemask(pol, nmask);
1000 task_unlock(current);
1001 }
58568d2a 1002 }
1da177e4
LT
1003
1004 out:
52cd3b07 1005 mpol_cond_put(pol);
1da177e4
LT
1006 if (vma)
1007 up_read(&current->mm->mmap_sem);
1008 return err;
1009}
1010
b20a3503 1011#ifdef CONFIG_MIGRATION
6ce3c4c0
CL
1012/*
1013 * page migration
1014 */
fc301289
CL
1015static void migrate_page_add(struct page *page, struct list_head *pagelist,
1016 unsigned long flags)
6ce3c4c0
CL
1017{
1018 /*
fc301289 1019 * Avoid migrating a page that is shared with others.
6ce3c4c0 1020 */
62695a84
NP
1021 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
1022 if (!isolate_lru_page(page)) {
1023 list_add_tail(&page->lru, pagelist);
6d9c285a
KM
1024 inc_zone_page_state(page, NR_ISOLATED_ANON +
1025 page_is_file_cache(page));
62695a84
NP
1026 }
1027 }
7e2ab150 1028}
6ce3c4c0 1029
742755a1 1030static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 1031{
e2d8cf40
NH
1032 if (PageHuge(page))
1033 return alloc_huge_page_node(page_hstate(compound_head(page)),
1034 node);
1035 else
1036 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
95a402c3
CL
1037}
1038
7e2ab150
CL
1039/*
1040 * Migrate pages from one node to a target node.
1041 * Returns error or the number of pages not migrated.
1042 */
dbcb0f19
AB
1043static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1044 int flags)
7e2ab150
CL
1045{
1046 nodemask_t nmask;
1047 LIST_HEAD(pagelist);
1048 int err = 0;
1049
1050 nodes_clear(nmask);
1051 node_set(source, nmask);
6ce3c4c0 1052
	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
1058 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
98094945 1059 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
7e2ab150
CL
1060 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1061
cf608ac1 1062 if (!list_empty(&pagelist)) {
7f0f2496 1063 err = migrate_pages(&pagelist, new_node_page, dest,
9c620e2b 1064 MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 1065 if (err)
e2d8cf40 1066 putback_movable_pages(&pagelist);
cf608ac1 1067 }
95a402c3 1068
7e2ab150 1069 return err;
6ce3c4c0
CL
1070}
1071
39743889 1072/*
7e2ab150
CL
1073 * Move pages between the two nodesets so as to preserve the physical
1074 * layout as much as possible.
39743889
CL
1075 *
1076 * Returns the number of page that could not be moved.
1077 */
0ce72d4f
AM
1078int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1079 const nodemask_t *to, int flags)
39743889 1080{
7e2ab150 1081 int busy = 0;
0aedadf9 1082 int err;
7e2ab150 1083 nodemask_t tmp;
39743889 1084
0aedadf9
CL
1085 err = migrate_prep();
1086 if (err)
1087 return err;
1088
53f2556b 1089 down_read(&mm->mmap_sem);
39743889 1090
0ce72d4f 1091 err = migrate_vmas(mm, from, to, flags);
7b2259b3
CL
1092 if (err)
1093 goto out;
1094
	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
d4984711 1125
0ce72d4f 1126 tmp = *from;
7e2ab150
CL
1127 while (!nodes_empty(tmp)) {
1128 int s,d;
b76ac7e7 1129 int source = NUMA_NO_NODE;
7e2ab150
CL
1130 int dest = 0;
1131
1132 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1133
1134 /*
1135 * do_migrate_pages() tries to maintain the relative
1136 * node relationship of the pages established between
1137 * threads and memory areas.
1138 *
1139 * However if the number of source nodes is not equal to
1140 * the number of destination nodes we can not preserve
1141 * this node relative relationship. In that case, skip
1142 * copying memory from a node that is in the destination
1143 * mask.
1144 *
1145 * Example: [2,3,4] -> [3,4,5] moves everything.
1146 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1147 */
1148
0ce72d4f
AM
1149 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1150 (node_isset(s, *to)))
4a5b18cc
LW
1151 continue;
1152
0ce72d4f 1153 d = node_remap(s, *from, *to);
7e2ab150
CL
1154 if (s == d)
1155 continue;
1156
1157 source = s; /* Node moved. Memorize */
1158 dest = d;
1159
1160 /* dest not in remaining from nodes? */
1161 if (!node_isset(dest, tmp))
1162 break;
1163 }
b76ac7e7 1164 if (source == NUMA_NO_NODE)
7e2ab150
CL
1165 break;
1166
1167 node_clear(source, tmp);
1168 err = migrate_to_node(mm, source, dest, flags);
1169 if (err > 0)
1170 busy += err;
1171 if (err < 0)
1172 break;
39743889 1173 }
7b2259b3 1174out:
39743889 1175 up_read(&mm->mmap_sem);
7e2ab150
CL
1176 if (err < 0)
1177 return err;
1178 return busy;
b20a3503
CL
1179
1180}
1181
/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
742755a1 1189static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
1190{
1191 struct vm_area_struct *vma = (struct vm_area_struct *)private;
3ad33b24 1192 unsigned long uninitialized_var(address);
95a402c3 1193
3ad33b24
LS
1194 while (vma) {
1195 address = page_address_in_vma(page, vma);
1196 if (address != -EFAULT)
1197 break;
1198 vma = vma->vm_next;
1199 }
11c731e8
WL
1200
1201 if (PageHuge(page)) {
1202 if (vma)
1203 return alloc_huge_page_noerr(vma, address, 1);
1204 else
1205 return NULL;
1206 }
0bf598d8 1207 /*
11c731e8 1208 * if !vma, alloc_page_vma() will use task or system default policy
0bf598d8 1209 */
3ad33b24 1210 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
95a402c3 1211}
b20a3503
CL
1212#else
1213
1214static void migrate_page_add(struct page *page, struct list_head *pagelist,
1215 unsigned long flags)
1216{
39743889
CL
1217}
1218
0ce72d4f
AM
1219int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1220 const nodemask_t *to, int flags)
b20a3503
CL
1221{
1222 return -ENOSYS;
1223}
95a402c3 1224
69939749 1225static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
1226{
1227 return NULL;
1228}
b20a3503
CL
1229#endif
1230
dbcb0f19 1231static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1232 unsigned short mode, unsigned short mode_flags,
1233 nodemask_t *nmask, unsigned long flags)
6ce3c4c0
CL
1234{
1235 struct vm_area_struct *vma;
1236 struct mm_struct *mm = current->mm;
1237 struct mempolicy *new;
1238 unsigned long end;
1239 int err;
1240 LIST_HEAD(pagelist);
1241
b24f53a0 1242 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1243 return -EINVAL;
74c00241 1244 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1245 return -EPERM;
1246
1247 if (start & ~PAGE_MASK)
1248 return -EINVAL;
1249
1250 if (mode == MPOL_DEFAULT)
1251 flags &= ~MPOL_MF_STRICT;
1252
1253 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1254 end = start + len;
1255
1256 if (end < start)
1257 return -EINVAL;
1258 if (end == start)
1259 return 0;
1260
028fec41 1261 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1262 if (IS_ERR(new))
1263 return PTR_ERR(new);
1264
b24f53a0
LS
1265 if (flags & MPOL_MF_LAZY)
1266 new->flags |= MPOL_F_MOF;
1267
6ce3c4c0
CL
1268 /*
1269 * If we are using the default policy then operation
1270 * on discontinuous address spaces is okay after all
1271 */
1272 if (!new)
1273 flags |= MPOL_MF_DISCONTIG_OK;
1274
028fec41
DR
1275 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1276 start, start + len, mode, mode_flags,
00ef2d2f 1277 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1278
0aedadf9
CL
1279 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1280
1281 err = migrate_prep();
1282 if (err)
b05ca738 1283 goto mpol_out;
0aedadf9 1284 }
4bfc4495
KH
1285 {
1286 NODEMASK_SCRATCH(scratch);
1287 if (scratch) {
1288 down_write(&mm->mmap_sem);
1289 task_lock(current);
1290 err = mpol_set_nodemask(new, nmask, scratch);
1291 task_unlock(current);
1292 if (err)
1293 up_write(&mm->mmap_sem);
1294 } else
1295 err = -ENOMEM;
1296 NODEMASK_SCRATCH_FREE(scratch);
1297 }
b05ca738
KM
1298 if (err)
1299 goto mpol_out;
1300
98094945 1301 vma = queue_pages_range(mm, start, end, nmask,
6ce3c4c0
CL
1302 flags | MPOL_MF_INVERT, &pagelist);
1303
b24f53a0 1304 err = PTR_ERR(vma); /* maybe ... */
a720094d 1305 if (!IS_ERR(vma))
9d8cebd4 1306 err = mbind_range(mm, start, end, new);
7e2ab150 1307
b24f53a0
LS
1308 if (!err) {
1309 int nr_failed = 0;
1310
cf608ac1 1311 if (!list_empty(&pagelist)) {
b24f53a0 1312 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
95a402c3 1313 nr_failed = migrate_pages(&pagelist, new_vma_page,
9c620e2b
HD
1314 (unsigned long)vma,
1315 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
cf608ac1 1316 if (nr_failed)
74060e4d 1317 putback_movable_pages(&pagelist);
cf608ac1 1318 }
6ce3c4c0 1319
b24f53a0 1320 if (nr_failed && (flags & MPOL_MF_STRICT))
6ce3c4c0 1321 err = -EIO;
ab8a3e14 1322 } else
b0e5fd73 1323 putback_movable_pages(&pagelist);
b20a3503 1324
6ce3c4c0 1325 up_write(&mm->mmap_sem);
b05ca738 1326 mpol_out:
f0be3d32 1327 mpol_put(new);
6ce3c4c0
CL
1328 return err;
1329}
1330
8bccd85f
CL
1331/*
1332 * User space interface with variable sized bitmaps for nodelists.
1333 */
1334
1335/* Copy a node mask from user space. */
39743889 1336static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1337 unsigned long maxnode)
1338{
1339 unsigned long k;
1340 unsigned long nlongs;
1341 unsigned long endmask;
1342
1343 --maxnode;
1344 nodes_clear(*nodes);
1345 if (maxnode == 0 || !nmask)
1346 return 0;
a9c930ba 1347 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1348 return -EINVAL;
8bccd85f
CL
1349
1350 nlongs = BITS_TO_LONGS(maxnode);
1351 if ((maxnode % BITS_PER_LONG) == 0)
1352 endmask = ~0UL;
1353 else
1354 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1355
	/* When the user specified more nodes than supported, just check
	   if the unsupported part is all zero. */
1358 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1359 if (nlongs > PAGE_SIZE/sizeof(long))
1360 return -EINVAL;
1361 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1362 unsigned long t;
1363 if (get_user(t, nmask + k))
1364 return -EFAULT;
1365 if (k == nlongs - 1) {
1366 if (t & endmask)
1367 return -EINVAL;
1368 } else if (t)
1369 return -EINVAL;
1370 }
1371 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1372 endmask = ~0UL;
1373 }
1374
1375 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1376 return -EFAULT;
1377 nodes_addr(*nodes)[nlongs-1] &= endmask;
1378 return 0;
1379}
1380
1381/* Copy a kernel node mask to user space */
1382static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1383 nodemask_t *nodes)
1384{
1385 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1386 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1387
1388 if (copy > nbytes) {
1389 if (copy > PAGE_SIZE)
1390 return -EINVAL;
1391 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1392 return -EFAULT;
1393 copy = nbytes;
1394 }
1395 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1396}
1397
938bb9f5
HC
1398SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1399 unsigned long, mode, unsigned long __user *, nmask,
1400 unsigned long, maxnode, unsigned, flags)
8bccd85f
CL
1401{
1402 nodemask_t nodes;
1403 int err;
028fec41 1404 unsigned short mode_flags;
8bccd85f 1405
028fec41
DR
1406 mode_flags = mode & MPOL_MODE_FLAGS;
1407 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1408 if (mode >= MPOL_MAX)
1409 return -EINVAL;
4c50bc01
DR
1410 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1411 (mode_flags & MPOL_F_RELATIVE_NODES))
1412 return -EINVAL;
8bccd85f
CL
1413 err = get_nodes(&nodes, nmask, maxnode);
1414 if (err)
1415 return err;
028fec41 1416 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1417}
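/*
 * Example: a userspace call such as
 *	mbind(addr, len, MPOL_BIND | MPOL_F_STATIC_NODES, &mask, maxnode, 0)
 * arrives with the mode flags OR'ed into the mode argument; the split
 * above hands do_mbind() mode == MPOL_BIND and mode_flags ==
 * MPOL_F_STATIC_NODES.
 */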
1418
1419/* Set the process memory policy */
938bb9f5
HC
1420SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1421 unsigned long, maxnode)
8bccd85f
CL
1422{
1423 int err;
1424 nodemask_t nodes;
028fec41 1425 unsigned short flags;
8bccd85f 1426
028fec41
DR
1427 flags = mode & MPOL_MODE_FLAGS;
1428 mode &= ~MPOL_MODE_FLAGS;
1429 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1430 return -EINVAL;
4c50bc01
DR
1431 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1432 return -EINVAL;
8bccd85f
CL
1433 err = get_nodes(&nodes, nmask, maxnode);
1434 if (err)
1435 return err;
028fec41 1436 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1437}
1438
938bb9f5
HC
1439SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1440 const unsigned long __user *, old_nodes,
1441 const unsigned long __user *, new_nodes)
39743889 1442{
c69e8d9c 1443 const struct cred *cred = current_cred(), *tcred;
596d7cfa 1444 struct mm_struct *mm = NULL;
39743889 1445 struct task_struct *task;
39743889
CL
1446 nodemask_t task_nodes;
1447 int err;
596d7cfa
KM
1448 nodemask_t *old;
1449 nodemask_t *new;
1450 NODEMASK_SCRATCH(scratch);
1451
1452 if (!scratch)
1453 return -ENOMEM;
39743889 1454
596d7cfa
KM
1455 old = &scratch->mask1;
1456 new = &scratch->mask2;
1457
1458 err = get_nodes(old, old_nodes, maxnode);
39743889 1459 if (err)
596d7cfa 1460 goto out;
39743889 1461
596d7cfa 1462 err = get_nodes(new, new_nodes, maxnode);
39743889 1463 if (err)
596d7cfa 1464 goto out;
39743889
CL
1465
1466 /* Find the mm_struct */
55cfaa3c 1467 rcu_read_lock();
228ebcbe 1468 task = pid ? find_task_by_vpid(pid) : current;
39743889 1469 if (!task) {
55cfaa3c 1470 rcu_read_unlock();
596d7cfa
KM
1471 err = -ESRCH;
1472 goto out;
39743889 1473 }
3268c63e 1474 get_task_struct(task);
39743889 1475
596d7cfa 1476 err = -EINVAL;
39743889
CL
1477
1478 /*
1479 * Check if this process has the right to modify the specified
1480 * process. The right exists if the process has administrative
7f927fcc 1481 * capabilities, superuser privileges or the same
39743889
CL
1482 * userid as the target process.
1483 */
c69e8d9c 1484 tcred = __task_cred(task);
b38a86eb
EB
1485 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1486 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
74c00241 1487 !capable(CAP_SYS_NICE)) {
c69e8d9c 1488 rcu_read_unlock();
39743889 1489 err = -EPERM;
3268c63e 1490 goto out_put;
39743889 1491 }
c69e8d9c 1492 rcu_read_unlock();
39743889
CL
1493
1494 task_nodes = cpuset_mems_allowed(task);
1495 /* Is the user allowed to access the target nodes? */
596d7cfa 1496 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1497 err = -EPERM;
3268c63e 1498 goto out_put;
39743889
CL
1499 }
1500
01f13bd6 1501 if (!nodes_subset(*new, node_states[N_MEMORY])) {
3b42d28b 1502 err = -EINVAL;
3268c63e 1503 goto out_put;
3b42d28b
CL
1504 }
1505
86c3a764
DQ
1506 err = security_task_movememory(task);
1507 if (err)
3268c63e 1508 goto out_put;
86c3a764 1509
3268c63e
CL
1510 mm = get_task_mm(task);
1511 put_task_struct(task);
f2a9ef88
SL
1512
1513 if (!mm) {
3268c63e 1514 err = -EINVAL;
f2a9ef88
SL
1515 goto out;
1516 }
1517
1518 err = do_migrate_pages(mm, old, new,
1519 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1520
1521 mmput(mm);
1522out:
596d7cfa
KM
1523 NODEMASK_SCRATCH_FREE(scratch);
1524
39743889 1525 return err;
3268c63e
CL
1526
1527out_put:
1528 put_task_struct(task);
1529 goto out;
1530
39743889
CL
1531}
1532
1533
8bccd85f 1534/* Retrieve NUMA policy */
938bb9f5
HC
1535SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1536 unsigned long __user *, nmask, unsigned long, maxnode,
1537 unsigned long, addr, unsigned long, flags)
8bccd85f 1538{
dbcb0f19
AB
1539 int err;
1540 int uninitialized_var(pval);
8bccd85f
CL
1541 nodemask_t nodes;
1542
1543 if (nmask != NULL && maxnode < MAX_NUMNODES)
1544 return -EINVAL;
1545
1546 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1547
1548 if (err)
1549 return err;
1550
1551 if (policy && put_user(pval, policy))
1552 return -EFAULT;
1553
1554 if (nmask)
1555 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1556
1557 return err;
1558}
1559
1da177e4
LT
1560#ifdef CONFIG_COMPAT
1561
1562asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1563 compat_ulong_t __user *nmask,
1564 compat_ulong_t maxnode,
1565 compat_ulong_t addr, compat_ulong_t flags)
1566{
1567 long err;
1568 unsigned long __user *nm = NULL;
1569 unsigned long nr_bits, alloc_size;
1570 DECLARE_BITMAP(bm, MAX_NUMNODES);
1571
1572 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1573 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1574
1575 if (nmask)
1576 nm = compat_alloc_user_space(alloc_size);
1577
1578 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1579
1580 if (!err && nmask) {
2bbff6c7
KH
1581 unsigned long copy_size;
1582 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1583 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1584 /* ensure entire bitmap is zeroed */
1585 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1586 err |= compat_put_bitmap(nmask, bm, nr_bits);
1587 }
1588
1589 return err;
1590}
1591
1592asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1593 compat_ulong_t maxnode)
1594{
1595 long err = 0;
1596 unsigned long __user *nm = NULL;
1597 unsigned long nr_bits, alloc_size;
1598 DECLARE_BITMAP(bm, MAX_NUMNODES);
1599
1600 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1601 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1602
1603 if (nmask) {
1604 err = compat_get_bitmap(bm, nmask, nr_bits);
1605 nm = compat_alloc_user_space(alloc_size);
1606 err |= copy_to_user(nm, bm, alloc_size);
1607 }
1608
1609 if (err)
1610 return -EFAULT;
1611
1612 return sys_set_mempolicy(mode, nm, nr_bits+1);
1613}
1614
1615asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1616 compat_ulong_t mode, compat_ulong_t __user *nmask,
1617 compat_ulong_t maxnode, compat_ulong_t flags)
1618{
1619 long err = 0;
1620 unsigned long __user *nm = NULL;
1621 unsigned long nr_bits, alloc_size;
dfcd3c0d 1622 nodemask_t bm;
1da177e4
LT
1623
1624 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1625 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1626
1627 if (nmask) {
dfcd3c0d 1628 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1da177e4 1629 nm = compat_alloc_user_space(alloc_size);
dfcd3c0d 1630 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1da177e4
LT
1631 }
1632
1633 if (err)
1634 return -EFAULT;
1635
1636 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1637}
1638
1639#endif
1640
/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies must be
 * protected by task_lock(task) by the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
d98f6cb6 1656struct mempolicy *get_vma_policy(struct task_struct *task,
48fce342 1657 struct vm_area_struct *vma, unsigned long addr)
1da177e4 1658{
5606e387 1659 struct mempolicy *pol = get_task_policy(task);
1da177e4
LT
1660
1661 if (vma) {
480eccf9 1662 if (vma->vm_ops && vma->vm_ops->get_policy) {
ae4d8c16
LS
1663 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1664 addr);
1665 if (vpol)
1666 pol = vpol;
00442ad0 1667 } else if (vma->vm_policy) {
1da177e4 1668 pol = vma->vm_policy;
00442ad0
MG
1669
1670 /*
1671 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1672 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1673 * count on these policies which will be dropped by
1674 * mpol_cond_put() later
1675 */
1676 if (mpol_needs_cond_ref(pol))
1677 mpol_get(pol);
1678 }
1da177e4
LT
1679 }
1680 if (!pol)
1681 pol = &default_policy;
1682 return pol;
1683}
1684
fc314724
MG
1685bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
1686{
1687 struct mempolicy *pol = get_task_policy(task);
1688 if (vma) {
1689 if (vma->vm_ops && vma->vm_ops->get_policy) {
1690 bool ret = false;
1691
1692 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1693 if (pol && (pol->flags & MPOL_F_MOF))
1694 ret = true;
1695 mpol_cond_put(pol);
1696
1697 return ret;
1698 } else if (vma->vm_policy) {
1699 pol = vma->vm_policy;
1700 }
1701 }
1702
1703 if (!pol)
1704 return default_policy.flags & MPOL_F_MOF;
1705
1706 return pol->flags & MPOL_F_MOF;
1707}
1708
d3eb1570
LJ
1709static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1710{
1711 enum zone_type dynamic_policy_zone = policy_zone;
1712
1713 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1714
	/*
	 * If policy->v.nodes has movable memory only, we apply the policy
	 * only when gfp_zone(gfp) == ZONE_MOVABLE.
	 *
	 * policy->v.nodes is intersected with node_states[N_MEMORY], so if
	 * the following test fails, it implies policy->v.nodes contains
	 * movable memory only.
	 */
1723 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1724 dynamic_policy_zone = ZONE_MOVABLE;
1725
1726 return zone >= dynamic_policy_zone;
1727}
1728
52cd3b07
LS
1729/*
1730 * Return a nodemask representing a mempolicy for filtering nodes for
1731 * page allocation
1732 */
1733static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1734{
1735 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1736 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1737 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1738 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1739 return &policy->v.nodes;
1740
1741 return NULL;
1742}
1743
52cd3b07 1744/* Return a zonelist indicated by gfp for node representing a mempolicy */
2f5f9486
AK
1745static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1746 int nd)
1da177e4 1747{
45c4745a 1748 switch (policy->mode) {
1da177e4 1749 case MPOL_PREFERRED:
fc36b8d3
LS
1750 if (!(policy->flags & MPOL_F_LOCAL))
1751 nd = policy->v.preferred_node;
1da177e4
LT
1752 break;
1753 case MPOL_BIND:
19770b32 1754 /*
52cd3b07
LS
1755 * Normally, MPOL_BIND allocations are node-local within the
1756 * allowed nodemask. However, if __GFP_THISNODE is set and the
6eb27e1f 1757 * current node isn't part of the mask, we use the zonelist for
52cd3b07 1758 * the first node in the mask instead.
19770b32 1759 */
19770b32
MG
1760 if (unlikely(gfp & __GFP_THISNODE) &&
1761 unlikely(!node_isset(nd, policy->v.nodes)))
1762 nd = first_node(policy->v.nodes);
1763 break;
1da177e4 1764 default:
1da177e4
LT
1765 BUG();
1766 }
0e88460d 1767 return node_zonelist(nd, gfp);
1da177e4
LT
1768}
1769
1770/* Do dynamic interleaving for a process */
1771static unsigned interleave_nodes(struct mempolicy *policy)
1772{
1773 unsigned nid, next;
1774 struct task_struct *me = current;
1775
1776 nid = me->il_next;
dfcd3c0d 1777 next = next_node(nid, policy->v.nodes);
1da177e4 1778 if (next >= MAX_NUMNODES)
dfcd3c0d 1779 next = first_node(policy->v.nodes);
f5b087b5
DR
1780 if (next < MAX_NUMNODES)
1781 me->il_next = next;
1da177e4
LT
1782 return nid;
1783}
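/*
 * Example (illustrative): with policy->v.nodes = {0,2,5} and il_next == 2,
 * this returns node 2 and advances il_next to 5; the following call
 * returns 5 and wraps il_next back to node 0.
 */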
1784
dc85da15
CL
1785/*
1786 * Depending on the memory policy, provide a node from which to allocate the
1787 * next slab entry.
52cd3b07
LS
1788 * @policy must be protected from freeing by the caller. If @policy is
1789 * the current task's mempolicy, this protection is implicit, as only the
1790 * task can change its policy. The system default policy requires no
1791 * such protection.
dc85da15 1792 */
e7b691b0 1793unsigned slab_node(void)
dc85da15 1794{
e7b691b0
AK
1795 struct mempolicy *policy;
1796
1797 if (in_interrupt())
1798 return numa_node_id();
1799
1800 policy = current->mempolicy;
fc36b8d3 1801 if (!policy || policy->flags & MPOL_F_LOCAL)
bea904d5
LS
1802 return numa_node_id();
1803
1804 switch (policy->mode) {
1805 case MPOL_PREFERRED:
fc36b8d3
LS
1806 /*
1807 * handled MPOL_F_LOCAL above
1808 */
1809 return policy->v.preferred_node;
765c4507 1810
dc85da15
CL
1811 case MPOL_INTERLEAVE:
1812 return interleave_nodes(policy);
1813
dd1a239f 1814 case MPOL_BIND: {
dc85da15
CL
1815 /*
1816 * Follow bind policy behavior and start allocation at the
1817 * first node.
1818 */
19770b32
MG
1819 struct zonelist *zonelist;
1820 struct zone *zone;
1821 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1822 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1823 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1824 &policy->v.nodes,
1825 &zone);
800416f7 1826 return zone ? zone->node : numa_node_id();
dd1a239f 1827 }
dc85da15 1828
dc85da15 1829 default:
bea904d5 1830 BUG();
dc85da15
CL
1831 }
1832}
1833
1da177e4
LT
1834/* Do static interleaving for a VMA with known offset. */
1835static unsigned offset_il_node(struct mempolicy *pol,
1836 struct vm_area_struct *vma, unsigned long off)
1837{
dfcd3c0d 1838 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1839 unsigned target;
1da177e4 1840 int c;
b76ac7e7 1841 int nid = NUMA_NO_NODE;
1da177e4 1842
f5b087b5
DR
1843 if (!nnodes)
1844 return numa_node_id();
1845 target = (unsigned int)off % nnodes;
1da177e4
LT
1846 c = 0;
1847 do {
dfcd3c0d 1848 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1849 c++;
1850 } while (c <= target);
1da177e4
LT
1851 return nid;
1852}
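/*
 * Example (illustrative): with pol->v.nodes = {1,3,5} and off == 7,
 * nnodes == 3 and target == 7 % 3 == 1, so the walk above stops at the
 * second set node and returns node 3.
 */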
1853
5da7ca86
CL
1854/* Determine a node number for interleave */
1855static inline unsigned interleave_nid(struct mempolicy *pol,
1856 struct vm_area_struct *vma, unsigned long addr, int shift)
1857{
1858 if (vma) {
1859 unsigned long off;
1860
3b98b087
NA
1861 /*
1862 * for small pages, there is no difference between
1863 * shift and PAGE_SHIFT, so the bit-shift is safe.
1864 * for huge pages, since vm_pgoff is in units of small
1865 * pages, we need to shift off the always 0 bits to get
1866 * a useful offset.
1867 */
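/*
 * Example (illustrative): for a 2MB huge page with 4KB base pages,
 * shift == 21 and PAGE_SHIFT == 12, so vm_pgoff is shifted right by 9
 * to count the offset in huge-page-sized units.
 */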
1868 BUG_ON(shift < PAGE_SHIFT);
1869 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1870 off += (addr - vma->vm_start) >> shift;
1871 return offset_il_node(pol, vma, off);
1872 } else
1873 return interleave_nodes(pol);
1874}
1875
778d3b0f
MH
1876/*
1877 * Return the bit number of a random bit set in the nodemask.
b76ac7e7 1878 * (returns NUMA_NO_NODE if nodemask is empty)
778d3b0f
MH
1879 */
1880int node_random(const nodemask_t *maskp)
1881{
b76ac7e7 1882 int w, bit = NUMA_NO_NODE;
778d3b0f
MH
1883
1884 w = nodes_weight(*maskp);
1885 if (w)
1886 bit = bitmap_ord_to_pos(maskp->bits,
1887 get_random_int() % w, MAX_NUMNODES);
1888 return bit;
1889}
1890
00ac59ad 1891#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1892/*
1893 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1894 * @vma = virtual memory area whose policy is sought
1895 * @addr = address in @vma for shared policy lookup and interleave policy
1896 * @gfp_flags = for requested zone
19770b32
MG
1897 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1898 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1899 *
52cd3b07
LS
1900 * Returns a zonelist suitable for a huge page allocation and a pointer
1901 * to the struct mempolicy for conditional unref after allocation.
1902 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1903 * @nodemask for filtering the zonelist.
c0ff7453
MX
1904 *
1905 * Must be protected by get_mems_allowed()
480eccf9 1906 */
396faf03 1907struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
19770b32
MG
1908 gfp_t gfp_flags, struct mempolicy **mpol,
1909 nodemask_t **nodemask)
5da7ca86 1910{
480eccf9 1911 struct zonelist *zl;
5da7ca86 1912
52cd3b07 1913 *mpol = get_vma_policy(current, vma, addr);
19770b32 1914 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1915
52cd3b07
LS
1916 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1917 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
a5516438 1918 huge_page_shift(hstate_vma(vma))), gfp_flags);
52cd3b07 1919 } else {
2f5f9486 1920 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1921 if ((*mpol)->mode == MPOL_BIND)
1922 *nodemask = &(*mpol)->v.nodes;
480eccf9
LS
1923 }
1924 return zl;
5da7ca86 1925}
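/*
 * Illustrative use (a sketch of the hugetlb allocation path; the local
 * variable names are assumptions):
 *
 *	zl = huge_zonelist(vma, addr, gfp_flags, &mpol, &nodemask);
 *	for_each_zone_zonelist_nodemask(zone, z, zl, highidx, nodemask)
 *		... try to dequeue a huge page from this zone ...
 *	mpol_cond_put(mpol);
 */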
06808b08
LS
1926
1927/*
1928 * init_nodemask_of_mempolicy
1929 *
1930 * If the current task's mempolicy is "default" [NULL], return 'false'
1931 * to indicate default policy. Otherwise, extract the policy nodemask
1932 * for 'bind' or 'interleave' policy into the argument nodemask, or
1933 * initialize the argument nodemask to contain the single node for
1934 * 'preferred' or 'local' policy and return 'true' to indicate presence
1935 * of non-default mempolicy.
1936 *
1937 * We don't bother with reference counting the mempolicy [mpol_get/put]
1938 * because the current task is examining its own mempolicy and a task's
1939 * mempolicy is only ever changed by the task itself.
1940 *
1941 * N.B., it is the caller's responsibility to free a returned nodemask.
1942 */
1943bool init_nodemask_of_mempolicy(nodemask_t *mask)
1944{
1945 struct mempolicy *mempolicy;
1946 int nid;
1947
1948 if (!(mask && current->mempolicy))
1949 return false;
1950
c0ff7453 1951 task_lock(current);
06808b08
LS
1952 mempolicy = current->mempolicy;
1953 switch (mempolicy->mode) {
1954 case MPOL_PREFERRED:
1955 if (mempolicy->flags & MPOL_F_LOCAL)
1956 nid = numa_node_id();
1957 else
1958 nid = mempolicy->v.preferred_node;
1959 init_nodemask_of_node(mask, nid);
1960 break;
1961
1962 case MPOL_BIND:
1963 /* Fall through */
1964 case MPOL_INTERLEAVE:
1965 *mask = mempolicy->v.nodes;
1966 break;
1967
1968 default:
1969 BUG();
1970 }
c0ff7453 1971 task_unlock(current);
06808b08
LS
1972
1973 return true;
1974}
00ac59ad 1975#endif
5da7ca86 1976
6f48d0eb
DR
1977/*
1978 * mempolicy_nodemask_intersects
1979 *
1980 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1981 * policy. Otherwise, check for intersection between mask and the policy
1982 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1983 * policy, always return true since it may allocate elsewhere on fallback.
1984 *
1985 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1986 */
1987bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1988 const nodemask_t *mask)
1989{
1990 struct mempolicy *mempolicy;
1991 bool ret = true;
1992
1993 if (!mask)
1994 return ret;
1995 task_lock(tsk);
1996 mempolicy = tsk->mempolicy;
1997 if (!mempolicy)
1998 goto out;
1999
2000 switch (mempolicy->mode) {
2001 case MPOL_PREFERRED:
2002 /*
2003 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
2004 * allocate from; they may fall back to other nodes when OOM.
2005 * Thus, it's possible for tsk to have allocated memory from
2006 * nodes in mask.
2007 */
2008 break;
2009 case MPOL_BIND:
2010 case MPOL_INTERLEAVE:
2011 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2012 break;
2013 default:
2014 BUG();
2015 }
2016out:
2017 task_unlock(tsk);
2018 return ret;
2019}
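/*
 * Example (illustrative): a task bound (MPOL_BIND) to nodes {0,1} returns
 * true for mask {1,2} but false for mask {2,3}; an MPOL_PREFERRED task
 * always returns true because it may have fallen back to any node.
 */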
2020
1da177e4
LT
2021/* Allocate a page in interleaved policy.
2022 Own path because it needs to do special accounting. */
662f3a0b
AK
2023static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2024 unsigned nid)
1da177e4
LT
2025{
2026 struct zonelist *zl;
2027 struct page *page;
2028
0e88460d 2029 zl = node_zonelist(nid, gfp);
1da177e4 2030 page = __alloc_pages(gfp, order, zl);
dd1a239f 2031 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
ca889e6c 2032 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
2033 return page;
2034}
2035
2036/**
0bbbc0b3 2037 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
2038 *
2039 * @gfp:
2040 * %GFP_USER user allocation.
2041 * %GFP_KERNEL kernel allocations,
2042 * %GFP_HIGHMEM highmem/user allocations,
2043 * %GFP_FS allocation should not call back into a file system.
2044 * %GFP_ATOMIC don't sleep.
2045 *
0bbbc0b3 2046 * @order: Order of the GFP allocation.
1da177e4
LT
2047 * @vma: Pointer to VMA or NULL if not available.
2048 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2049 *
2050 * This function allocates a page from the kernel page pool and applies
2051 * a NUMA policy associated with the VMA or the current process.
2052 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2053 * mm_struct of the VMA to prevent it from going away. Should be used for
2054 * all allocations for pages that will be mapped into
2055 * user space. Returns NULL when no page can be allocated.
2056 *
2057 * Should be called with the mmap_sem of the vma held.
2058 */
2059struct page *
0bbbc0b3 2060alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2f5f9486 2061 unsigned long addr, int node)
1da177e4 2062{
cc9a6c87 2063 struct mempolicy *pol;
c0ff7453 2064 struct page *page;
cc9a6c87
MG
2065 unsigned int cpuset_mems_cookie;
2066
2067retry_cpuset:
2068 pol = get_vma_policy(current, vma, addr);
2069 cpuset_mems_cookie = get_mems_allowed();
1da177e4 2070
45c4745a 2071 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1da177e4 2072 unsigned nid;
5da7ca86 2073
8eac563c 2074 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
52cd3b07 2075 mpol_cond_put(pol);
0bbbc0b3 2076 page = alloc_page_interleave(gfp, order, nid);
cc9a6c87
MG
2077 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2078 goto retry_cpuset;
2079
c0ff7453 2080 return page;
1da177e4 2081 }
212a0a6f
DR
2082 page = __alloc_pages_nodemask(gfp, order,
2083 policy_zonelist(gfp, pol, node),
0bbbc0b3 2084 policy_nodemask(gfp, pol));
212a0a6f
DR
2085 if (unlikely(mpol_needs_cond_ref(pol)))
2086 __mpol_put(pol);
cc9a6c87
MG
2087 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2088 goto retry_cpuset;
c0ff7453 2089 return page;
1da177e4
LT
2090}
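/*
 * Illustrative call (a sketch): a fault handler allocating a single page
 * for a user mapping, with mmap_sem held for read:
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id());
 */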
2091
2092/**
2093 * alloc_pages_current - Allocate pages.
2094 *
2095 * @gfp:
2096 * %GFP_USER user allocation,
2097 * %GFP_KERNEL kernel allocation,
2098 * %GFP_HIGHMEM highmem allocation,
2099 * %GFP_FS don't call back into a file system.
2100 * %GFP_ATOMIC don't sleep.
2101 * @order: Power of two of allocation size in pages. 0 is a single page.
2102 *
2103 * Allocate a page from the kernel page pool. When not in
2104 * interrupt context, the current process' NUMA policy is applied.
2105 * Returns NULL when no page can be allocated.
2106 *
cf2a473c 2107 * Don't call cpuset_update_task_memory_state() unless
1da177e4
LT
2108 * 1) it's ok to take cpuset_sem (can WAIT), and
2109 * 2) allocating for current task (not interrupt).
2110 */
dd0fc66f 2111struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2112{
5606e387 2113 struct mempolicy *pol = get_task_policy(current);
c0ff7453 2114 struct page *page;
cc9a6c87 2115 unsigned int cpuset_mems_cookie;
1da177e4 2116
9b819d20 2117 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1da177e4 2118 pol = &default_policy;
52cd3b07 2119
cc9a6c87
MG
2120retry_cpuset:
2121 cpuset_mems_cookie = get_mems_allowed();
2122
52cd3b07
LS
2123 /*
2124 * No reference counting needed for current->mempolicy
2125 * nor system default_policy
2126 */
45c4745a 2127 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2128 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2129 else
2130 page = __alloc_pages_nodemask(gfp, order,
5c4b4be3
AK
2131 policy_zonelist(gfp, pol, numa_node_id()),
2132 policy_nodemask(gfp, pol));
cc9a6c87
MG
2133
2134 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2135 goto retry_cpuset;
2136
c0ff7453 2137 return page;
1da177e4
LT
2138}
2139EXPORT_SYMBOL(alloc_pages_current);
2140
ef0855d3
ON
2141int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2142{
2143 struct mempolicy *pol = mpol_dup(vma_policy(src));
2144
2145 if (IS_ERR(pol))
2146 return PTR_ERR(pol);
2147 dst->vm_policy = pol;
2148 return 0;
2149}
2150
4225399a 2151/*
846a16bf 2152 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2153 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2154 * with the mems_allowed returned by cpuset_mems_allowed(). This
2155 * keeps mempolicies cpuset relative after its cpuset moves. See
2156 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2157 *
2158 * current's mempolicy may be rebound by the other task (the task that changes
2159 * the cpuset's mems), so we needn't do rebind work for the current task.
4225399a 2160 */
4225399a 2161
846a16bf
LS
2162/* Slow path of a mempolicy duplicate */
2163struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2164{
2165 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2166
2167 if (!new)
2168 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2169
2170 /* task's mempolicy is protected by alloc_lock */
2171 if (old == current->mempolicy) {
2172 task_lock(current);
2173 *new = *old;
2174 task_unlock(current);
2175 } else
2176 *new = *old;
2177
99ee4ca7 2178 rcu_read_lock();
4225399a
PJ
2179 if (current_cpuset_is_being_rebound()) {
2180 nodemask_t mems = cpuset_mems_allowed(current);
708c1bbc
MX
2181 if (new->flags & MPOL_F_REBINDING)
2182 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2183 else
2184 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
4225399a 2185 }
99ee4ca7 2186 rcu_read_unlock();
1da177e4 2187 atomic_set(&new->refcnt, 1);
1da177e4
LT
2188 return new;
2189}
2190
2191/* Slow path of a mempolicy comparison */
fcfb4dcc 2192bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2193{
2194 if (!a || !b)
fcfb4dcc 2195 return false;
45c4745a 2196 if (a->mode != b->mode)
fcfb4dcc 2197 return false;
19800502 2198 if (a->flags != b->flags)
fcfb4dcc 2199 return false;
19800502
BL
2200 if (mpol_store_user_nodemask(a))
2201 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2202 return false;
19800502 2203
45c4745a 2204 switch (a->mode) {
19770b32
MG
2205 case MPOL_BIND:
2206 /* Fall through */
1da177e4 2207 case MPOL_INTERLEAVE:
fcfb4dcc 2208 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2209 case MPOL_PREFERRED:
75719661 2210 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2211 default:
2212 BUG();
fcfb4dcc 2213 return false;
1da177e4
LT
2214 }
2215}
2216
1da177e4
LT
2217/*
2218 * Shared memory backing store policy support.
2219 *
2220 * Remember policies even when nobody has shared memory mapped.
2221 * The policies are kept in a Red-Black tree linked from the inode.
2222 * They are protected by the sp->lock spinlock, which should be held
2223 * for any accesses to the tree.
2224 */
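/*
 * Example (illustrative): a tmpfs file whose pages 0-15 were given an
 * interleave policy and pages 16-31 a bind policy is represented by two
 * sp_nodes covering [0,16) and [16,32), each holding its own mempolicy.
 */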
2225
2226/* lookup first element intersecting start-end */
42288fe3 2227/* Caller holds sp->lock */
1da177e4
LT
2228static struct sp_node *
2229sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2230{
2231 struct rb_node *n = sp->root.rb_node;
2232
2233 while (n) {
2234 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2235
2236 if (start >= p->end)
2237 n = n->rb_right;
2238 else if (end <= p->start)
2239 n = n->rb_left;
2240 else
2241 break;
2242 }
2243 if (!n)
2244 return NULL;
2245 for (;;) {
2246 struct sp_node *w = NULL;
2247 struct rb_node *prev = rb_prev(n);
2248 if (!prev)
2249 break;
2250 w = rb_entry(prev, struct sp_node, nd);
2251 if (w->end <= start)
2252 break;
2253 n = prev;
2254 }
2255 return rb_entry(n, struct sp_node, nd);
2256}
2257
2258/* Insert a new shared policy into the list. */
2259/* Caller holds sp->lock */
2260static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2261{
2262 struct rb_node **p = &sp->root.rb_node;
2263 struct rb_node *parent = NULL;
2264 struct sp_node *nd;
2265
2266 while (*p) {
2267 parent = *p;
2268 nd = rb_entry(parent, struct sp_node, nd);
2269 if (new->start < nd->start)
2270 p = &(*p)->rb_left;
2271 else if (new->end > nd->end)
2272 p = &(*p)->rb_right;
2273 else
2274 BUG();
2275 }
2276 rb_link_node(&new->nd, parent, p);
2277 rb_insert_color(&new->nd, &sp->root);
140d5a49 2278 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2279 new->policy ? new->policy->mode : 0);
1da177e4
LT
2280}
2281
2282/* Find shared policy intersecting idx */
2283struct mempolicy *
2284mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2285{
2286 struct mempolicy *pol = NULL;
2287 struct sp_node *sn;
2288
2289 if (!sp->root.rb_node)
2290 return NULL;
42288fe3 2291 spin_lock(&sp->lock);
1da177e4
LT
2292 sn = sp_lookup(sp, idx, idx+1);
2293 if (sn) {
2294 mpol_get(sn->policy);
2295 pol = sn->policy;
2296 }
42288fe3 2297 spin_unlock(&sp->lock);
1da177e4
LT
2298 return pol;
2299}
2300
63f74ca2
KM
2301static void sp_free(struct sp_node *n)
2302{
2303 mpol_put(n->policy);
2304 kmem_cache_free(sn_cache, n);
2305}
2306
771fb4d8
LS
2307/**
2308 * mpol_misplaced - check whether current page node is valid in policy
2309 *
2310 * @page - page to be checked
2311 * @vma - vm area where page mapped
2312 * @addr - virtual address where page mapped
2313 *
2314 * Lookup current policy node id for vma,addr and "compare to" page's
2315 * node id.
2316 *
2317 * Returns:
2318 * -1 - not misplaced, page is in the right node
2319 * node - node id where the page should be
2320 *
2321 * Policy determination "mimics" alloc_page_vma().
2322 * Called from fault path where we know the vma and faulting address.
2323 */
2324int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2325{
2326 struct mempolicy *pol;
2327 struct zone *zone;
2328 int curnid = page_to_nid(page);
2329 unsigned long pgoff;
90572890
PZ
2330 int thiscpu = raw_smp_processor_id();
2331 int thisnid = cpu_to_node(thiscpu);
771fb4d8
LS
2332 int polnid = -1;
2333 int ret = -1;
2334
2335 BUG_ON(!vma);
2336
2337 pol = get_vma_policy(current, vma, addr);
2338 if (!(pol->flags & MPOL_F_MOF))
2339 goto out;
2340
2341 switch (pol->mode) {
2342 case MPOL_INTERLEAVE:
2343 BUG_ON(addr >= vma->vm_end);
2344 BUG_ON(addr < vma->vm_start);
2345
2346 pgoff = vma->vm_pgoff;
2347 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2348 polnid = offset_il_node(pol, vma, pgoff);
2349 break;
2350
2351 case MPOL_PREFERRED:
2352 if (pol->flags & MPOL_F_LOCAL)
2353 polnid = numa_node_id();
2354 else
2355 polnid = pol->v.preferred_node;
2356 break;
2357
2358 case MPOL_BIND:
2359 /*
2360 * allows binding to multiple nodes.
2361 * use current page if in policy nodemask,
2362 * else select nearest allowed node, if any.
2363 * If no allowed nodes, use current [!misplaced].
2364 */
2365 if (node_isset(curnid, pol->v.nodes))
2366 goto out;
2367 (void)first_zones_zonelist(
2368 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2369 gfp_zone(GFP_HIGHUSER),
2370 &pol->v.nodes, &zone);
2371 polnid = zone->node;
2372 break;
2373
2374 default:
2375 BUG();
2376 }
5606e387
MG
2377
2378 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2379 if (pol->flags & MPOL_F_MORON) {
90572890
PZ
2380 int last_cpupid;
2381 int this_cpupid;
e42c8ff2 2382
90572890
PZ
2383 polnid = thisnid;
2384 this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
5606e387 2385
e42c8ff2
MG
2386 /*
2387 * Multi-stage node selection is used in conjunction
2388 * with a periodic migration fault to build a temporal
2389 * task<->page relation. By using a two-stage filter we
2390 * remove short/unlikely relations.
2391 *
2392 * Using P(p) ~ n_p / n_t as per frequentist
2393 * probability, we can equate a task's usage of a
2394 * particular page (n_p) per total usage of this
2395 * page (n_t) (in a given time-span) to a probability.
2396 *
2397 * Our periodic faults will sample this probability and
2398 * getting the same result twice in a row, given these
2399 * samples are fully independent, is then given by
2400 * P(n)^2, provided our sample period is sufficiently
2401 * short compared to the usage pattern.
2402 *
2403 * This quadratic squishes small probabilities, making
2404 * it less likely we act on an unlikely task<->page
2405 * relation.
2406 */
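/*
 * Numeric example (illustrative): if a remote task accounts for only
 * 10% of the accesses to this page, the chance of sampling it twice
 * in a row is roughly 1%, so migration towards that task's node is
 * rarely triggered.
 */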
90572890 2407 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
de1c9ce6 2408 if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) {
de1c9ce6
RR
2409 goto out;
2410 }
e42c8ff2
MG
2411 }
2412
771fb4d8
LS
2413 if (curnid != polnid)
2414 ret = polnid;
2415out:
2416 mpol_cond_put(pol);
2417
2418 return ret;
2419}
2420
1da177e4
LT
2421static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2422{
140d5a49 2423 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2424 rb_erase(&n->nd, &sp->root);
63f74ca2 2425 sp_free(n);
1da177e4
LT
2426}
2427
42288fe3
MG
2428static void sp_node_init(struct sp_node *node, unsigned long start,
2429 unsigned long end, struct mempolicy *pol)
2430{
2431 node->start = start;
2432 node->end = end;
2433 node->policy = pol;
2434}
2435
dbcb0f19
AB
2436static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2437 struct mempolicy *pol)
1da177e4 2438{
869833f2
KM
2439 struct sp_node *n;
2440 struct mempolicy *newpol;
1da177e4 2441
869833f2 2442 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2443 if (!n)
2444 return NULL;
869833f2
KM
2445
2446 newpol = mpol_dup(pol);
2447 if (IS_ERR(newpol)) {
2448 kmem_cache_free(sn_cache, n);
2449 return NULL;
2450 }
2451 newpol->flags |= MPOL_F_SHARED;
42288fe3 2452 sp_node_init(n, start, end, newpol);
869833f2 2453
1da177e4
LT
2454 return n;
2455}
2456
2457/* Replace a policy range. */
2458static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2459 unsigned long end, struct sp_node *new)
2460{
b22d127a 2461 struct sp_node *n;
42288fe3
MG
2462 struct sp_node *n_new = NULL;
2463 struct mempolicy *mpol_new = NULL;
b22d127a 2464 int ret = 0;
1da177e4 2465
42288fe3
MG
2466restart:
2467 spin_lock(&sp->lock);
1da177e4
LT
2468 n = sp_lookup(sp, start, end);
2469 /* Take care of old policies in the same range. */
2470 while (n && n->start < end) {
2471 struct rb_node *next = rb_next(&n->nd);
2472 if (n->start >= start) {
2473 if (n->end <= end)
2474 sp_delete(sp, n);
2475 else
2476 n->start = end;
2477 } else {
2478 /* Old policy spanning whole new range. */
2479 if (n->end > end) {
42288fe3
MG
2480 if (!n_new)
2481 goto alloc_new;
2482
2483 *mpol_new = *n->policy;
2484 atomic_set(&mpol_new->refcnt, 1);
7880639c 2485 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2486 n->end = start;
5ca39575 2487 sp_insert(sp, n_new);
42288fe3
MG
2488 n_new = NULL;
2489 mpol_new = NULL;
1da177e4
LT
2490 break;
2491 } else
2492 n->end = start;
2493 }
2494 if (!next)
2495 break;
2496 n = rb_entry(next, struct sp_node, nd);
2497 }
2498 if (new)
2499 sp_insert(sp, new);
42288fe3
MG
2500 spin_unlock(&sp->lock);
2501 ret = 0;
2502
2503err_out:
2504 if (mpol_new)
2505 mpol_put(mpol_new);
2506 if (n_new)
2507 kmem_cache_free(sn_cache, n_new);
2508
b22d127a 2509 return ret;
42288fe3
MG
2510
2511alloc_new:
2512 spin_unlock(&sp->lock);
2513 ret = -ENOMEM;
2514 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2515 if (!n_new)
2516 goto err_out;
2517 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2518 if (!mpol_new)
2519 goto err_out;
2520 goto restart;
1da177e4
LT
2521}
2522
71fe804b
LS
2523/**
2524 * mpol_shared_policy_init - initialize shared policy for inode
2525 * @sp: pointer to inode shared policy
2526 * @mpol: struct mempolicy to install
2527 *
2528 * Install non-NULL @mpol in inode's shared policy rb-tree.
2529 * On entry, the current task has a reference on a non-NULL @mpol.
2530 * This must be released on exit.
4bfc4495 2531 * This is called at get_inode() time, so we can use GFP_KERNEL.
71fe804b
LS
2532 */
2533void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2534{
58568d2a
MX
2535 int ret;
2536
71fe804b 2537 sp->root = RB_ROOT; /* empty tree == default mempolicy */
42288fe3 2538 spin_lock_init(&sp->lock);
71fe804b
LS
2539
2540 if (mpol) {
2541 struct vm_area_struct pvma;
2542 struct mempolicy *new;
4bfc4495 2543 NODEMASK_SCRATCH(scratch);
71fe804b 2544
4bfc4495 2545 if (!scratch)
5c0c1654 2546 goto put_mpol;
71fe804b
LS
2547 /* contextualize the tmpfs mount point mempolicy */
2548 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2549 if (IS_ERR(new))
0cae3457 2550 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2551
2552 task_lock(current);
4bfc4495 2553 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2554 task_unlock(current);
15d77835 2555 if (ret)
5c0c1654 2556 goto put_new;
71fe804b
LS
2557
2558 /* Create pseudo-vma that contains just the policy */
2559 memset(&pvma, 0, sizeof(struct vm_area_struct));
2560 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2561 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2562
5c0c1654 2563put_new:
71fe804b 2564 mpol_put(new); /* drop initial ref */
0cae3457 2565free_scratch:
4bfc4495 2566 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2567put_mpol:
2568 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2569 }
2570}
2571
1da177e4
LT
2572int mpol_set_shared_policy(struct shared_policy *info,
2573 struct vm_area_struct *vma, struct mempolicy *npol)
2574{
2575 int err;
2576 struct sp_node *new = NULL;
2577 unsigned long sz = vma_pages(vma);
2578
028fec41 2579 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2580 vma->vm_pgoff,
45c4745a 2581 sz, npol ? npol->mode : -1,
028fec41 2582 npol ? npol->flags : -1,
00ef2d2f 2583 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2584
2585 if (npol) {
2586 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2587 if (!new)
2588 return -ENOMEM;
2589 }
2590 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2591 if (err && new)
63f74ca2 2592 sp_free(new);
1da177e4
LT
2593 return err;
2594}
2595
2596/* Free a backing policy store on inode delete. */
2597void mpol_free_shared_policy(struct shared_policy *p)
2598{
2599 struct sp_node *n;
2600 struct rb_node *next;
2601
2602 if (!p->root.rb_node)
2603 return;
42288fe3 2604 spin_lock(&p->lock);
1da177e4
LT
2605 next = rb_first(&p->root);
2606 while (next) {
2607 n = rb_entry(next, struct sp_node, nd);
2608 next = rb_next(&n->nd);
63f74ca2 2609 sp_delete(p, n);
1da177e4 2610 }
42288fe3 2611 spin_unlock(&p->lock);
1da177e4
LT
2612}
2613
1a687c2e
MG
2614#ifdef CONFIG_NUMA_BALANCING
2615static bool __initdata numabalancing_override;
2616
2617static void __init check_numabalancing_enable(void)
2618{
2619 bool numabalancing_default = false;
2620
2621 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2622 numabalancing_default = true;
2623
2624 if (nr_node_ids > 1 && !numabalancing_override) {
2625 printk(KERN_INFO "Enabling automatic NUMA balancing. "
2626 "Configure with numa_balancing= or sysctl\n");
2627 set_numabalancing_state(numabalancing_default);
2628 }
2629}
2630
2631static int __init setup_numabalancing(char *str)
2632{
2633 int ret = 0;
2634 if (!str)
2635 goto out;
2636 numabalancing_override = true;
2637
2638 if (!strcmp(str, "enable")) {
2639 set_numabalancing_state(true);
2640 ret = 1;
2641 } else if (!strcmp(str, "disable")) {
2642 set_numabalancing_state(false);
2643 ret = 1;
2644 }
2645out:
2646 if (!ret)
2647 printk(KERN_WARNING "Unable to parse numa_balancing=\n");
2648
2649 return ret;
2650}
2651__setup("numa_balancing=", setup_numabalancing);
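/*
 * Example (illustrative): booting with "numa_balancing=disable" on the
 * kernel command line forces automatic NUMA balancing off, regardless of
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED.
 */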
2652#else
2653static inline void __init check_numabalancing_enable(void)
2654{
2655}
2656#endif /* CONFIG_NUMA_BALANCING */
2657
1da177e4
LT
2658/* assumes fs == KERNEL_DS */
2659void __init numa_policy_init(void)
2660{
b71636e2
PM
2661 nodemask_t interleave_nodes;
2662 unsigned long largest = 0;
2663 int nid, prefer = 0;
2664
1da177e4
LT
2665 policy_cache = kmem_cache_create("numa_policy",
2666 sizeof(struct mempolicy),
20c2df83 2667 0, SLAB_PANIC, NULL);
1da177e4
LT
2668
2669 sn_cache = kmem_cache_create("shared_policy_node",
2670 sizeof(struct sp_node),
20c2df83 2671 0, SLAB_PANIC, NULL);
1da177e4 2672
5606e387
MG
2673 for_each_node(nid) {
2674 preferred_node_policy[nid] = (struct mempolicy) {
2675 .refcnt = ATOMIC_INIT(1),
2676 .mode = MPOL_PREFERRED,
2677 .flags = MPOL_F_MOF | MPOL_F_MORON,
2678 .v = { .preferred_node = nid, },
2679 };
2680 }
2681
b71636e2
PM
2682 /*
2683 * Set interleaving policy for system init. Interleaving is only
2684 * enabled across suitably sized nodes (default is >= 16MB), or
2685 * fall back to the largest node if they're all smaller.
2686 */
2687 nodes_clear(interleave_nodes);
01f13bd6 2688 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2689 unsigned long total_pages = node_present_pages(nid);
2690
2691 /* Preserve the largest node */
2692 if (largest < total_pages) {
2693 largest = total_pages;
2694 prefer = nid;
2695 }
2696
2697 /* Interleave this node? */
2698 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2699 node_set(nid, interleave_nodes);
2700 }
2701
2702 /* All too small, use the largest */
2703 if (unlikely(nodes_empty(interleave_nodes)))
2704 node_set(prefer, interleave_nodes);
1da177e4 2705
028fec41 2706 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1da177e4 2707 printk("numa_policy_init: interleaving failed\n");
1a687c2e
MG
2708
2709 check_numabalancing_enable();
1da177e4
LT
2710}
2711
8bccd85f 2712/* Reset policy of current process to default */
1da177e4
LT
2713void numa_default_policy(void)
2714{
028fec41 2715 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2716}
68860ec1 2717
095f1fc4
LS
2718/*
2719 * Parse and format mempolicy from/to strings
2720 */
2721
1a75a6c8 2722/*
f2a07f40 2723 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2724 */
345ace9c
LS
2725static const char * const policy_modes[] =
2726{
2727 [MPOL_DEFAULT] = "default",
2728 [MPOL_PREFERRED] = "prefer",
2729 [MPOL_BIND] = "bind",
2730 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2731 [MPOL_LOCAL] = "local",
345ace9c 2732};
1a75a6c8 2733
095f1fc4
LS
2734
2735#ifdef CONFIG_TMPFS
2736/**
f2a07f40 2737 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2738 * @str: string containing mempolicy to parse
71fe804b 2739 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2740 *
2741 * Format of input:
2742 * <mode>[=<flags>][:<nodelist>]
2743 *
71fe804b 2744 * On success, returns 0, else 1
095f1fc4 2745 */
a7a88b23 2746int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2747{
71fe804b 2748 struct mempolicy *new = NULL;
b4652e84 2749 unsigned short mode;
f2a07f40 2750 unsigned short mode_flags;
71fe804b 2751 nodemask_t nodes;
095f1fc4
LS
2752 char *nodelist = strchr(str, ':');
2753 char *flags = strchr(str, '=');
095f1fc4
LS
2754 int err = 1;
2755
2756 if (nodelist) {
2757 /* NUL-terminate mode or flags string */
2758 *nodelist++ = '\0';
71fe804b 2759 if (nodelist_parse(nodelist, nodes))
095f1fc4 2760 goto out;
01f13bd6 2761 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2762 goto out;
71fe804b
LS
2763 } else
2764 nodes_clear(nodes);
2765
095f1fc4
LS
2766 if (flags)
2767 *flags++ = '\0'; /* terminate mode string */
2768
479e2802 2769 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2770 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2771 break;
2772 }
2773 }
a720094d 2774 if (mode >= MPOL_MAX)
095f1fc4
LS
2775 goto out;
2776
71fe804b 2777 switch (mode) {
095f1fc4 2778 case MPOL_PREFERRED:
71fe804b
LS
2779 /*
2780 * Insist on a nodelist of one node only
2781 */
095f1fc4
LS
2782 if (nodelist) {
2783 char *rest = nodelist;
2784 while (isdigit(*rest))
2785 rest++;
926f2ae0
KM
2786 if (*rest)
2787 goto out;
095f1fc4
LS
2788 }
2789 break;
095f1fc4
LS
2790 case MPOL_INTERLEAVE:
2791 /*
2792 * Default to online nodes with memory if no nodelist
2793 */
2794 if (!nodelist)
01f13bd6 2795 nodes = node_states[N_MEMORY];
3f226aa1 2796 break;
71fe804b 2797 case MPOL_LOCAL:
3f226aa1 2798 /*
71fe804b 2799 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2800 */
71fe804b 2801 if (nodelist)
3f226aa1 2802 goto out;
71fe804b 2803 mode = MPOL_PREFERRED;
3f226aa1 2804 break;
413b43de
RT
2805 case MPOL_DEFAULT:
2806 /*
2807 * Insist on an empty nodelist
2808 */
2809 if (!nodelist)
2810 err = 0;
2811 goto out;
d69b2e63
KM
2812 case MPOL_BIND:
2813 /*
2814 * Insist on a nodelist
2815 */
2816 if (!nodelist)
2817 goto out;
095f1fc4
LS
2818 }
2819
71fe804b 2820 mode_flags = 0;
095f1fc4
LS
2821 if (flags) {
2822 /*
2823 * Currently, we only support two mutually exclusive
2824 * mode flags.
2825 */
2826 if (!strcmp(flags, "static"))
71fe804b 2827 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2828 else if (!strcmp(flags, "relative"))
71fe804b 2829 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2830 else
926f2ae0 2831 goto out;
095f1fc4 2832 }
71fe804b
LS
2833
2834 new = mpol_new(mode, mode_flags, &nodes);
2835 if (IS_ERR(new))
926f2ae0
KM
2836 goto out;
2837
f2a07f40
HD
2838 /*
2839 * Save nodes for mpol_to_str() to show the tmpfs mount options
2840 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2841 */
2842 if (mode != MPOL_PREFERRED)
2843 new->v.nodes = nodes;
2844 else if (nodelist)
2845 new->v.preferred_node = first_node(nodes);
2846 else
2847 new->flags |= MPOL_F_LOCAL;
2848
2849 /*
2850 * Save nodes for contextualization: this will be used to "clone"
2851 * the mempolicy in a specific context [cpuset] at a later time.
2852 */
2853 new->w.user_nodemask = nodes;
2854
926f2ae0 2855 err = 0;
71fe804b 2856
095f1fc4
LS
2857out:
2858 /* Restore string for error message */
2859 if (nodelist)
2860 *--nodelist = ':';
2861 if (flags)
2862 *--flags = '=';
71fe804b
LS
2863 if (!err)
2864 *mpol = new;
095f1fc4
LS
2865 return err;
2866}
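/*
 * Illustrative inputs accepted above, based on the modes and flags parsed:
 * "bind:0,2", "interleave=static:0-3", "prefer:1", "local".
 */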
2867#endif /* CONFIG_TMPFS */
2868
71fe804b
LS
2869/**
2870 * mpol_to_str - format a mempolicy structure for printing
2871 * @buffer: to contain formatted mempolicy string
2872 * @maxlen: length of @buffer
2873 * @pol: pointer to mempolicy to be formatted
71fe804b 2874 *
948927ee
DR
2875 * Convert @pol into a string. If @buffer is too short, truncate the string.
2876 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2877 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 2878 */
948927ee 2879void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2880{
2881 char *p = buffer;
948927ee
DR
2882 nodemask_t nodes = NODE_MASK_NONE;
2883 unsigned short mode = MPOL_DEFAULT;
2884 unsigned short flags = 0;
2291990a 2885
948927ee 2886 if (pol && pol != &default_policy) {
bea904d5 2887 mode = pol->mode;
948927ee
DR
2888 flags = pol->flags;
2889 }
bea904d5 2890
1a75a6c8
CL
2891 switch (mode) {
2892 case MPOL_DEFAULT:
1a75a6c8 2893 break;
1a75a6c8 2894 case MPOL_PREFERRED:
fc36b8d3 2895 if (flags & MPOL_F_LOCAL)
f2a07f40 2896 mode = MPOL_LOCAL;
53f2556b 2897 else
fc36b8d3 2898 node_set(pol->v.preferred_node, nodes);
1a75a6c8 2899 break;
1a75a6c8 2900 case MPOL_BIND:
1a75a6c8 2901 case MPOL_INTERLEAVE:
f2a07f40 2902 nodes = pol->v.nodes;
1a75a6c8 2903 break;
1a75a6c8 2904 default:
948927ee
DR
2905 WARN_ON_ONCE(1);
2906 snprintf(p, maxlen, "unknown");
2907 return;
1a75a6c8
CL
2908 }
2909
b7a9f420 2910 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 2911
fc36b8d3 2912 if (flags & MPOL_MODE_FLAGS) {
948927ee 2913 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 2914
2291990a
LS
2915 /*
2916 * Currently, the only defined flags are mutually exclusive
2917 */
f5b087b5 2918 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2919 p += snprintf(p, buffer + maxlen - p, "static");
2920 else if (flags & MPOL_F_RELATIVE_NODES)
2921 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2922 }
2923
1a75a6c8 2924 if (!nodes_empty(nodes)) {
948927ee 2925 p += snprintf(p, buffer + maxlen - p, ":");
1a75a6c8
CL
2926 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2927 }
1a75a6c8 2928}
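/*
 * Example (illustrative) outputs: "interleave:0-3" for MPOL_INTERLEAVE over
 * nodes 0-3, "prefer=static:1" for MPOL_PREFERRED|MPOL_F_STATIC_NODES on
 * node 1, and "local" for MPOL_PREFERRED|MPOL_F_LOCAL.
 */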