/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

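/*
 * Illustrative caller sequence (a sketch, not code from this file): callers
 * such as the move_pages() path further below drain the LRU, isolate pages
 * onto a private list and hand that list to migrate_pages(), putting back
 * whatever could not be moved:
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	err = migrate_pages(&pagelist, alloc_fn, NULL, private,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 *
 * alloc_fn is a caller-supplied new_page_t allocator; new_page_node() later
 * in this file is a concrete example.
 */
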
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes anybody doesn't touch PG_lock of newly allocated page
	 * so unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against a page being released.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* It should be called on page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

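/*
 * For reference (a sketch with hypothetical names, not code from this file):
 * a driver that wants its non-LRU pages handled by the functions above marks
 * them movable with __SetPageMovable() and supplies the three callbacks used
 * here and in move_to_new_page():
 *
 *	static const struct address_space_operations foo_aops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migratepage	= foo_migratepage,
 *		.putback_page	= foo_putback_page,
 *	};
 *
 * balloon_compaction and zsmalloc are in-tree users of this interface.
 */
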
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated non-lru movable page so here we can use
		 * __PageMovable because LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new))) {
			if (is_device_private_page(new)) {
				entry = make_device_private_entry(new, pte_write(pte));
				pte = swp_entry_to_pte(entry);
			} else if (is_device_public_page(new)) {
				pte = pte_mkdevmap(pte);
				flush_dcache_page(new);
			}
		} else
			flush_dcache_page(new);

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even if that fails, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

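/*
 * For context (a rough sketch of the caller side, not code from this file):
 * the page fault path in mm/memory.c is the main user of the wait helpers
 * above. When a fault hits a migration entry it does roughly
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *
 * and then returns so the fault is retried: a thread touching a page under
 * migration simply blocks until the migration entry has been replaced.
 */
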
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	/*
	 * Device public or private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	expected_count += is_device_public_page(page);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved first and we then failed to lock the buffers, we could
	 * not move the mapping back due to an elevated page count and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	get_page(newpage);	/* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock(&mapping->tree_lock);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);
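
/*
 * Worked example (illustrative only): for a clean page-cache page with
 * buffers and extra_count == 0, the function above expects
 *
 *	expected_count = 1 (caller's isolation reference)
 *	               + 1 (page cache / radix tree reference)
 *	               + 1 (PagePrivate buffer reference)
 *	               = 3
 *
 * matching the "3 for pages with a mapping and PagePrivate" rule in its
 * header comment. Any other transient reference (e.g. a racing get_page())
 * makes page_count() differ and the attempt backs off with -EAGAIN.
 */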

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page. We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page's flags and other state to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 * Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
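
/*
 * Typical use (a sketch, names hypothetical): filesystems whose pages carry
 * no private state simply plug migrate_page() into their
 * address_space_operations so move_to_new_page() can find it:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.migratepage	= migrate_page,
 *	};
 *
 * Block-device based filesystems with buffer_heads use buffer_migrate_page()
 * below instead.
 */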

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock()
	 * File Caches may use write_page() or lock_page() in migration, then,
	 * just care Anon page here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage
	 * which will not free the page because new page owner increased
	 * refcounter. As well, if it is LRU page, add the page to LRU
	 * list in here.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(__PageMovable(newpage)))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move(). Work
 * around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	int *result = NULL;
	struct page *newpage;

	newpage = get_new_page(page, private, &result);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	if (unlikely(PageTransHuge(page) && !PageTransHuge(newpage))) {
		lock_page(page);
		rc = split_huge_page(page);
		unlock_page(page);
		if (rc)
			goto out;
	}

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);

		/*
		 * Compaction can migrate also non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
	}

	/*
	 * If migration is successful, releases reference grabbed during
	 * isolation. Otherwise, restore the page to right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on just freed page
			 * intentionally. Although it's rather weird,
			 * it's how HWPoison flag works at the moment.
			 */
			if (!test_set_page_hwpoison(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN) {
			if (likely(!__PageMovable(page))) {
				putback_lru_page(page);
				goto put_new;
			}

			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		}
put_new:
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason)
{
	int rc = -EAGAIN;
	int *result = NULL;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;

	/*
	 * Movability of hugepages depends on architectures and hugepage size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private, &result);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		try_to_unmap(hpage,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		hugetlb_cgroup_migrate(hpage, new_hpage);
		put_new_page = NULL;
		set_page_owner_migrate_reason(new_hpage, reason);
	}

	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);
	if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
		num_poisoned_pages_inc();

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist any more.
 * The caller should call putback_movable_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason);

			switch (rc) {
			case -ENOMEM:
				nr_failed++;
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				break;
			}
		}
	}
	nr_failed += retry;
	rc = nr_failed;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}

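/*
 * Shape of the allocation callback a migrate_pages() caller supplies
 * (illustrative, names hypothetical); new_page_node() below is the real one
 * used by the move_pages() syscall:
 *
 *	static struct page *alloc_target(struct page *page,
 *					 unsigned long private, int **result)
 *	{
 *		return __alloc_pages_node((int)private,
 *					  GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 * The matching free_page_t callback, when non-NULL, is only invoked for
 * target pages whose migration did not succeed.
 */
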
742755a1
CL
1445#ifdef CONFIG_NUMA
1446/*
1447 * Move a list of individual pages
1448 */
1449struct page_to_node {
1450 unsigned long addr;
1451 struct page *page;
1452 int node;
1453 int status;
1454};
1455
1456static struct page *new_page_node(struct page *p, unsigned long private,
1457 int **result)
1458{
1459 struct page_to_node *pm = (struct page_to_node *)private;
1460
1461 while (pm->node != MAX_NUMNODES && pm->page != p)
1462 pm++;
1463
1464 if (pm->node == MAX_NUMNODES)
1465 return NULL;
1466
1467 *result = &pm->status;
1468
e632a938
NH
1469 if (PageHuge(p))
1470 return alloc_huge_page_node(page_hstate(compound_head(p)),
1471 pm->node);
e8db67eb
NH
1472 else if (thp_migration_supported() && PageTransHuge(p)) {
1473 struct page *thp;
1474
1475 thp = alloc_pages_node(pm->node,
1476 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
1477 HPAGE_PMD_ORDER);
1478 if (!thp)
1479 return NULL;
1480 prep_transhuge_page(thp);
1481 return thp;
1482 } else
96db800f 1483 return __alloc_pages_node(pm->node,
e97ca8e5 1484 GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
742755a1
CL
1485}
1486
1487/*
1488 * Move a set of pages as indicated in the pm array. The addr
1489 * field must be set to the virtual address of the page to be moved
1490 * and the node number must contain a valid target node.
5e9a0f02 1491 * The pm array ends with node = MAX_NUMNODES.
742755a1 1492 */
5e9a0f02
BG
1493static int do_move_page_to_node_array(struct mm_struct *mm,
1494 struct page_to_node *pm,
1495 int migrate_all)
742755a1
CL
1496{
1497 int err;
1498 struct page_to_node *pp;
1499 LIST_HEAD(pagelist);
1500
1501 down_read(&mm->mmap_sem);
1502
1503 /*
1504 * Build a list of pages to migrate
1505 */
742755a1
CL
1506 for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1507 struct vm_area_struct *vma;
1508 struct page *page;
e8db67eb
NH
1509 struct page *head;
1510 unsigned int follflags;
742755a1 1511
742755a1
CL
1512 err = -EFAULT;
1513 vma = find_vma(mm, pp->addr);
70384dc6 1514 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
742755a1
CL
1515 goto set_status;
1516
d899844e 1517 /* FOLL_DUMP to ignore special (like zero) pages */
e8db67eb
NH
1518 follflags = FOLL_GET | FOLL_DUMP;
1519 if (!thp_migration_supported())
1520 follflags |= FOLL_SPLIT;
1521 page = follow_page(vma, pp->addr, follflags);
89f5b7da
LT
1522
1523 err = PTR_ERR(page);
1524 if (IS_ERR(page))
1525 goto set_status;
1526
742755a1
CL
1527 err = -ENOENT;
1528 if (!page)
1529 goto set_status;
1530
742755a1
CL
1531 err = page_to_nid(page);
1532
1533 if (err == pp->node)
1534 /*
1535 * Node already in the right place
1536 */
1537 goto put_and_set;
1538
1539 err = -EACCES;
1540 if (page_mapcount(page) > 1 &&
1541 !migrate_all)
1542 goto put_and_set;
1543
e632a938 1544 if (PageHuge(page)) {
e8db67eb 1545 if (PageHead(page)) {
e66f17ff 1546 isolate_huge_page(page, &pagelist);
e8db67eb
NH
1547 err = 0;
1548 pp->page = page;
1549 }
e632a938
NH
1550 goto put_and_set;
1551 }
1552
e8db67eb
NH
1553 pp->page = compound_head(page);
1554 head = compound_head(page);
1555 err = isolate_lru_page(head);
6d9c285a 1556 if (!err) {
e8db67eb
NH
1557 list_add_tail(&head->lru, &pagelist);
1558 mod_node_page_state(page_pgdat(head),
1559 NR_ISOLATED_ANON + page_is_file_cache(head),
1560 hpage_nr_pages(head));
6d9c285a 1561 }
742755a1
CL
1562put_and_set:
1563 /*
1564 * Either remove the duplicate refcount from
1565 * isolate_lru_page() or drop the page ref if it was
1566 * not isolated.
1567 */
1568 put_page(page);
1569set_status:
1570 pp->status = err;
1571 }
1572
e78bbfa8 1573 err = 0;
cf608ac1 1574 if (!list_empty(&pagelist)) {
68711a74 1575 err = migrate_pages(&pagelist, new_page_node, NULL,
9c620e2b 1576 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 1577 if (err)
e632a938 1578 putback_movable_pages(&pagelist);
cf608ac1 1579 }
742755a1
CL
1580
1581 up_read(&mm->mmap_sem);
1582 return err;
1583}
1584
5e9a0f02
BG
1585/*
1586 * Migrate an array of page address onto an array of nodes and fill
1587 * the corresponding array of status.
1588 */
3268c63e 1589static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
1590 unsigned long nr_pages,
1591 const void __user * __user *pages,
1592 const int __user *nodes,
1593 int __user *status, int flags)
1594{
3140a227 1595 struct page_to_node *pm;
3140a227
BG
1596 unsigned long chunk_nr_pages;
1597 unsigned long chunk_start;
1598 int err;
5e9a0f02 1599
3140a227
BG
1600 err = -ENOMEM;
1601 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1602 if (!pm)
5e9a0f02 1603 goto out;
35282a2d
BG
1604
1605 migrate_prep();
1606
5e9a0f02 1607 /*
3140a227
BG
1608 * Store a chunk of page_to_node array in a page,
1609 * but keep the last one as a marker
5e9a0f02 1610 */
3140a227 1611 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
5e9a0f02 1612
3140a227
BG
1613 for (chunk_start = 0;
1614 chunk_start < nr_pages;
1615 chunk_start += chunk_nr_pages) {
1616 int j;
5e9a0f02 1617
3140a227
BG
1618 if (chunk_start + chunk_nr_pages > nr_pages)
1619 chunk_nr_pages = nr_pages - chunk_start;
1620
1621 /* fill the chunk pm with addrs and nodes from user-space */
1622 for (j = 0; j < chunk_nr_pages; j++) {
1623 const void __user *p;
5e9a0f02
BG
1624 int node;
1625
3140a227
BG
1626 err = -EFAULT;
1627 if (get_user(p, pages + j + chunk_start))
1628 goto out_pm;
1629 pm[j].addr = (unsigned long) p;
1630
1631 if (get_user(node, nodes + j + chunk_start))
5e9a0f02
BG
1632 goto out_pm;
1633
1634 err = -ENODEV;
6f5a55f1
LT
1635 if (node < 0 || node >= MAX_NUMNODES)
1636 goto out_pm;
1637
389162c2 1638 if (!node_state(node, N_MEMORY))
5e9a0f02
BG
1639 goto out_pm;
1640
1641 err = -EACCES;
1642 if (!node_isset(node, task_nodes))
1643 goto out_pm;
1644
3140a227
BG
1645 pm[j].node = node;
1646 }
1647
1648 /* End marker for this chunk */
1649 pm[chunk_nr_pages].node = MAX_NUMNODES;
1650
1651 /* Migrate this chunk */
1652 err = do_move_page_to_node_array(mm, pm,
1653 flags & MPOL_MF_MOVE_ALL);
1654 if (err < 0)
1655 goto out_pm;
5e9a0f02 1656
5e9a0f02 1657 /* Return status information */
3140a227
BG
1658 for (j = 0; j < chunk_nr_pages; j++)
1659 if (put_user(pm[j].status, status + j + chunk_start)) {
5e9a0f02 1660 err = -EFAULT;
3140a227
BG
1661 goto out_pm;
1662 }
1663 }
1664 err = 0;
5e9a0f02
BG
1665
1666out_pm:
3140a227 1667 free_page((unsigned long)pm);
5e9a0f02
BG
1668out:
1669 return err;
1670}
1671
742755a1 1672/*
2f007e74 1673 * Determine the nodes of an array of pages and store it in an array of status.
742755a1 1674 */
80bba129
BG
1675static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1676 const void __user **pages, int *status)
742755a1 1677{
2f007e74 1678 unsigned long i;
2f007e74 1679
742755a1
CL
1680 down_read(&mm->mmap_sem);
1681
2f007e74 1682 for (i = 0; i < nr_pages; i++) {
80bba129 1683 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
1684 struct vm_area_struct *vma;
1685 struct page *page;
c095adbc 1686 int err = -EFAULT;
2f007e74
BG
1687
1688 vma = find_vma(mm, addr);
70384dc6 1689 if (!vma || addr < vma->vm_start)
742755a1
CL
1690 goto set_status;
1691
d899844e
KS
1692 /* FOLL_DUMP to ignore special (like zero) pages */
1693 page = follow_page(vma, addr, FOLL_DUMP);
89f5b7da
LT
1694
1695 err = PTR_ERR(page);
1696 if (IS_ERR(page))
1697 goto set_status;
1698
d899844e 1699 err = page ? page_to_nid(page) : -ENOENT;
742755a1 1700set_status:
80bba129
BG
1701 *status = err;
1702
1703 pages++;
1704 status++;
1705 }
1706
1707 up_read(&mm->mmap_sem);
1708}
1709
1710/*
1711 * Determine the nodes of a user array of pages and store it in
1712 * a user array of status.
1713 */
1714static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1715 const void __user * __user *pages,
1716 int __user *status)
1717{
1718#define DO_PAGES_STAT_CHUNK_NR 16
1719 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1720 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 1721
87b8d1ad
PA
1722 while (nr_pages) {
1723 unsigned long chunk_nr;
80bba129 1724
87b8d1ad
PA
1725 chunk_nr = nr_pages;
1726 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1727 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1728
1729 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1730 break;
80bba129
BG
1731
1732 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1733
87b8d1ad
PA
1734 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1735 break;
742755a1 1736
87b8d1ad
PA
1737 pages += chunk_nr;
1738 status += chunk_nr;
1739 nr_pages -= chunk_nr;
1740 }
1741 return nr_pages ? -EFAULT : 0;
742755a1
CL
1742}
1743
1744/*
1745 * Move a list of pages in the address space of the currently executing
1746 * process.
1747 */
938bb9f5
HC
1748SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1749 const void __user * __user *, pages,
1750 const int __user *, nodes,
1751 int __user *, status, int, flags)
742755a1 1752{
742755a1 1753 struct task_struct *task;
742755a1 1754 struct mm_struct *mm;
5e9a0f02 1755 int err;
3268c63e 1756 nodemask_t task_nodes;
742755a1
CL
1757
1758 /* Check flags */
1759 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1760 return -EINVAL;
1761
1762 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1763 return -EPERM;
1764
1765 /* Find the mm_struct */
a879bf58 1766 rcu_read_lock();
228ebcbe 1767 task = pid ? find_task_by_vpid(pid) : current;
742755a1 1768 if (!task) {
a879bf58 1769 rcu_read_unlock();
742755a1
CL
1770 return -ESRCH;
1771 }
3268c63e 1772 get_task_struct(task);
742755a1
CL
1773
1774 /*
1775 * Check if this process has the right to modify the specified
197e7e52 1776 * process. Use the regular "ptrace_may_access()" checks.
742755a1 1777 */
197e7e52 1778 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 1779 rcu_read_unlock();
742755a1 1780 err = -EPERM;
5e9a0f02 1781 goto out;
742755a1 1782 }
c69e8d9c 1783 rcu_read_unlock();
742755a1 1784
86c3a764
DQ
1785 err = security_task_movememory(task);
1786 if (err)
5e9a0f02 1787 goto out;
86c3a764 1788
3268c63e
CL
1789 task_nodes = cpuset_mems_allowed(task);
1790 mm = get_task_mm(task);
1791 put_task_struct(task);
1792
6e8b09ea
SL
1793 if (!mm)
1794 return -EINVAL;
1795
1796 if (nodes)
1797 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1798 nodes, status, flags);
1799 else
1800 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 1801
742755a1
CL
1802 mmput(mm);
1803 return err;
3268c63e
CL
1804
1805out:
1806 put_task_struct(task);
1807 return err;
742755a1 1808}
742755a1 1809
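/*
 * Illustrative userspace sketch (not part of this file): the same
 * syscall with a non-NULL nodes array goes through do_pages_move().
 * MPOL_MF_MOVE moves only pages mapped exclusively by the target
 * process; MPOL_MF_MOVE_ALL also moves shared pages but requires
 * CAP_SYS_NICE, matching the capable() check above. Build with -lnuma.
 */
#if 0	/* userspace example, not kernel code */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *buf = malloc(psz);
	void *pages[1];
	int nodes[1] = { 0 };	/* destination node; assumes node 0 exists */
	int status[1];

	buf[0] = 1;		/* make sure the page is populated */
	pages[0] = buf;

	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) != 0)
		perror("move_pages");
	else
		printf("status[0] = %d (node id, or negative errno)\n",
		       status[0]);
	free(buf);
	return 0;
}
#endif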
7039e1db
PZ
1810#ifdef CONFIG_NUMA_BALANCING
1811/*
1812 * Returns true if this is a safe migration target node for misplaced NUMA
1813 * pages. Currently it only checks the watermarks, which is crude.
1814 */
1815static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 1816 unsigned long nr_migrate_pages)
7039e1db
PZ
1817{
1818 int z;
599d0c95 1819
7039e1db
PZ
1820 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1821 struct zone *zone = pgdat->node_zones + z;
1822
1823 if (!populated_zone(zone))
1824 continue;
1825
7039e1db
PZ
1826 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1827 if (!zone_watermark_ok(zone, 0,
1828 high_wmark_pages(zone) +
1829 nr_migrate_pages,
1830 0, 0))
1831 continue;
1832 return true;
1833 }
1834 return false;
1835}
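/*
 * In other words: a node is accepted as a migration target only if at
 * least one of its populated zones keeps its free page count above
 * high_wmark_pages(zone) + nr_migrate_pages, so NUMA balancing never
 * pushes a target node below its high watermark (and never has to wake
 * kswapd on its behalf).
 */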
1836
1837static struct page *alloc_misplaced_dst_page(struct page *page,
1838 unsigned long data,
1839 int **result)
1840{
1841 int nid = (int) data;
1842 struct page *newpage;
1843
96db800f 1844 newpage = __alloc_pages_node(nid,
e97ca8e5
JW
1845 (GFP_HIGHUSER_MOVABLE |
1846 __GFP_THISNODE | __GFP_NOMEMALLOC |
1847 __GFP_NORETRY | __GFP_NOWARN) &
8479eba7 1848 ~__GFP_RECLAIM, 0);
bac0382c 1849
7039e1db
PZ
1850 return newpage;
1851}
1852
a8f60772
MG
1853/*
1854 * page migration rate limiting control.
1855 * Do not migrate more than ratelimit_pages in a migrate_interval_millisecs
1856 * window of time. Default here says do not migrate more than 1280M per second.
1857 */
1858static unsigned int migrate_interval_millisecs __read_mostly = 100;
1859static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
1860
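/*
 * Worked example, assuming 4 KiB pages: ratelimit_pages above is
 * 128 << (20 - 12) = 32768 pages per window, i.e. 128 MiB per 100 ms
 * window, which is the "1280M per second" figure quoted in the comment
 * above. With 64 KiB pages the same expression yields 2048 pages, still
 * 128 MiB per window.
 */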
b32967ff 1861/* Returns true if the node is migrate rate-limited after the update */
1c30e017
MG
1862static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1863 unsigned long nr_pages)
7039e1db 1864{
a8f60772
MG
1865 /*
1866 * Rate-limit the amount of data that is being migrated to a node.
1867 * Optimal placement is no good if the memory bus is saturated and
1868 * all the time is being spent migrating!
1869 */
a8f60772 1870 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1c5e9c27 1871 spin_lock(&pgdat->numabalancing_migrate_lock);
a8f60772
MG
1872 pgdat->numabalancing_migrate_nr_pages = 0;
1873 pgdat->numabalancing_migrate_next_window = jiffies +
1874 msecs_to_jiffies(migrate_interval_millisecs);
1c5e9c27 1875 spin_unlock(&pgdat->numabalancing_migrate_lock);
a8f60772 1876 }
af1839d7
MG
1877 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1878 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1879 nr_pages);
1c5e9c27 1880 return true;
af1839d7 1881 }
1c5e9c27
MG
1882
1883 /*
1884 * This is an unlocked non-atomic update so errors are possible.
1885 * The consequences are failing to migrate when we potentially should
1886 * have, which is not severe enough to warrant locking. If it is ever
1887 * a problem, it can be converted to a per-cpu counter.
1888 */
1889 pgdat->numabalancing_migrate_nr_pages += nr_pages;
1890 return false;
b32967ff
MG
1891}
1892
1c30e017 1893static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
b32967ff 1894{
340ef390 1895 int page_lru;
a8f60772 1896
309381fe 1897 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
3abef4e6 1898
7039e1db 1899 /* Avoid migrating to a node that is nearly full */
340ef390
HD
1900 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1901 return 0;
7039e1db 1902
340ef390
HD
1903 if (isolate_lru_page(page))
1904 return 0;
7039e1db 1905
340ef390
HD
1906 /*
1907 * migrate_misplaced_transhuge_page() skips page migration's usual
1908 * check on page_count(), so we must do it here, now that the page
1909 * has been isolated: a GUP pin, or any other pin, prevents migration.
1910 * The expected page count is 3: 1 for the page's mapcount, 1 for the
1911 * caller's pin and 1 for the reference taken by isolate_lru_page().
1912 */
1913 if (PageTransHuge(page) && page_count(page) != 3) {
1914 putback_lru_page(page);
1915 return 0;
7039e1db
PZ
1916 }
1917
340ef390 1918 page_lru = page_is_file_cache(page);
599d0c95 1919 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
340ef390
HD
1920 hpage_nr_pages(page));
1921
149c33e1 1922 /*
340ef390
HD
1923 * Isolating the page has taken another reference, so the
1924 * caller's reference can be safely dropped without the page
1925 * disappearing underneath us during migration.
149c33e1
MG
1926 */
1927 put_page(page);
340ef390 1928 return 1;
b32967ff
MG
1929}
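/*
 * Worked example for the THP check above: an exclusively mapped THP
 * with no other users reaches this point with page_count() == 3 -- one
 * reference from its single mapping, one from the caller's pin and one
 * just taken by isolate_lru_page(). Any additional reference (for
 * example a concurrent get_user_pages() pin) raises the count above 3
 * and the page is put back instead of being migrated.
 */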
1930
de466bd6
MG
1931bool pmd_trans_migrating(pmd_t pmd)
1932{
1933 struct page *page = pmd_page(pmd);
1934 return PageLocked(page);
1935}
1936
b32967ff
MG
1937/*
1938 * Attempt to migrate a misplaced page to the specified destination
1939 * node. Caller is expected to have an elevated reference count on
1940 * the page that will be dropped by this function before returning.
1941 */
1bc115d8
MG
1942int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1943 int node)
b32967ff
MG
1944{
1945 pg_data_t *pgdat = NODE_DATA(node);
340ef390 1946 int isolated;
b32967ff
MG
1947 int nr_remaining;
1948 LIST_HEAD(migratepages);
1949
1950 /*
1bc115d8
MG
1951 * Don't migrate file pages that are mapped in multiple processes
1952 * with execute permissions as they are probably shared libraries.
b32967ff 1953 */
1bc115d8
MG
1954 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1955 (vma->vm_flags & VM_EXEC))
b32967ff 1956 goto out;
b32967ff
MG
1957
1958 /*
1959 * Rate-limit the amount of data that is being migrated to a node.
1960 * Optimal placement is no good if the memory bus is saturated and
1961 * all the time is being spent migrating!
1962 */
340ef390 1963 if (numamigrate_update_ratelimit(pgdat, 1))
b32967ff 1964 goto out;
b32967ff
MG
1965
1966 isolated = numamigrate_isolate_page(pgdat, page);
1967 if (!isolated)
1968 goto out;
1969
1970 list_add(&page->lru, &migratepages);
9c620e2b 1971 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
68711a74
DR
1972 NULL, node, MIGRATE_ASYNC,
1973 MR_NUMA_MISPLACED);
b32967ff 1974 if (nr_remaining) {
59c82b70
JK
1975 if (!list_empty(&migratepages)) {
1976 list_del(&page->lru);
599d0c95 1977 dec_node_page_state(page, NR_ISOLATED_ANON +
59c82b70
JK
1978 page_is_file_cache(page));
1979 putback_lru_page(page);
1980 }
b32967ff
MG
1981 isolated = 0;
1982 } else
1983 count_vm_numa_event(NUMA_PAGE_MIGRATE);
7039e1db 1984 BUG_ON(!list_empty(&migratepages));
7039e1db 1985 return isolated;
340ef390
HD
1986
1987out:
1988 put_page(page);
1989 return 0;
7039e1db 1990}
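/*
 * Illustrative caller sketch (not from this file): the NUMA hinting
 * fault paths are the expected users. They hold a reference on the
 * faulting page, choose a target node, and hand both over; the
 * reference is consumed on every return path of
 * migrate_misplaced_page(), so the page must not be touched afterwards.
 *
 *	get_page(page);
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		account the fault as a successful NUMA migration;
 */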
220018d3 1991#endif /* CONFIG_NUMA_BALANCING */
b32967ff 1992
220018d3 1993#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
340ef390
HD
1994/*
1995 * Migrates a THP to a given target node. page must be locked and is unlocked
1996 * before returning.
1997 */
b32967ff
MG
1998int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1999 struct vm_area_struct *vma,
2000 pmd_t *pmd, pmd_t entry,
2001 unsigned long address,
2002 struct page *page, int node)
2003{
c4088ebd 2004 spinlock_t *ptl;
b32967ff
MG
2005 pg_data_t *pgdat = NODE_DATA(node);
2006 int isolated = 0;
2007 struct page *new_page = NULL;
b32967ff 2008 int page_lru = page_is_file_cache(page);
f714f4f2
MG
2009 unsigned long mmun_start = address & HPAGE_PMD_MASK;
2010 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
b32967ff 2011
b32967ff
MG
2012 /*
2013 * Rate-limit the amount of data that is being migrated to a node.
2014 * Optimal placement is no good if the memory bus is saturated and
2015 * all the time is being spent migrating!
2016 */
d28d4335 2017 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
b32967ff
MG
2018 goto out_dropref;
2019
2020 new_page = alloc_pages_node(node,
25160354 2021 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
e97ca8e5 2022 HPAGE_PMD_ORDER);
340ef390
HD
2023 if (!new_page)
2024 goto out_fail;
9a982250 2025 prep_transhuge_page(new_page);
340ef390 2026
b32967ff 2027 isolated = numamigrate_isolate_page(pgdat, page);
340ef390 2028 if (!isolated) {
b32967ff 2029 put_page(new_page);
340ef390 2030 goto out_fail;
b32967ff 2031 }
b0943d61 2032
b32967ff 2033 /* Prepare a page as a migration target */
48c935ad 2034 __SetPageLocked(new_page);
d44d363f
SL
2035 if (PageSwapBacked(page))
2036 __SetPageSwapBacked(new_page);
b32967ff
MG
2037
2038 /* anon mapping, we can simply copy page->mapping to the new page: */
2039 new_page->mapping = page->mapping;
2040 new_page->index = page->index;
2041 migrate_page_copy(new_page, page);
2042 WARN_ON(PageLRU(new_page));
2043
2044 /* Recheck the target PMD */
f714f4f2 2045 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
c4088ebd 2046 ptl = pmd_lock(mm, pmd);
f4e177d1 2047 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
c4088ebd 2048 spin_unlock(ptl);
f714f4f2 2049 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
b32967ff
MG
2050
2051 /* Reverse changes made by migrate_page_copy() */
2052 if (TestClearPageActive(new_page))
2053 SetPageActive(page);
2054 if (TestClearPageUnevictable(new_page))
2055 SetPageUnevictable(page);
b32967ff
MG
2056
2057 unlock_page(new_page);
2058 put_page(new_page); /* Free it */
2059
a54a407f
MG
2060 /* Retake the callers reference and putback on LRU */
2061 get_page(page);
b32967ff 2062 putback_lru_page(page);
599d0c95 2063 mod_node_page_state(page_pgdat(page),
a54a407f 2064 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
eb4489f6
MG
2065
2066 goto out_unlock;
b32967ff
MG
2067 }
2068
10102459 2069 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2b4847e7 2070 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
b32967ff 2071
2b4847e7
MG
2072 /*
2073 * Clear the old entry under pagetable lock and establish the new PTE.
2074 * Any parallel GUP will either observe the old page blocking on the
2075 * page lock, block on the page table lock or observe the new page.
2076 * The SetPageUptodate on the new page and page_add_new_anon_rmap
2077 * guarantee the copy is visible before the pagetable update.
2078 */
f714f4f2 2079 flush_cache_range(vma, mmun_start, mmun_end);
d281ee61 2080 page_add_anon_rmap(new_page, vma, mmun_start, true);
8809aa2d 2081 pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
f714f4f2 2082 set_pmd_at(mm, mmun_start, pmd, entry);
ce4a9cc5 2083 update_mmu_cache_pmd(vma, address, &entry);
2b4847e7 2084
f4e177d1 2085 page_ref_unfreeze(page, 2);
51afb12b 2086 mlock_migrate_page(new_page, page);
d281ee61 2087 page_remove_rmap(page, true);
7cd12b4a 2088 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2b4847e7 2089
c4088ebd 2090 spin_unlock(ptl);
f714f4f2 2091 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
b32967ff 2092
11de9927
MG
2093 /* Take an "isolate" reference and put new page on the LRU. */
2094 get_page(new_page);
2095 putback_lru_page(new_page);
2096
b32967ff
MG
2097 unlock_page(new_page);
2098 unlock_page(page);
2099 put_page(page); /* Drop the rmap reference */
2100 put_page(page); /* Drop the LRU isolation reference */
2101
2102 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2103 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2104
599d0c95 2105 mod_node_page_state(page_pgdat(page),
b32967ff
MG
2106 NR_ISOLATED_ANON + page_lru,
2107 -HPAGE_PMD_NR);
2108 return isolated;
2109
340ef390
HD
2110out_fail:
2111 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
b32967ff 2112out_dropref:
2b4847e7
MG
2113 ptl = pmd_lock(mm, pmd);
2114 if (pmd_same(*pmd, entry)) {
4d942466 2115 entry = pmd_modify(entry, vma->vm_page_prot);
f714f4f2 2116 set_pmd_at(mm, mmun_start, pmd, entry);
2b4847e7
MG
2117 update_mmu_cache_pmd(vma, address, &entry);
2118 }
2119 spin_unlock(ptl);
a54a407f 2120
eb4489f6 2121out_unlock:
340ef390 2122 unlock_page(page);
b32967ff 2123 put_page(page);
b32967ff
MG
2124 return 0;
2125}
7039e1db
PZ
2126#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2127
2128#endif /* CONFIG_NUMA */
8763cb45 2129
8763cb45
JG
2130struct migrate_vma {
2131 struct vm_area_struct *vma;
2132 unsigned long *dst;
2133 unsigned long *src;
2134 unsigned long cpages;
2135 unsigned long npages;
2136 unsigned long start;
2137 unsigned long end;
2138};
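/*
 * Field notes for the structure above: src[] and dst[] hold one
 * unsigned long per page in [start, end); each entry packs a page frame
 * number together with MIGRATE_PFN_* flag bits (see migrate_pfn() and
 * migrate_pfn_to_page() in include/linux/migrate.h). npages counts how
 * many entries the collection walk has filled so far, while cpages
 * counts how many of them are still candidates for migration.
 */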
2139
2140static int migrate_vma_collect_hole(unsigned long start,
2141 unsigned long end,
2142 struct mm_walk *walk)
2143{
2144 struct migrate_vma *migrate = walk->private;
2145 unsigned long addr;
2146
8315ada7
JG
2147 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2148 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2149 migrate->dst[migrate->npages++] = 0;
2150 migrate->cpages++;
2151 }
2152
2153 return 0;
2154}
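/*
 * Note on holes: entries created here have MIGRATE_PFN_MIGRATE set but
 * no source page. migrate_vma_pages() later passes them to
 * migrate_vma_insert_page(), which lets a driver populate never-touched
 * anonymous addresses directly with newly allocated (or device) memory.
 */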
2155
2156static int migrate_vma_collect_skip(unsigned long start,
2157 unsigned long end,
2158 struct mm_walk *walk)
2159{
2160 struct migrate_vma *migrate = walk->private;
2161 unsigned long addr;
2162
8763cb45
JG
2163 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2164 migrate->dst[migrate->npages] = 0;
2165 migrate->src[migrate->npages++] = 0;
2166 }
2167
2168 return 0;
2169}
2170
2171static int migrate_vma_collect_pmd(pmd_t *pmdp,
2172 unsigned long start,
2173 unsigned long end,
2174 struct mm_walk *walk)
2175{
2176 struct migrate_vma *migrate = walk->private;
2177 struct vm_area_struct *vma = walk->vma;
2178 struct mm_struct *mm = vma->vm_mm;
8c3328f1 2179 unsigned long addr = start, unmapped = 0;
8763cb45
JG
2180 spinlock_t *ptl;
2181 pte_t *ptep;
2182
2183again:
2184 if (pmd_none(*pmdp))
2185 return migrate_vma_collect_hole(start, end, walk);
2186
2187 if (pmd_trans_huge(*pmdp)) {
2188 struct page *page;
2189
2190 ptl = pmd_lock(mm, pmdp);
2191 if (unlikely(!pmd_trans_huge(*pmdp))) {
2192 spin_unlock(ptl);
2193 goto again;
2194 }
2195
2196 page = pmd_page(*pmdp);
2197 if (is_huge_zero_page(page)) {
2198 spin_unlock(ptl);
2199 split_huge_pmd(vma, pmdp, addr);
2200 if (pmd_trans_unstable(pmdp))
8315ada7 2201 return migrate_vma_collect_skip(start, end,
8763cb45
JG
2202 walk);
2203 } else {
2204 int ret;
2205
2206 get_page(page);
2207 spin_unlock(ptl);
2208 if (unlikely(!trylock_page(page)))
8315ada7 2209 return migrate_vma_collect_skip(start, end,
8763cb45
JG
2210 walk);
2211 ret = split_huge_page(page);
2212 unlock_page(page);
2213 put_page(page);
8315ada7
JG
2214 if (ret)
2215 return migrate_vma_collect_skip(start, end,
2216 walk);
2217 if (pmd_none(*pmdp))
8763cb45
JG
2218 return migrate_vma_collect_hole(start, end,
2219 walk);
2220 }
2221 }
2222
2223 if (unlikely(pmd_bad(*pmdp)))
8315ada7 2224 return migrate_vma_collect_skip(start, end, walk);
8763cb45
JG
2225
2226 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
8c3328f1
JG
2227 arch_enter_lazy_mmu_mode();
2228
8763cb45
JG
2229 for (; addr < end; addr += PAGE_SIZE, ptep++) {
2230 unsigned long mpfn, pfn;
2231 struct page *page;
8c3328f1 2232 swp_entry_t entry;
8763cb45
JG
2233 pte_t pte;
2234
2235 pte = *ptep;
2236 pfn = pte_pfn(pte);
2237
a5430dda 2238 if (pte_none(pte)) {
8315ada7
JG
2239 mpfn = MIGRATE_PFN_MIGRATE;
2240 migrate->cpages++;
2241 pfn = 0;
8763cb45
JG
2242 goto next;
2243 }
2244
a5430dda
JG
2245 if (!pte_present(pte)) {
2246 mpfn = pfn = 0;
2247
2248 /*
2249 * We only care about the special page table entries for
2250 * unaddressable device pages. Other special swap entries are not
2251 * migratable, and we ignore regular swapped pages.
2252 */
2253 entry = pte_to_swp_entry(pte);
2254 if (!is_device_private_entry(entry))
2255 goto next;
2256
2257 page = device_private_entry_to_page(entry);
2258 mpfn = migrate_pfn(page_to_pfn(page))|
2259 MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2260 if (is_write_device_private_entry(entry))
2261 mpfn |= MIGRATE_PFN_WRITE;
2262 } else {
8315ada7
JG
2263 if (is_zero_pfn(pfn)) {
2264 mpfn = MIGRATE_PFN_MIGRATE;
2265 migrate->cpages++;
2266 pfn = 0;
2267 goto next;
2268 }
df6ad698 2269 page = _vm_normal_page(migrate->vma, addr, pte, true);
a5430dda
JG
2270 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2271 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2272 }
2273
8763cb45 2274 /* FIXME support THP */
8763cb45
JG
2275 if (!page || !page->mapping || PageTransCompound(page)) {
2276 mpfn = pfn = 0;
2277 goto next;
2278 }
a5430dda 2279 pfn = page_to_pfn(page);
8763cb45
JG
2280
2281 /*
2282 * By getting a reference on the page we pin it and that blocks
2283 * any kind of migration. Side effect is that it "freezes" the
2284 * pte.
2285 *
2286 * We drop this reference after isolating the page from the lru
2287 * for non-device pages (device pages are not on the lru and thus
2288 * can't be dropped from it).
2289 */
2290 get_page(page);
2291 migrate->cpages++;
8763cb45 2292
8c3328f1
JG
2293 /*
2294 * Optimize for the common case where page is only mapped once
2295 * in one process. If we can lock the page, then we can safely
2296 * set up a special migration page table entry now.
2297 */
2298 if (trylock_page(page)) {
2299 pte_t swp_pte;
2300
2301 mpfn |= MIGRATE_PFN_LOCKED;
2302 ptep_get_and_clear(mm, addr, ptep);
2303
2304 /* Setup special migration page table entry */
2305 entry = make_migration_entry(page, pte_write(pte));
2306 swp_pte = swp_entry_to_pte(entry);
2307 if (pte_soft_dirty(pte))
2308 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2309 set_pte_at(mm, addr, ptep, swp_pte);
2310
2311 /*
2312 * This is like regular unmap: we remove the rmap and
2313 * drop page refcount. Page won't be freed, as we took
2314 * a reference just above.
2315 */
2316 page_remove_rmap(page, false);
2317 put_page(page);
a5430dda
JG
2318
2319 if (pte_present(pte))
2320 unmapped++;
8c3328f1
JG
2321 }
2322
8763cb45 2323next:
a5430dda 2324 migrate->dst[migrate->npages] = 0;
8763cb45
JG
2325 migrate->src[migrate->npages++] = mpfn;
2326 }
8c3328f1 2327 arch_leave_lazy_mmu_mode();
8763cb45
JG
2328 pte_unmap_unlock(ptep - 1, ptl);
2329
8c3328f1
JG
2330 /* Only flush the TLB if we actually modified any entries */
2331 if (unmapped)
2332 flush_tlb_range(walk->vma, start, end);
2333
8763cb45
JG
2334 return 0;
2335}
2336
2337/*
2338 * migrate_vma_collect() - collect pages over a range of virtual addresses
2339 * @migrate: migrate struct containing all migration information
2340 *
2341 * This will walk the CPU page table. For each virtual address backed by a
2342 * valid page, it updates the src array and takes a reference on the page, in
2343 * order to pin the page until we lock it and unmap it.
2344 */
2345static void migrate_vma_collect(struct migrate_vma *migrate)
2346{
2347 struct mm_walk mm_walk;
2348
2349 mm_walk.pmd_entry = migrate_vma_collect_pmd;
2350 mm_walk.pte_entry = NULL;
2351 mm_walk.pte_hole = migrate_vma_collect_hole;
2352 mm_walk.hugetlb_entry = NULL;
2353 mm_walk.test_walk = NULL;
2354 mm_walk.vma = migrate->vma;
2355 mm_walk.mm = migrate->vma->vm_mm;
2356 mm_walk.private = migrate;
2357
8c3328f1
JG
2358 mmu_notifier_invalidate_range_start(mm_walk.mm,
2359 migrate->start,
2360 migrate->end);
8763cb45 2361 walk_page_range(migrate->start, migrate->end, &mm_walk);
8c3328f1
JG
2362 mmu_notifier_invalidate_range_end(mm_walk.mm,
2363 migrate->start,
2364 migrate->end);
8763cb45
JG
2365
2366 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2367}
2368
2369/*
2370 * migrate_vma_check_page() - check if page is pinned or not
2371 * @page: struct page to check
2372 *
2373 * Pinned pages cannot be migrated. This is the same test as in
2374 * migrate_page_move_mapping(), except that here we allow migration of a
2375 * ZONE_DEVICE page.
2376 */
2377static bool migrate_vma_check_page(struct page *page)
2378{
2379 /*
2380 * One extra ref because caller holds an extra reference, either from
2381 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2382 * a device page.
2383 */
2384 int extra = 1;
2385
2386 /*
2387 * FIXME support THP (transparent huge page), it is bit more complex to
2388 * check them than regular pages, because they can be mapped with a pmd
2389 * or with a pte (split pte mapping).
2390 */
2391 if (PageCompound(page))
2392 return false;
2393
a5430dda
JG
2394 /* Pages from ZONE_DEVICE have one extra reference */
2395 if (is_zone_device_page(page)) {
2396 /*
2397 * Private pages can never be pinned as they have no valid pte and
2398 * GUP will fail for those. Yet if there is a pending migration
2399 * a thread might try to wait on the pte migration entry and
2400 * will bump the page reference count. Sadly there is no way to
2401 * differentiate a regular pin from a migration wait. Hence, to
2402 * avoid two racing threads trying to migrate back to the CPU and
2403 * entering an infinite loop (one stopping migration because the
2404 * other is waiting on the pte migration entry), always return true.
2405 *
2406 * FIXME proper solution is to rework migration_entry_wait() so
2407 * it does not need to take a reference on page.
2408 */
2409 if (is_device_private_page(page))
2410 return true;
2411
df6ad698
JG
2412 /*
2413 * Only allow device public pages to be migrated and account for
2414 * the extra reference count implied by ZONE_DEVICE pages.
2415 */
2416 if (!is_device_public_page(page))
2417 return false;
2418 extra++;
a5430dda
JG
2419 }
2420
df6ad698
JG
2421 /* For file backed pages */
2422 if (page_mapping(page))
2423 extra += 1 + page_has_private(page);
2424
8763cb45
JG
2425 if ((page_count(page) - extra) > page_mapcount(page))
2426 return false;
2427
2428 return true;
2429}
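/*
 * Worked example for the test above: an anonymous page mapped by a
 * single pte has page_mapcount() == 1 and page_count() == 2 -- one
 * reference from the mapping plus the extra one held by the caller
 * (isolate_lru_page() for a regular page), so with extra == 1 the
 * comparison is 1 > 1 and the page is reported as unpinned. A
 * concurrent get_user_pages() reference bumps page_count() to 3 and the
 * same comparison becomes 2 > 1, so the page is treated as pinned.
 */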
2430
2431/*
2432 * migrate_vma_prepare() - lock pages and isolate them from the lru
2433 * @migrate: migrate struct containing all migration information
2434 *
2435 * This locks pages that have been collected by migrate_vma_collect(). Once each
2436 * page is locked it is isolated from the lru (for non-device pages). Finally,
2437 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2438 * migrated by concurrent kernel threads.
2439 */
2440static void migrate_vma_prepare(struct migrate_vma *migrate)
2441{
2442 const unsigned long npages = migrate->npages;
8c3328f1
JG
2443 const unsigned long start = migrate->start;
2444 unsigned long addr, i, restore = 0;
8763cb45 2445 bool allow_drain = true;
8763cb45
JG
2446
2447 lru_add_drain();
2448
2449 for (i = 0; (i < npages) && migrate->cpages; i++) {
2450 struct page *page = migrate_pfn_to_page(migrate->src[i]);
8c3328f1 2451 bool remap = true;
8763cb45
JG
2452
2453 if (!page)
2454 continue;
2455
8c3328f1
JG
2456 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2457 /*
2458 * Because we are migrating several pages there can be
2459 * a deadlock between two concurrent migrations where each
2460 * is waiting on the other's page lock.
2461 *
2462 * Make migrate_vma() a best effort thing and back off
2463 * for any page we cannot lock right away.
2464 */
2465 if (!trylock_page(page)) {
2466 migrate->src[i] = 0;
2467 migrate->cpages--;
2468 put_page(page);
2469 continue;
2470 }
2471 remap = false;
2472 migrate->src[i] |= MIGRATE_PFN_LOCKED;
8763cb45 2473 }
8763cb45 2474
a5430dda
JG
2475 /* ZONE_DEVICE pages are not on LRU */
2476 if (!is_zone_device_page(page)) {
2477 if (!PageLRU(page) && allow_drain) {
2478 /* Drain CPU's pagevec */
2479 lru_add_drain_all();
2480 allow_drain = false;
2481 }
8763cb45 2482
a5430dda
JG
2483 if (isolate_lru_page(page)) {
2484 if (remap) {
2485 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2486 migrate->cpages--;
2487 restore++;
2488 } else {
2489 migrate->src[i] = 0;
2490 unlock_page(page);
2491 migrate->cpages--;
2492 put_page(page);
2493 }
2494 continue;
8c3328f1 2495 }
a5430dda
JG
2496
2497 /* Drop the reference we took in collect */
2498 put_page(page);
8763cb45
JG
2499 }
2500
2501 if (!migrate_vma_check_page(page)) {
8c3328f1
JG
2502 if (remap) {
2503 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2504 migrate->cpages--;
2505 restore++;
8763cb45 2506
a5430dda
JG
2507 if (!is_zone_device_page(page)) {
2508 get_page(page);
2509 putback_lru_page(page);
2510 }
8c3328f1
JG
2511 } else {
2512 migrate->src[i] = 0;
2513 unlock_page(page);
2514 migrate->cpages--;
2515
a5430dda
JG
2516 if (!is_zone_device_page(page))
2517 putback_lru_page(page);
2518 else
2519 put_page(page);
8c3328f1 2520 }
8763cb45
JG
2521 }
2522 }
8c3328f1
JG
2523
2524 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2525 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2526
2527 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2528 continue;
2529
2530 remove_migration_pte(page, migrate->vma, addr, page);
2531
2532 migrate->src[i] = 0;
2533 unlock_page(page);
2534 put_page(page);
2535 restore--;
2536 }
8763cb45
JG
2537}
2538
2539/*
2540 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2541 * @migrate: migrate struct containing all migration information
2542 *
2543 * Replace page mapping (CPU page table pte) with a special migration pte entry
2544 * and check again if it has been pinned. Pinned pages are restored because we
2545 * cannot migrate them.
2546 *
2547 * This is the last step before we call the device driver callback to allocate
2548 * destination memory and copy contents of original page over to new page.
2549 */
2550static void migrate_vma_unmap(struct migrate_vma *migrate)
2551{
2552 int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2553 const unsigned long npages = migrate->npages;
2554 const unsigned long start = migrate->start;
2555 unsigned long addr, i, restore = 0;
2556
2557 for (i = 0; i < npages; i++) {
2558 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2559
2560 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2561 continue;
2562
8c3328f1
JG
2563 if (page_mapped(page)) {
2564 try_to_unmap(page, flags);
2565 if (page_mapped(page))
2566 goto restore;
8763cb45 2567 }
8c3328f1
JG
2568
2569 if (migrate_vma_check_page(page))
2570 continue;
2571
2572restore:
2573 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2574 migrate->cpages--;
2575 restore++;
8763cb45
JG
2576 }
2577
2578 for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2579 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2580
2581 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2582 continue;
2583
2584 remove_migration_ptes(page, page, false);
2585
2586 migrate->src[i] = 0;
2587 unlock_page(page);
2588 restore--;
2589
a5430dda
JG
2590 if (is_zone_device_page(page))
2591 put_page(page);
2592 else
2593 putback_lru_page(page);
8763cb45
JG
2594 }
2595}
2596
8315ada7
JG
2597static void migrate_vma_insert_page(struct migrate_vma *migrate,
2598 unsigned long addr,
2599 struct page *page,
2600 unsigned long *src,
2601 unsigned long *dst)
2602{
2603 struct vm_area_struct *vma = migrate->vma;
2604 struct mm_struct *mm = vma->vm_mm;
2605 struct mem_cgroup *memcg;
2606 bool flush = false;
2607 spinlock_t *ptl;
2608 pte_t entry;
2609 pgd_t *pgdp;
2610 p4d_t *p4dp;
2611 pud_t *pudp;
2612 pmd_t *pmdp;
2613 pte_t *ptep;
2614
2615 /* Only allow populating anonymous memory */
2616 if (!vma_is_anonymous(vma))
2617 goto abort;
2618
2619 pgdp = pgd_offset(mm, addr);
2620 p4dp = p4d_alloc(mm, pgdp, addr);
2621 if (!p4dp)
2622 goto abort;
2623 pudp = pud_alloc(mm, p4dp, addr);
2624 if (!pudp)
2625 goto abort;
2626 pmdp = pmd_alloc(mm, pudp, addr);
2627 if (!pmdp)
2628 goto abort;
2629
2630 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2631 goto abort;
2632
2633 /*
2634 * Use pte_alloc() instead of pte_alloc_map(). We can't run
2635 * pte_offset_map() on pmds where a huge pmd might be created
2636 * from a different thread.
2637 *
2638 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2639 * parallel threads are excluded by other means.
2640 *
2641 * Here we only have down_read(mmap_sem).
2642 */
2643 if (pte_alloc(mm, pmdp, addr))
2644 goto abort;
2645
2646 /* See the comment in pte_alloc_one_map() */
2647 if (unlikely(pmd_trans_unstable(pmdp)))
2648 goto abort;
2649
2650 if (unlikely(anon_vma_prepare(vma)))
2651 goto abort;
2652 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2653 goto abort;
2654
2655 /*
2656 * The memory barrier inside __SetPageUptodate makes sure that
2657 * preceding stores to the page contents become visible before
2658 * the set_pte_at() write.
2659 */
2660 __SetPageUptodate(page);
2661
df6ad698
JG
2662 if (is_zone_device_page(page)) {
2663 if (is_device_private_page(page)) {
2664 swp_entry_t swp_entry;
2665
2666 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2667 entry = swp_entry_to_pte(swp_entry);
2668 } else if (is_device_public_page(page)) {
2669 entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
2670 if (vma->vm_flags & VM_WRITE)
2671 entry = pte_mkwrite(pte_mkdirty(entry));
2672 entry = pte_mkdevmap(entry);
2673 }
8315ada7
JG
2674 } else {
2675 entry = mk_pte(page, vma->vm_page_prot);
2676 if (vma->vm_flags & VM_WRITE)
2677 entry = pte_mkwrite(pte_mkdirty(entry));
2678 }
2679
2680 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2681
2682 if (pte_present(*ptep)) {
2683 unsigned long pfn = pte_pfn(*ptep);
2684
2685 if (!is_zero_pfn(pfn)) {
2686 pte_unmap_unlock(ptep, ptl);
2687 mem_cgroup_cancel_charge(page, memcg, false);
2688 goto abort;
2689 }
2690 flush = true;
2691 } else if (!pte_none(*ptep)) {
2692 pte_unmap_unlock(ptep, ptl);
2693 mem_cgroup_cancel_charge(page, memcg, false);
2694 goto abort;
2695 }
2696
2697 /*
2698 * Check for userfaultfd but do not deliver the fault. Instead,
2699 * just back off.
2700 */
2701 if (userfaultfd_missing(vma)) {
2702 pte_unmap_unlock(ptep, ptl);
2703 mem_cgroup_cancel_charge(page, memcg, false);
2704 goto abort;
2705 }
2706
2707 inc_mm_counter(mm, MM_ANONPAGES);
2708 page_add_new_anon_rmap(page, vma, addr, false);
2709 mem_cgroup_commit_charge(page, memcg, false, false);
2710 if (!is_zone_device_page(page))
2711 lru_cache_add_active_or_unevictable(page, vma);
2712 get_page(page);
2713
2714 if (flush) {
2715 flush_cache_page(vma, addr, pte_pfn(*ptep));
2716 ptep_clear_flush_notify(vma, addr, ptep);
2717 set_pte_at_notify(mm, addr, ptep, entry);
2718 update_mmu_cache(vma, addr, ptep);
2719 } else {
2720 /* No need to invalidate - it was non-present before */
2721 set_pte_at(mm, addr, ptep, entry);
2722 update_mmu_cache(vma, addr, ptep);
2723 }
2724
2725 pte_unmap_unlock(ptep, ptl);
2726 *src = MIGRATE_PFN_MIGRATE;
2727 return;
2728
2729abort:
2730 *src &= ~MIGRATE_PFN_MIGRATE;
2731}
2732
8763cb45
JG
2733/*
2734 * migrate_vma_pages() - migrate meta-data from src page to dst page
2735 * @migrate: migrate struct containing all migration information
2736 *
2737 * This migrates struct page meta-data from source struct page to destination
2738 * struct page. This effectively finishes the migration from source page to the
2739 * destination page.
2740 */
2741static void migrate_vma_pages(struct migrate_vma *migrate)
2742{
2743 const unsigned long npages = migrate->npages;
2744 const unsigned long start = migrate->start;
8315ada7
JG
2745 struct vm_area_struct *vma = migrate->vma;
2746 struct mm_struct *mm = vma->vm_mm;
2747 unsigned long addr, i, mmu_start;
2748 bool notified = false;
8763cb45
JG
2749
2750 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2751 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2752 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2753 struct address_space *mapping;
2754 int r;
2755
8315ada7
JG
2756 if (!newpage) {
2757 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
8763cb45 2758 continue;
8315ada7
JG
2759 }
2760
2761 if (!page) {
2762 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2763 continue;
2764 }
2765 if (!notified) {
2766 mmu_start = addr;
2767 notified = true;
2768 mmu_notifier_invalidate_range_start(mm,
2769 mmu_start,
2770 migrate->end);
2771 }
2772 migrate_vma_insert_page(migrate, addr, newpage,
2773 &migrate->src[i],
2774 &migrate->dst[i]);
8763cb45 2775 continue;
8315ada7 2776 }
8763cb45
JG
2777
2778 mapping = page_mapping(page);
2779
a5430dda
JG
2780 if (is_zone_device_page(newpage)) {
2781 if (is_device_private_page(newpage)) {
2782 /*
2783 * For now only support private anonymous when
2784 * migrating to un-addressable device memory.
2785 */
2786 if (mapping) {
2787 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2788 continue;
2789 }
df6ad698 2790 } else if (!is_device_public_page(newpage)) {
a5430dda
JG
2791 /*
2792 * Other types of ZONE_DEVICE page are not
2793 * supported.
2794 */
2795 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2796 continue;
2797 }
2798 }
2799
8763cb45
JG
2800 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2801 if (r != MIGRATEPAGE_SUCCESS)
2802 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2803 }
8315ada7
JG
2804
2805 if (notified)
2806 mmu_notifier_invalidate_range_end(mm, mmu_start,
2807 migrate->end);
8763cb45
JG
2808}
2809
2810/*
2811 * migrate_vma_finalize() - restore CPU page table entry
2812 * @migrate: migrate struct containing all migration information
2813 *
2814 * This replaces the special migration pte entry with either a mapping to the
2815 * new page if migration was successful for that page, or to the original page
2816 * otherwise.
2817 *
2818 * This also unlocks the pages and puts them back on the lru, or drops the extra
2819 * refcount, for device pages.
2820 */
2821static void migrate_vma_finalize(struct migrate_vma *migrate)
2822{
2823 const unsigned long npages = migrate->npages;
2824 unsigned long i;
2825
2826 for (i = 0; i < npages; i++) {
2827 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2828 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2829
8315ada7
JG
2830 if (!page) {
2831 if (newpage) {
2832 unlock_page(newpage);
2833 put_page(newpage);
2834 }
8763cb45 2835 continue;
8315ada7
JG
2836 }
2837
8763cb45
JG
2838 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2839 if (newpage) {
2840 unlock_page(newpage);
2841 put_page(newpage);
2842 }
2843 newpage = page;
2844 }
2845
2846 remove_migration_ptes(page, newpage, false);
2847 unlock_page(page);
2848 migrate->cpages--;
2849
a5430dda
JG
2850 if (is_zone_device_page(page))
2851 put_page(page);
2852 else
2853 putback_lru_page(page);
8763cb45
JG
2854
2855 if (newpage != page) {
2856 unlock_page(newpage);
a5430dda
JG
2857 if (is_zone_device_page(newpage))
2858 put_page(newpage);
2859 else
2860 putback_lru_page(newpage);
8763cb45
JG
2861 }
2862 }
2863}
2864
2865/*
2866 * migrate_vma() - migrate a range of memory inside vma
2867 *
2868 * @ops: migration callback for allocating destination memory and copying
2869 * @vma: virtual memory area containing the range to be migrated
2870 * @start: start address of the range to migrate (inclusive)
2871 * @end: end address of the range to migrate (exclusive)
2872 * @src: array of unsigned long entries (packed migrate pfns) for the source pages
2873 * @dst: array of unsigned long entries (packed migrate pfns) for the destination pages
2874 * @private: pointer passed back to each of the callbacks
2875 * Returns: 0 on success, error code otherwise
2876 *
2877 * This function tries to migrate a virtual memory address range, using
2878 * callbacks to allocate and copy memory from source to destination. First it
2879 * collects all the pages backing each virtual address in the range, saving this
2880 * inside the src array. Then it locks those pages and unmaps them. Once the pages
2881 * are locked and unmapped, it checks whether each page is pinned or not. Pages
2882 * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
2883 * in the corresponding src array entry. It then restores any pages that are
2884 * pinned, by remapping and unlocking those pages.
2885 *
2886 * At this point it calls the alloc_and_copy() callback. For documentation on
2887 * what is expected from that callback, see struct migrate_vma_ops comments in
2888 * include/linux/migrate.h
2889 *
2890 * After the alloc_and_copy() callback, this function goes over each entry in
2891 * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2892 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2893 * then the function tries to migrate struct page information from the source
2894 * struct page to the destination struct page. If it fails to migrate the struct
2895 * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
2896 * array.
2897 *
2898 * At this point all successfully migrated pages have an entry in the src
2899 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2900 * array entry with MIGRATE_PFN_VALID flag set.
2901 *
2902 * It then calls the finalize_and_map() callback. See comments for "struct
2903 * migrate_vma_ops", in include/linux/migrate.h for details about
2904 * finalize_and_map() behavior.
2905 *
2906 * After the finalize_and_map() callback, for successfully migrated pages, this
2907 * function updates the CPU page table to point to new pages, otherwise it
2908 * restores the CPU page table to point to the original source pages.
2909 *
2910 * Function returns 0 after the above steps, even if no pages were migrated
2911 * (The function only returns an error if any of the arguments are invalid.)
2912 *
2913 * Both src and dst array must be big enough for (end - start) >> PAGE_SHIFT
2914 * unsigned long entries.
2915 */
2916int migrate_vma(const struct migrate_vma_ops *ops,
2917 struct vm_area_struct *vma,
2918 unsigned long start,
2919 unsigned long end,
2920 unsigned long *src,
2921 unsigned long *dst,
2922 void *private)
2923{
2924 struct migrate_vma migrate;
2925
2926 /* Sanity check the arguments */
2927 start &= PAGE_MASK;
2928 end &= PAGE_MASK;
2929 if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL))
2930 return -EINVAL;
2931 if (start < vma->vm_start || start >= vma->vm_end)
2932 return -EINVAL;
2933 if (end <= vma->vm_start || end > vma->vm_end)
2934 return -EINVAL;
2935 if (!ops || !src || !dst || start >= end)
2936 return -EINVAL;
2937
2938 memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
2939 migrate.src = src;
2940 migrate.dst = dst;
2941 migrate.start = start;
2942 migrate.npages = 0;
2943 migrate.cpages = 0;
2944 migrate.end = end;
2945 migrate.vma = vma;
2946
2947 /* Collect, and try to unmap source pages */
2948 migrate_vma_collect(&migrate);
2949 if (!migrate.cpages)
2950 return 0;
2951
2952 /* Lock and isolate page */
2953 migrate_vma_prepare(&migrate);
2954 if (!migrate.cpages)
2955 return 0;
2956
2957 /* Unmap pages */
2958 migrate_vma_unmap(&migrate);
2959 if (!migrate.cpages)
2960 return 0;
2961
2962 /*
2963 * At this point pages are locked and unmapped, and thus they have
2964 * stable content and can safely be copied to destination memory that
2965 * is allocated by the callback.
2966 *
2967 * Note that migration can fail in migrate_vma_struct_page() for each
2968 * individual page.
2969 */
2970 ops->alloc_and_copy(vma, src, dst, start, end, private);
2971
2972 /* This does the real migration of struct page */
2973 migrate_vma_pages(&migrate);
2974
2975 ops->finalize_and_map(vma, src, dst, start, end, private);
2976
2977 /* Unlock and remap pages */
2978 migrate_vma_finalize(&migrate);
2979
2980 return 0;
2981}
2982EXPORT_SYMBOL(migrate_vma);
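/*
 * Illustrative driver-side sketch (not part of this file): the expected
 * calling pattern for migrate_vma(). Callback prototypes follow struct
 * migrate_vma_ops in include/linux/migrate.h; the example_* names and
 * the callback bodies are placeholders, not an existing driver.
 */
#if 0	/* illustrative sketch only */
static void example_alloc_and_copy(struct vm_area_struct *vma,
				   const unsigned long *src,
				   unsigned long *dst,
				   unsigned long start,
				   unsigned long end,
				   void *private)
{
	unsigned long addr, i;

	for (i = 0, addr = start; addr < end; addr += PAGE_SIZE, i++) {
		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;	/* page could not be unmapped, skip */
		/*
		 * Allocate a destination page, copy the source data into
		 * it, lock it, and publish it, e.g.:
		 *	dst[i] = migrate_pfn(page_to_pfn(newpage)) |
		 *		 MIGRATE_PFN_LOCKED;
		 */
	}
}

static void example_finalize_and_map(struct vm_area_struct *vma,
				     const unsigned long *src,
				     const unsigned long *dst,
				     unsigned long start,
				     unsigned long end,
				     void *private)
{
	/*
	 * Entries that still have MIGRATE_PFN_MIGRATE set in src[] were
	 * migrated; update device page tables for them here. The others
	 * were restored to the original pages by the core code.
	 */
}

static const struct migrate_vma_ops example_migrate_ops = {
	.alloc_and_copy		= example_alloc_and_copy,
	.finalize_and_map	= example_finalize_and_map,
};

/*
 * Caller holds mmap_sem for read; src[] and dst[] each provide
 * (end - start) >> PAGE_SHIFT entries.
 */
static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 unsigned long *src, unsigned long *dst)
{
	return migrate_vma(&example_migrate_ops, vma, start, end,
			   src, dst, NULL);
}
#endif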