/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon, or hugetlbfs pages. See
 * isolate_migratepages_range() and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		if (unlikely(isolated_balloon_page(page)))
			balloon_page_putback(page);
		else
			putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
	} else {
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock? No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
	}
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Congratulations to trinity for discovering this bug.
 * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
 * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
 * replace the specified range by file ptes throughout (maybe populated after).
 * If page migration finds a page within that range, while it's still located
 * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
 * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
 * But if the migrating page is in a part of the vma outside the range to be
 * remapped, then it will not be cleared, and remove_migration_ptes() needs to
 * deal with it. Fortunately, this part of the vma is of course still linear,
 * so we just need to use linear location on the nonlinear list.
 */
static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
		struct address_space *mapping, void *arg)
{
	struct vm_area_struct *vma;
	/* hugetlbfs does not support remap_pages, so no huge pgoff worries */
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long addr;

	list_for_each_entry(vma,
		&mapping->i_mmap_nonlinear, shared.nonlinear) {

		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr >= vma->vm_start && addr < vma->vm_end)
			remove_migration_pte(page, vma, addr, arg);
	}
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
		.file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
	};

	rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero, and we don't want to call
	 * wait_on_page_locked() against a page without holding a reference.
	 * So we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *  1 for anonymous pages without a mapping
 *  2 for pages with a mapping
 *  3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
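/*
 * (Illustrative note: the isolating caller holds the first reference, the
 * page-cache radix tree slot accounts for the second on mapped pages, and
 * buffer heads attached via PagePrivate account for the third.)
 */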
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	int expected_count = 1 + extra_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * were moved first and we then failed to lock the buffers, we could
	 * not move the mapping back because of the elevated page count, and
	 * would have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);
	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page. We need something more
 * specialized.
 */
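/*
 * (Illustrative note: mem_map_next() recomputes the struct page pointer via
 * pfn_to_page() at MAX_ORDER_NR_PAGES boundaries, so the loop below also
 * works when the memory map is not virtually contiguous.)
 */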
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become
		 * dirty, whereas only part of our page may be dirty.
		 */
		if (PageSwapBacked(page))
			SetPageDirty(newpage);
		else
			__set_page_dirty_nobuffers(newpage);
	}

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now.
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				int remap_swapcache, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc != MIGRATEPAGE_SUCCESS) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}

	unlock_page(newpage);

	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int remap_swapcache = 1;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	/* charge against new page */
	mem_cgroup_prepare_migration(page, newpage, &mem);

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By the time try_to_unmap() returns, page->mapcount may have gone
	 * down to 0, and then we cannot notice that the anon_vma has been
	 * freed while we migrate the page. This get_anon_vma() delays the
	 * freeing of the anon_vma pointer until the end of migration.
	 * File cache pages are no problem because of page_lock(): file
	 * caches may use writepage() or lock_page() in migration, so only
	 * anonymous pages need care here.
	 */
	if (PageAnon(page) && !PageKsm(page)) {
		/*
		 * Only page_lock_anon_vma_read() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_get_anon_vma(page);
		if (anon_vma) {
			/*
			 * Anon page
			 */
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes.
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	if (unlikely(balloon_page_movable(page))) {
		/*
		 * A ballooned page does not need any special attention from
		 * physical to virtual reverse mapping procedures.
		 * Skip any attempt to unmap PTEs or to remap swap cache,
		 * in order to avoid burning cycles at rmap level, and perform
		 * the page migration right away (protected by page lock).
		 */
		rc = balloon_page_migrate(newpage, page, mode);
		goto uncharge;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, mode);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	mem_cgroup_end_migration(mem, page, newpage,
				 (rc == MIGRATEPAGE_SUCCESS ||
				  rc == MIGRATEPAGE_BALLOON_SUCCESS));
	unlock_page(page);
out:
	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
			unsigned long private, struct page *page, int force,
			enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto out;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto out;

	rc = __unmap_and_move(page, newpage, force, mode);

	if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
		/*
		 * A ballooned page has been migrated already.
		 * Now is the time to wrap up counters, hand the page
		 * back to the buddy allocator and return.
		 */
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
		balloon_page_free(page);
		return MIGRATEPAGE_SUCCESS;
	}
out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
	 * during isolation.
	 */
	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
		ClearPageSwapBacked(newpage);
		put_new_page(newpage, private);
	} else
		putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;

	/*
	 * Movability of hugepages depends on architectures and hugepage size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private, &result);
	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1, mode);

	if (rc != MIGRATEPAGE_SUCCESS)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS)
		hugetlb_cgroup_migrate(hpage, new_hpage);

	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
		put_new_page(new_hpage, private);
	else
		put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or when no pages are movable any
 * more because the list has become empty or no retryable pages remain.
 * The caller should call putback_movable_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				break;
			}
		}
	}
	rc = nr_failed + retry;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
					pm->node);
	else
		return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		if (PageHuge(page)) {
			isolate_huge_page(page, &pagelist);
			goto put_and_set;
		}

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node, NULL,
				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * in the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
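	/*
	 * (Illustrative note: chunking in groups of 16 keeps the two
	 * on-stack arrays above small while still batching the
	 * copy_from_user()/copy_to_user() round trips.)
	 */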

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;
	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		if (!zone_reclaimable(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       0, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data,
					   int **result)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_exact_node(nid,
					 (GFP_HIGHUSER_MOVABLE |
					  __GFP_THISNODE | __GFP_NOMEMALLOC |
					  __GFP_NORETRY | __GFP_NOWARN) &
					 ~GFP_IOFS, 0);
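	/*
	 * (Illustrative note: __GFP_THISNODE pins the allocation to the
	 * target node with no fallback, and masking out GFP_IOFS, i.e.
	 * __GFP_IO | __GFP_FS, keeps any reclaim done for this allocation
	 * from recursing into block I/O or filesystem code.)
	 */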

	return newpage;
}

/*
 * page migration rate limiting control.
 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
 * window of time. Default here says do not migrate more than 1280M per second.
 * If a node is rate-limited then PTE NUMA updates are also rate-limited.
 * However, as it is faults that reset the window, PTE updates will happen
 * unconditionally if there has not been a fault for
 * @pteupdate_interval_millisecs after the throttle window closed.
 */
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);

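/*
 * (Worked arithmetic: ratelimit_pages = 128 << (20 - PAGE_SHIFT) pages is
 * 128MB worth of pages for any page size; combined with the 100ms window
 * above, this caps NUMA migration traffic at roughly 1280MB/s per node.)
 */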
/* Returns true if NUMA migration is currently rate limited */
bool migrate_ratelimited(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);

	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
				msecs_to_jiffies(pteupdate_interval_millisecs)))
		return false;

	if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
		return false;

	return true;
}

/* Returns true if the node is migrate rate-limited after the update */
static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
					unsigned long nr_pages)
{
	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		spin_lock(&pgdat->numabalancing_migrate_lock);
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies +
			msecs_to_jiffies(migrate_interval_millisecs);
		spin_unlock(&pgdat->numabalancing_migrate_lock);
	}
	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
						nr_pages);
		return true;
	}

	/*
	 * This is an unlocked non-atomic update so errors are possible.
	 * The consequence is failing to migrate when we potentially should
	 * have, which is not severe enough to warrant locking. If it is ever
	 * a problem, it can be converted to a per-cpu counter.
	 */
	pgdat->numabalancing_migrate_nr_pages += nr_pages;
	return false;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: one for the page's mapcount, one
	 * for the caller's pin and one for the reference taken by
	 * isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	return PageLocked(page);
}

void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	wait_on_page_locked(page);
}

b32967ff
MG
1718/*
1719 * Attempt to migrate a misplaced page to the specified destination
1720 * node. Caller is expected to have an elevated reference count on
1721 * the page that will be dropped by this function before returning.
1722 */
1bc115d8
MG
1723int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1724 int node)
b32967ff
MG
1725{
1726 pg_data_t *pgdat = NODE_DATA(node);
340ef390 1727 int isolated;
b32967ff
MG
1728 int nr_remaining;
1729 LIST_HEAD(migratepages);
1730
1731 /*
1bc115d8
MG
1732 * Don't migrate file pages that are mapped in multiple processes
1733 * with execute permissions as they are probably shared libraries.
b32967ff 1734 */
1bc115d8
MG
1735 if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1736 (vma->vm_flags & VM_EXEC))
b32967ff 1737 goto out;
b32967ff
MG
1738
1739 /*
1740 * Rate-limit the amount of data that is being migrated to a node.
1741 * Optimal placement is no good if the memory bus is saturated and
1742 * all the time is being spent migrating!
1743 */
340ef390 1744 if (numamigrate_update_ratelimit(pgdat, 1))
b32967ff 1745 goto out;
b32967ff
MG
1746
1747 isolated = numamigrate_isolate_page(pgdat, page);
1748 if (!isolated)
1749 goto out;
1750
1751 list_add(&page->lru, &migratepages);
9c620e2b 1752 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
68711a74
DR
1753 NULL, node, MIGRATE_ASYNC,
1754 MR_NUMA_MISPLACED);
b32967ff 1755 if (nr_remaining) {
59c82b70
JK
1756 if (!list_empty(&migratepages)) {
1757 list_del(&page->lru);
1758 dec_zone_page_state(page, NR_ISOLATED_ANON +
1759 page_is_file_cache(page));
1760 putback_lru_page(page);
1761 }
b32967ff
MG
1762 isolated = 0;
1763 } else
1764 count_vm_numa_event(NUMA_PAGE_MIGRATE);
7039e1db 1765 BUG_ON(!list_empty(&migratepages));
7039e1db 1766 return isolated;
340ef390
HD
1767
1768out:
1769 put_page(page);
1770 return 0;
7039e1db 1771}
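/*
 * A sketch of the calling convention described above, assuming the fault
 * path already holds a reference on the misplaced page: that reference is
 * consumed by migrate_misplaced_page() on every exit path, so the caller
 * must not touch the page afterwards. Hypothetical helper, condensed from
 * the base-page NUMA fault path of this era.
 */
static int numa_fault_try_migrate(struct page *page,
				  struct vm_area_struct *vma, int target_nid)
{
	int page_nid = page_to_nid(page);

	/* migrate_misplaced_page() drops our reference, success or not */
	if (migrate_misplaced_page(page, vma, target_nid))
		page_nid = target_nid;
	return page_nid;
}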
220018d3 1772#endif /* CONFIG_NUMA_BALANCING */
b32967ff 1773
220018d3 1774#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
340ef390
HD
1775/*
1776 * Migrates a THP to a given target node. page must be locked and is unlocked
1777 * before returning.
1778 */
b32967ff
MG
1779int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1780 struct vm_area_struct *vma,
1781 pmd_t *pmd, pmd_t entry,
1782 unsigned long address,
1783 struct page *page, int node)
1784{
c4088ebd 1785 spinlock_t *ptl;
b32967ff
MG
1786 pg_data_t *pgdat = NODE_DATA(node);
1787 int isolated = 0;
1788 struct page *new_page = NULL;
1789 struct mem_cgroup *memcg = NULL;
1790 int page_lru = page_is_file_cache(page);
f714f4f2
MG
1791 unsigned long mmun_start = address & HPAGE_PMD_MASK;
1792 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
2b4847e7 1793 pmd_t orig_entry;
b32967ff 1794
b32967ff
MG
1795 /*
1796 * Rate-limit the amount of data that is being migrated to a node.
1797 * Optimal placement is no good if the memory bus is saturated and
1798 * all the time is being spent migrating!
1799 */
d28d4335 1800 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
b32967ff
MG
1801 goto out_dropref;
1802
1803 new_page = alloc_pages_node(node,
e97ca8e5
JW
1804 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
1805 HPAGE_PMD_ORDER);
340ef390
HD
1806 if (!new_page)
1807 goto out_fail;
1808
b32967ff 1809 isolated = numamigrate_isolate_page(pgdat, page);
340ef390 1810 if (!isolated) {
b32967ff 1811 put_page(new_page);
340ef390 1812 goto out_fail;
b32967ff
MG
1813 }
1814
b0943d61
MG
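	/*
	 * A batched TLB flush may still be pending for this mm (set up by,
	 * e.g., the NUMA hinting protection changes). Flush the range now
	 * so no CPU can still write through a stale translation to the old
	 * page while it is being copied.
	 */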
1815 if (mm_tlb_flush_pending(mm))
1816 flush_tlb_range(vma, mmun_start, mmun_end);
1817
b32967ff
MG
1818 /* Prepare a page as a migration target */
1819 __set_page_locked(new_page);
1820 SetPageSwapBacked(new_page);
1821
1822 /* anon mapping, we can simply copy page->mapping to the new page: */
1823 new_page->mapping = page->mapping;
1824 new_page->index = page->index;
1825 migrate_page_copy(new_page, page);
1826 WARN_ON(PageLRU(new_page));
1827
1828 /* Recheck the target PMD */
f714f4f2 1829 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
c4088ebd 1830 ptl = pmd_lock(mm, pmd);
2b4847e7
MG
1831 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
1832fail_putback:
c4088ebd 1833 spin_unlock(ptl);
f714f4f2 1834 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
b32967ff
MG
1835
1836 /* Reverse changes made by migrate_page_copy() */
1837 if (TestClearPageActive(new_page))
1838 SetPageActive(page);
1839 if (TestClearPageUnevictable(new_page))
1840 SetPageUnevictable(page);
1841 mlock_migrate_page(page, new_page);
1842
1843 unlock_page(new_page);
1844 put_page(new_page); /* Free it */
1845
a54a407f
MG
1846	 /* Retake the caller's reference and put the page back on the LRU */
1847 get_page(page);
b32967ff 1848 putback_lru_page(page);
a54a407f
MG
1849 mod_zone_page_state(page_zone(page),
1850 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
eb4489f6
MG
1851
1852 goto out_unlock;
b32967ff
MG
1853 }
1854
1855 /*
1856 * Traditional migration needs to prepare the memcg charge
1857 * transaction early to prevent the old page from being
1858 * uncharged when installing migration entries. Here we can
1859 * save the potential rollback and start the charge transfer
1860 * only when migration is already known to end successfully.
1861 */
1862 mem_cgroup_prepare_migration(page, new_page, &memcg);
1863
2b4847e7 1864 orig_entry = *pmd;
b32967ff 1865 entry = mk_pmd(new_page, vma->vm_page_prot);
b32967ff 1866 entry = pmd_mkhuge(entry);
2b4847e7 1867 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
b32967ff 1868
2b4847e7
MG
1869 /*
1870	 * Clear the old entry under the pagetable lock and establish the new
1871	 * PMD. Any parallel GUP will either observe the old page blocking on
1872	 * the page lock, block on the page table lock, or observe the new
1873	 * page. The SetPageUptodate on the new page and page_add_anon_rmap
1874	 * guarantee the copy is visible before the pagetable update.
1875 */
f714f4f2 1876 flush_cache_range(vma, mmun_start, mmun_end);
11de9927 1877 page_add_anon_rmap(new_page, vma, mmun_start);
f714f4f2
MG
1878 pmdp_clear_flush(vma, mmun_start, pmd);
1879 set_pmd_at(mm, mmun_start, pmd, entry);
1880 flush_tlb_range(vma, mmun_start, mmun_end);
ce4a9cc5 1881 update_mmu_cache_pmd(vma, address, &entry);
2b4847e7
MG
1882
1883 if (page_count(page) != 2) {
f714f4f2
MG
1884 set_pmd_at(mm, mmun_start, pmd, orig_entry);
1885 flush_tlb_range(vma, mmun_start, mmun_end);
2b4847e7
MG
1886 update_mmu_cache_pmd(vma, address, &entry);
1887 page_remove_rmap(new_page);
1888 goto fail_putback;
1889 }
1890
b32967ff 1891 page_remove_rmap(page);
2b4847e7 1892
b32967ff
MG
1893 /*
1894 * Finish the charge transaction under the page table lock to
1895 * prevent split_huge_page() from dividing up the charge
1896 * before it's fully transferred to the new page.
1897 */
1898 mem_cgroup_end_migration(memcg, page, new_page, true);
c4088ebd 1899 spin_unlock(ptl);
f714f4f2 1900 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
b32967ff 1901
11de9927
MG
1902 /* Take an "isolate" reference and put new page on the LRU. */
1903 get_page(new_page);
1904 putback_lru_page(new_page);
1905
b32967ff
MG
1906 unlock_page(new_page);
1907 unlock_page(page);
1908 put_page(page); /* Drop the rmap reference */
1909 put_page(page); /* Drop the LRU isolation reference */
1910
1911 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
1912 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
1913
b32967ff
MG
1914 mod_zone_page_state(page_zone(page),
1915 NR_ISOLATED_ANON + page_lru,
1916 -HPAGE_PMD_NR);
1917 return isolated;
1918
340ef390
HD
1919out_fail:
1920 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
b32967ff 1921out_dropref:
2b4847e7
MG
1922 ptl = pmd_lock(mm, pmd);
1923 if (pmd_same(*pmd, entry)) {
1924 entry = pmd_mknonnuma(entry);
f714f4f2 1925 set_pmd_at(mm, mmun_start, pmd, entry);
2b4847e7
MG
1926 update_mmu_cache_pmd(vma, address, &entry);
1927 }
1928 spin_unlock(ptl);
a54a407f 1929
eb4489f6 1930out_unlock:
340ef390 1931 unlock_page(page);
b32967ff 1932 put_page(page);
b32967ff
MG
1933 return 0;
1934}
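/*
 * A sketch of the expected call from a huge-pmd NUMA fault, assuming the
 * page is locked and an extra reference is held as described above; both
 * are released by migrate_misplaced_transhuge_page() on all paths.
 * Hypothetical wrapper, condensed from the fault path of this era.
 */
static bool numa_fault_try_migrate_thp(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       pmd_t *pmdp, pmd_t entry,
				       unsigned long haddr,
				       struct page *page, int target_nid)
{
	/* nonzero return means the THP was isolated and moved */
	return migrate_misplaced_transhuge_page(mm, vma, pmdp, entry,
						haddr, page, target_nid) != 0;
}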
7039e1db
PZ
1935#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
1936
1937#endif /* CONFIG_NUMA */