// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
        struct folio *folio = folio_get_nontail_page(page);
        const struct movable_operations *mops;

        /*
         * Avoid burning cycles with pages that are yet under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a movable page being freed under us and
         * raise its refcount preventing __free_pages() from doing its job
         * the put_page() at the end of this block will take care of
         * release this page, thus avoiding a nasty leakage.
         */
        if (!folio)
                goto out;

        if (unlikely(folio_test_slab(folio)))
                goto out_putfolio;
        /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
        smp_rmb();
        /*
         * Check movable flag before taking the page lock because
         * we use non-atomic bitops on newly allocated page flags so
         * unconditionally grabbing the lock ruins page's owner side.
         */
        if (unlikely(!__folio_test_movable(folio)))
                goto out_putfolio;
        /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
        smp_rmb();
        if (unlikely(folio_test_slab(folio)))
                goto out_putfolio;

        /*
         * As movable pages are not isolated from LRU lists, concurrent
         * compaction threads can race against page migration functions
         * as well as race against the releasing a page.
         *
         * In order to avoid having an already isolated movable page
         * being (wrongly) re-isolated while it is under migration,
         * or to avoid attempting to isolate pages being released,
         * lets be sure we have the page lock
         * before proceeding with the movable page isolation steps.
         */
        if (unlikely(!folio_trylock(folio)))
                goto out_putfolio;

        if (!folio_test_movable(folio) || folio_test_isolated(folio))
                goto out_no_isolated;

        mops = folio_movable_ops(folio);
        VM_BUG_ON_FOLIO(!mops, folio);

        if (!mops->isolate_page(&folio->page, mode))
                goto out_no_isolated;

        /* Driver shouldn't use PG_isolated bit of page->flags */
        WARN_ON_ONCE(folio_test_isolated(folio));
        folio_set_isolated(folio);
        folio_unlock(folio);

        return true;

out_no_isolated:
        folio_unlock(folio);
out_putfolio:
        folio_put(folio);
out:
        return false;
}
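
/*
 * Note: on success the folio has been marked PG_isolated and still carries
 * the reference taken by folio_get_nontail_page() above; the flag and the
 * reference are dropped again in putback_movable_pages().
 */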
static void putback_movable_folio(struct folio *folio)
{
        const struct movable_operations *mops = folio_movable_ops(folio);

        mops->putback_page(&folio->page);
        folio_clear_isolated(folio);
}
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
        struct folio *folio;
        struct folio *folio2;

        list_for_each_entry_safe(folio, folio2, l, lru) {
                if (unlikely(folio_test_hugetlb(folio))) {
                        folio_putback_active_hugetlb(folio);
                        continue;
                }
                list_del(&folio->lru);
                /*
                 * We isolated non-lru movable folio so here we can use
                 * __folio_test_movable because LRU folio's mapping cannot
                 * have PAGE_MAPPING_MOVABLE.
                 */
                if (unlikely(__folio_test_movable(folio))) {
                        VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
                        folio_lock(folio);
                        if (folio_test_movable(folio))
                                putback_movable_folio(folio);
                        else
                                folio_clear_isolated(folio);
                        folio_unlock(folio);
                        folio_put(folio);
                } else {
                        node_stat_mod_folio(folio, NR_ISOLATED_ANON +
                                        folio_is_file_lru(folio), -folio_nr_pages(folio));
                        folio_putback_lru(folio);
                }
        }
}
/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
                struct vm_area_struct *vma, unsigned long addr, void *old)
{
        DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

        while (page_vma_mapped_walk(&pvmw)) {
                rmap_t rmap_flags = RMAP_NONE;
                pte_t old_pte;
                pte_t pte;
                swp_entry_t entry;
                struct page *new;
                unsigned long idx = 0;

                /* pgoff is invalid for ksm pages, but they are never large */
                if (folio_test_large(folio) && !folio_test_hugetlb(folio))
                        idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
                new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
                /* PMD-mapped THP migration entry */
                if (!pvmw.pte) {
                        VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
                                        !folio_test_pmd_mappable(folio), folio);
                        remove_migration_pmd(&pvmw, new);
                        continue;
                }
#endif

                folio_get(folio);
                pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
                old_pte = ptep_get(pvmw.pte);
                if (pte_swp_soft_dirty(old_pte))
                        pte = pte_mksoft_dirty(pte);

                entry = pte_to_swp_entry(old_pte);
                if (!is_migration_entry_young(entry))
                        pte = pte_mkold(pte);
                if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
                        pte = pte_mkdirty(pte);
                if (is_writable_migration_entry(entry))
                        pte = pte_mkwrite(pte, vma);
                else if (pte_swp_uffd_wp(old_pte))
                        pte = pte_mkuffd_wp(pte);

                if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
                        rmap_flags |= RMAP_EXCLUSIVE;

                if (unlikely(is_device_private_page(new))) {
                        if (pte_write(pte))
                                entry = make_writable_device_private_entry(
                                                        page_to_pfn(new));
                        else
                                entry = make_readable_device_private_entry(
                                                        page_to_pfn(new));
                        pte = swp_entry_to_pte(entry);
                        if (pte_swp_soft_dirty(old_pte))
                                pte = pte_swp_mksoft_dirty(pte);
                        if (pte_swp_uffd_wp(old_pte))
                                pte = pte_swp_mkuffd_wp(pte);
                }

#ifdef CONFIG_HUGETLB_PAGE
                if (folio_test_hugetlb(folio)) {
                        struct hstate *h = hstate_vma(vma);
                        unsigned int shift = huge_page_shift(h);
                        unsigned long psize = huge_page_size(h);

                        pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
                        if (folio_test_anon(folio))
                                hugetlb_add_anon_rmap(folio, vma, pvmw.address,
                                                      rmap_flags);
                        else
                                hugetlb_add_file_rmap(folio);
                        set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
                                        psize);
                } else
#endif
                {
                        if (folio_test_anon(folio))
                                folio_add_anon_rmap_pte(folio, new, vma,
                                                        pvmw.address, rmap_flags);
                        else
                                folio_add_file_rmap_pte(folio, new, vma);
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                }
                if (vma->vm_flags & VM_LOCKED)
                        mlock_drain_local();

                trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                           compound_order(new));

                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }

        return true;
}
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = src,
        };

        if (locked)
                rmap_walk_locked(dst, &rwc);
        else
                rmap_walk(dst, &rwc);
}
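
/*
 * Note: the rmap walk runs over @dst (the folio being mapped in) while @src
 * is passed via rwc.arg, so that remove_migration_pte() can match the
 * migration entries that still refer to the source folio.
 */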
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                          unsigned long address)
{
        spinlock_t *ptl;
        pte_t *ptep;
        pte_t pte;
        swp_entry_t entry;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
                return;

        pte = ptep_get(ptep);
        pte_unmap(ptep);

        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        migration_entry_wait_on_locked(entry, ptl);
        return;
out:
        spin_unlock(ptl);
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
        spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
        pte_t pte;

        hugetlb_vma_assert_locked(vma);
        spin_lock(ptl);
        pte = huge_ptep_get(ptep);

        if (unlikely(!is_hugetlb_entry_migration(pte))) {
                spin_unlock(ptl);
                hugetlb_vma_unlock_read(vma);
        } else {
                /*
                 * If migration entry existed, safe to release vma lock
                 * here because the pgtable page won't be freed without the
                 * pgtable lock released. See comment right above pgtable
                 * lock release in migration_entry_wait_on_locked().
                 */
                hugetlb_vma_unlock_read(vma);
                migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
        }
}
#endif
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl;

        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
        migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
        return;
unlock:
        spin_unlock(ptl);
}
#endif
static int folio_expected_refs(struct address_space *mapping,
                struct folio *folio)
{
        int refs = 1;

        if (!mapping)
                return refs;

        refs += folio_nr_pages(folio);
        if (folio_test_private(folio))
                refs++;

        return refs;
}
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
                struct folio *newfolio, struct folio *folio, int extra_count)
{
        XA_STATE(xas, &mapping->i_pages, folio_index(folio));
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = folio_expected_refs(mapping, folio) + extra_count;
        long nr = folio_nr_pages(folio);
        long entries, i;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (folio_ref_count(folio) != expected_count)
                        return -EAGAIN;

                /* No turning back from here */
                newfolio->index = folio->index;
                newfolio->mapping = folio->mapping;
                if (folio_test_swapbacked(folio))
                        __folio_set_swapbacked(newfolio);

                return MIGRATEPAGE_SUCCESS;
        }

        oldzone = folio_zone(folio);
        newzone = folio_zone(newfolio);

        xas_lock_irq(&xas);
        if (!folio_ref_freeze(folio, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the folio:
         * no turning back from here.
         */
        newfolio->index = folio->index;
        newfolio->mapping = folio->mapping;
        folio_ref_add(newfolio, nr); /* add cache reference */
        if (folio_test_swapbacked(folio)) {
                __folio_set_swapbacked(newfolio);
                if (folio_test_swapcache(folio)) {
                        folio_set_swapcache(newfolio);
                        newfolio->private = folio_get_private(folio);
                }
                entries = nr;
        } else {
                VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
                entries = 1;
        }

        /* Move dirty while page refs frozen and newpage not yet exposed */
        dirty = folio_test_dirty(folio);
        if (dirty) {
                folio_clear_dirty(folio);
                folio_set_dirty(newfolio);
        }

        /* Swap cache still stores N entries instead of a high-order entry */
        for (i = 0; i < entries; i++) {
                xas_store(&xas, newfolio);
                xas_next(&xas);
        }

        /*
         * Drop cache reference from old page by unfreezing
         * to one less reference.
         * We know this isn't the last reference.
         */
        folio_ref_unfreeze(folio, expected_count - nr);

        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_MAPPED if they
         * are mapped to swap space.
         */
        if (newzone != oldzone) {
                struct lruvec *old_lruvec, *new_lruvec;
                struct mem_cgroup *memcg;

                memcg = folio_memcg(folio);
                old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
                new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

                __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
                __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
                if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
                        __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
                        __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

                        if (folio_test_pmd_mappable(folio)) {
                                __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
                                __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
                        }
                }
#ifdef CONFIG_SWAP
                if (folio_test_swapcache(folio)) {
                        __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
                        __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
                }
#endif
                if (dirty && mapping_can_writeback(mapping)) {
                        __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
                        __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
                        __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
                        __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
                }
        }
        local_irq_enable();

        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);
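
/*
 * Usage sketch (informational): ->migrate_folio implementations call
 * folio_migrate_mapping() first and, only on MIGRATEPAGE_SUCCESS, copy the
 * data and flags over - see migrate_folio_extra() below for the common
 * sequence.
 */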
/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
                                   struct folio *dst, struct folio *src)
{
        XA_STATE(xas, &mapping->i_pages, folio_index(src));
        int expected_count;

        xas_lock_irq(&xas);
        expected_count = folio_expected_refs(mapping, src);
        if (!folio_ref_freeze(src, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        dst->index = src->index;
        dst->mapping = src->mapping;

        folio_ref_add(dst, folio_nr_pages(dst));

        xas_store(&xas, dst);

        folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));

        xas_unlock_irq(&xas);

        return MIGRATEPAGE_SUCCESS;
}
/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
        int cpupid;

        if (folio_test_error(folio))
                folio_set_error(newfolio);
        if (folio_test_referenced(folio))
                folio_set_referenced(newfolio);
        if (folio_test_uptodate(folio))
                folio_mark_uptodate(newfolio);
        if (folio_test_clear_active(folio)) {
                VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
                folio_set_active(newfolio);
        } else if (folio_test_clear_unevictable(folio))
                folio_set_unevictable(newfolio);
        if (folio_test_workingset(folio))
                folio_set_workingset(newfolio);
        if (folio_test_checked(folio))
                folio_set_checked(newfolio);
        /*
         * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
         * migration entries. We can still have PG_anon_exclusive set on an
         * effectively unmapped and unreferenced first sub-pages of an
         * anonymous THP: we can simply copy it here via PG_mappedtodisk.
         */
        if (folio_test_mappedtodisk(folio))
                folio_set_mappedtodisk(newfolio);

        /* Move dirty on pages not done by folio_migrate_mapping() */
        if (folio_test_dirty(folio))
                folio_set_dirty(newfolio);

        if (folio_test_young(folio))
                folio_set_young(newfolio);
        if (folio_test_idle(folio))
                folio_set_idle(newfolio);

        /*
         * Copy NUMA information to the new page, to prevent over-eager
         * future migrations of this same page.
         */
        cpupid = folio_xchg_last_cpupid(folio, -1);
        /*
         * For memory tiering mode, when migrate between slow and fast
         * memory node, reset cpupid, because that is used to record
         * page access time in slow memory node.
         */
        if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
                bool f_toptier = node_is_toptier(folio_nid(folio));
                bool t_toptier = node_is_toptier(folio_nid(newfolio));

                if (f_toptier != t_toptier)
                        cpupid = -1;
        }
        folio_xchg_last_cpupid(newfolio, cpupid);

        folio_migrate_ksm(newfolio, folio);
        /*
         * Please do not reorder this without considering how mm/ksm.c's
         * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
         */
        if (folio_test_swapcache(folio))
                folio_clear_swapcache(folio);
        folio_clear_private(folio);

        /* page->private contains hugetlb specific flags */
        if (!folio_test_hugetlb(folio))
                folio->private = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (folio_test_writeback(newfolio))
                folio_end_writeback(newfolio);

        /*
         * PG_readahead shares the same bit with PG_reclaim. The above
         * end_page_writeback() may clear PG_readahead mistakenly, so set the
         * bit after that.
         */
        if (folio_test_readahead(folio))
                folio_set_readahead(newfolio);

        folio_copy_owner(newfolio, folio);

        mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
        folio_copy(newfolio, folio);
        folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);
/************************************************************
 *                    Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
                struct folio *src, enum migrate_mode mode, int extra_count)
{
        int rc;

        BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */

        rc = folio_migrate_mapping(mapping, dst, src, extra_count);

        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (mode != MIGRATE_SYNC_NO_COPY)
                folio_migrate_copy(dst, src);
        else
                folio_migrate_flags(dst, src);
        return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
                struct folio *src, enum migrate_mode mode)
{
        return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);
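
/*
 * Example (informational): filesystems whose folios need no special
 * treatment typically wire this up directly in their
 * address_space_operations, e.g. ".migrate_folio = migrate_folio".
 */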
#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                        enum migrate_mode mode)
{
        struct buffer_head *bh = head;
        struct buffer_head *failed_bh;

        do {
                if (!trylock_buffer(bh)) {
                        if (mode == MIGRATE_ASYNC)
                                goto unlock;
                        if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
                                goto unlock;
                        lock_buffer(bh);
                }

                bh = bh->b_this_page;
        } while (bh != head);

        return true;

unlock:
        /* We failed to lock the buffer and cannot stall. */
        failed_bh = bh;
        bh = head;
        while (bh != failed_bh) {
                unlock_buffer(bh);
                bh = bh->b_this_page;
        }

        return false;
}
static int __buffer_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode,
                bool check_refs)
{
        struct buffer_head *bh, *head;
        int rc;
        int expected_count;

        head = folio_buffers(src);
        if (!head)
                return migrate_folio(mapping, dst, src, mode);

        /* Check whether page does not have extra refs before we do more work */
        expected_count = folio_expected_refs(mapping, src);
        if (folio_ref_count(src) != expected_count)
                return -EAGAIN;

        if (!buffer_migrate_lock_buffers(head, mode))
                return -EAGAIN;

        if (check_refs) {
                bool busy;
                bool invalidated = false;

recheck_buffers:
                busy = false;
                spin_lock(&mapping->i_private_lock);
                bh = head;
                do {
                        if (atomic_read(&bh->b_count)) {
                                busy = true;
                                break;
                        }
                        bh = bh->b_this_page;
                } while (bh != head);
                if (busy) {
                        if (invalidated) {
                                rc = -EAGAIN;
                                goto unlock_buffers;
                        }
                        spin_unlock(&mapping->i_private_lock);
                        invalidate_bh_lrus();
                        invalidated = true;
                        goto recheck_buffers;
                }
        }

        rc = folio_migrate_mapping(mapping, dst, src, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                goto unlock_buffers;

        folio_attach_private(dst, folio_detach_private(src));

        bh = head;
        do {
                folio_set_bh(bh, dst, bh_offset(bh));
                bh = bh->b_this_page;
        } while (bh != head);

        if (mode != MIGRATE_SYNC_NO_COPY)
                folio_migrate_copy(dst, src);
        else
                folio_migrate_flags(dst, src);

        rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
        if (check_refs)
                spin_unlock(&mapping->i_private_lock);
        bh = head;
        do {
                unlock_buffer(bh);
                bh = bh->b_this_page;
        } while (bh != head);

        return rc;
}
/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */
int filemap_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        int ret;

        ret = folio_migrate_mapping(mapping, dst, src, 0);
        if (ret != MIGRATEPAGE_SUCCESS)
                return ret;

        if (folio_get_private(src))
                folio_attach_private(dst, folio_detach_private(src));

        if (mode != MIGRATE_SYNC_NO_COPY)
                folio_migrate_copy(dst, src);
        else
                folio_migrate_flags(dst, src);
        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);
/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!folio_clear_dirty_for_io(folio))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty folio may imply that the underlying filesystem has
         * the folio on some queue. So the folio must be clean for
         * migration. Writeout may mean we lose the lock and the
         * folio state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(folio, folio, false);

        rc = mapping->a_ops->writepage(&folio->page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                folio_lock(folio);

        return (rc < 0) ? -EIO : -EAGAIN;
}
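
/*
 * Note: writeout() never reports success; even after a clean writepage()
 * call the migration attempt itself is returned as -EAGAIN, so the caller
 * retries against the now-clean folio.
 */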
/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
                struct folio *dst, struct folio *src, enum migrate_mode mode)
{
        if (folio_test_dirty(src)) {
                /* Only writeback folios in full synchronous migration */
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        return -EBUSY;
                }
                return writeout(mapping, src);
        }

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (!filemap_release_folio(src, GFP_KERNEL))
                return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

        return migrate_folio(mapping, dst, src, mode);
}
/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
                                enum migrate_mode mode)
{
        int rc = -EAGAIN;
        bool is_lru = !__folio_test_movable(src);

        VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
        VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

        if (likely(is_lru)) {
                struct address_space *mapping = folio_mapping(src);

                if (!mapping)
                        rc = migrate_folio(mapping, dst, src, mode);
                else if (mapping_unmovable(mapping))
                        rc = -EOPNOTSUPP;
                else if (mapping->a_ops->migrate_folio)
                        /*
                         * Most folios have a mapping and most filesystems
                         * provide a migrate_folio callback. Anonymous folios
                         * are part of swap space which also has its own
                         * migrate_folio callback. This is the most common path
                         * for page migration.
                         */
                        rc = mapping->a_ops->migrate_folio(mapping, dst, src,
                                                                mode);
                else
                        rc = fallback_migrate_folio(mapping, dst, src, mode);
        } else {
                const struct movable_operations *mops;

                /*
                 * In case of non-lru page, it could be released after
                 * isolation step. In that case, we shouldn't try migration.
                 */
                VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
                if (!folio_test_movable(src)) {
                        rc = MIGRATEPAGE_SUCCESS;
                        folio_clear_isolated(src);
                        goto out;
                }

                mops = folio_movable_ops(src);
                rc = mops->migrate_page(&dst->page, &src->page, mode);
                WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
                                !folio_test_isolated(src));
        }

        /*
         * When successful, old pagecache src->mapping must be cleared before
         * src is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                if (__folio_test_movable(src)) {
                        VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

                        /*
                         * We clear PG_movable under page_lock so any compactor
                         * cannot try to migrate this page.
                         */
                        folio_clear_isolated(src);
                }

                /*
                 * Anonymous and movable src->mapping will be cleared by
                 * free_pages_prepare so don't reset it here for keeping
                 * the type to work PageAnon, for example.
                 */
                if (!folio_mapping_flags(src))
                        src->mapping = NULL;

                if (likely(!folio_is_zone_device(dst)))
                        flush_dcache_folio(dst);
        }
out:
        return rc;
}
/*
 * To record some information during migration, we use unused private
 * field of struct folio of the newly allocated destination folio.
 * This is safe because nobody is using it except us.
 */
enum {
        PAGE_WAS_MAPPED = BIT(0),
        PAGE_WAS_MLOCKED = BIT(1),
        PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};

static void __migrate_folio_record(struct folio *dst,
                                   int old_page_state,
                                   struct anon_vma *anon_vma)
{
        dst->private = (void *)anon_vma + old_page_state;
}

static void __migrate_folio_extract(struct folio *dst,
                                    int *old_page_state,
                                    struct anon_vma **anon_vmap)
{
        unsigned long private = (unsigned long)dst->private;

        *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
        *old_page_state = private & PAGE_OLD_STATES;
        dst->private = NULL;
}
/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
                                   int page_was_mapped,
                                   struct anon_vma *anon_vma,
                                   bool locked,
                                   struct list_head *ret)
{
        if (page_was_mapped)
                remove_migration_ptes(src, src, false);
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        if (locked)
                folio_unlock(src);
        if (ret)
                list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
                free_folio_t put_new_folio, unsigned long private)
{
        if (locked)
                folio_unlock(dst);
        if (put_new_folio)
                put_new_folio(dst, private);
        else
                folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
                               enum migrate_reason reason)
{
        /*
         * Compaction can migrate also non-LRU pages which are
         * not accounted to NR_ISOLATED_*. They can be recognized
         * as __folio_test_movable
         */
        if (likely(!__folio_test_movable(src)))
                mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
                                    folio_is_file_lru(src), -folio_nr_pages(src));

        if (reason != MR_MEMORY_FAILURE)
                /* We release the page in page_handle_poison. */
                folio_put(src);
}
/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                struct folio *src, struct folio **dstp, enum migrate_mode mode,
                enum migrate_reason reason, struct list_head *ret)
{
        struct folio *dst;
        int rc = -EAGAIN;
        int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__folio_test_movable(src);
        bool locked = false;
        bool dst_locked = false;

        if (folio_ref_count(src) == 1) {
                /* Folio was freed from under us. So we are done. */
                folio_clear_active(src);
                folio_clear_unevictable(src);
                /* free_pages_prepare() will clear PG_isolated. */
                list_del(&src->lru);
                migrate_folio_done(src, reason);
                return MIGRATEPAGE_SUCCESS;
        }

        dst = get_new_folio(src, private);
        if (!dst)
                return -ENOMEM;
        *dstp = dst;

        dst->private = NULL;

        if (!folio_trylock(src)) {
                if (mode == MIGRATE_ASYNC)
                        goto out;

                /*
                 * It's not safe for direct compaction to call lock_page.
                 * For example, during page readahead pages are added locked
                 * to the LRU. Later, when the IO completes the pages are
                 * marked uptodate and unlocked. However, the queueing
                 * could be merging multiple pages for one bio (e.g.
                 * mpage_readahead). If an allocation happens for the
                 * second or third page, the process can end up locking
                 * the same page twice and deadlocking. Rather than
                 * trying to be clever about what pages can be locked,
                 * avoid the use of lock_page for direct compaction
                 * altogether.
                 */
                if (current->flags & PF_MEMALLOC)
                        goto out;

                /*
                 * In "light" mode, we can wait for transient locks (eg
                 * inserting a page into the page table), but it's not
                 * worth waiting for I/O.
                 */
                if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
                        goto out;

                folio_lock(src);
        }
        locked = true;
        if (folio_test_mlocked(src))
                old_page_state |= PAGE_WAS_MLOCKED;

        if (folio_test_writeback(src)) {
                /*
                 * Only in the case of a full synchronous migration is it
                 * necessary to wait for PageWriteback. In the async case,
                 * the retry loop is too short and in the sync-light case,
                 * the overhead of stalling is too much
                 */
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        rc = -EBUSY;
                        goto out;
                }
                folio_wait_writeback(src);
        }

        /*
         * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
         * we cannot notice that anon_vma is freed while we migrate a page.
         * This get_anon_vma() delays freeing anon_vma pointer until the end
         * of migration. File cache pages are no problem because of page_lock()
         * File Caches may use write_page() or lock_page() in migration, then,
         * just care Anon page here.
         *
         * Only folio_get_anon_vma() understands the subtleties of
         * getting a hold on an anon_vma from outside one of its mms.
         * But if we cannot get anon_vma, then we won't need it anyway,
         * because that implies that the anon page is no longer mapped
         * (and cannot be remapped so long as we hold the page lock).
         */
        if (folio_test_anon(src) && !folio_test_ksm(src))
                anon_vma = folio_get_anon_vma(src);

        /*
         * Block others from accessing the new page when we get around to
         * establishing additional references. We are usually the only one
         * holding a reference to dst at this point. We used to have a BUG
         * here if folio_trylock(dst) fails, but would like to allow for
         * cases where there might be a race with the previous use of dst.
         * This is much like races on refcount of oldpage: just don't BUG().
         */
        if (unlikely(!folio_trylock(dst)))
                goto out;
        dst_locked = true;

        if (unlikely(!is_lru)) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a src->mapping==NULL page will
         * trigger a BUG. So handle it here.
         * 2. An orphaned page (see truncate_cleanup_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining. Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page can not be migrated. So try to
         * free the metadata, so the page can be freed.
         */
        if (!src->mapping) {
                if (folio_test_private(src)) {
                        try_to_free_buffers(src);
                        goto out;
                }
        } else if (folio_mapped(src)) {
                /* Establish migration ptes */
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                               !folio_test_ksm(src) && !anon_vma, src);
                try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
                old_page_state |= PAGE_WAS_MAPPED;
        }

        if (!folio_mapped(src)) {
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }

out:
        /*
         * A folio that has not been unmapped will be restored to
         * right list unless we want to retry.
         */
        if (rc == -EAGAIN)
                ret = NULL;

        migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
                               anon_vma, locked, ret);
        migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

        return rc;
}
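
/*
 * Note: on MIGRATEPAGE_UNMAP both @src and @dst are returned still locked,
 * with the old page state and anon_vma stashed in dst->private via
 * __migrate_folio_record(); migrate_folio_move() picks them up again to
 * finish (or unwind) the migration.
 */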
/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
                              struct folio *src, struct folio *dst,
                              enum migrate_mode mode, enum migrate_reason reason,
                              struct list_head *ret)
{
        int rc;
        int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__folio_test_movable(src);
        struct list_head *prev;

        __migrate_folio_extract(dst, &old_page_state, &anon_vma);
        prev = dst->lru.prev;
        list_del(&dst->lru);

        rc = move_to_new_folio(dst, src, mode);
        if (rc)
                goto out;

        if (unlikely(!is_lru))
                goto out_unlock_both;

        /*
         * When successful, push dst to LRU immediately: so that if it
         * turns out to be an mlocked page, remove_migration_ptes() will
         * automatically build up the correct dst->mlock_count for it.
         *
         * We would like to do something similar for the old page, when
         * unsuccessful, and other cases when a page has been temporarily
         * isolated from the unevictable LRU: but this case is the easiest.
         */
        folio_add_lru(dst);
        if (old_page_state & PAGE_WAS_MLOCKED)
                lru_add_drain();

        if (old_page_state & PAGE_WAS_MAPPED)
                remove_migration_ptes(src, dst, false);

out_unlock_both:
        folio_unlock(dst);
        set_page_owner_migrate_reason(&dst->page, reason);
        /*
         * If migration is successful, decrease refcount of dst,
         * which will not free the page because new page owner increased
         * refcounter.
         */
        folio_put(dst);

        /*
         * A folio that has been migrated has all references removed
         * and will be freed.
         */
        list_del(&src->lru);
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        folio_unlock(src);
        migrate_folio_done(src, reason);

        return rc;
out:
        /*
         * A folio that has not been migrated will be restored to
         * right list unless we want to retry.
         */
        if (rc == -EAGAIN) {
                list_add(&dst->lru, prev);
                __migrate_folio_record(dst, old_page_state, anon_vma);
                return rc;
        }

        migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
                               anon_vma, true, ret);
        migrate_folio_undo_dst(dst, true, put_new_folio, private);

        return rc;
}
/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and writeback status of all subpages are counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                struct folio *src, int force, enum migrate_mode mode,
                int reason, struct list_head *ret)
{
        struct folio *dst;
        int rc = -EAGAIN;
        int page_was_mapped = 0;
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;

        if (folio_ref_count(src) == 1) {
                /* page was freed from under us. So we are done. */
                folio_putback_active_hugetlb(src);
                return MIGRATEPAGE_SUCCESS;
        }

        dst = get_new_folio(src, private);
        if (!dst)
                return -ENOMEM;

        if (!folio_trylock(src)) {
                if (!force)
                        goto out;
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        goto out;
                }
                folio_lock(src);
        }

        /*
         * Check for pages which are in the process of being freed. Without
         * folio_mapping() set, hugetlbfs specific move page routine will not
         * be called and we could leak usage counts for subpools.
         */
        if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
                rc = -EBUSY;
                goto out_unlock;
        }

        if (folio_test_anon(src))
                anon_vma = folio_get_anon_vma(src);

        if (unlikely(!folio_trylock(dst)))
                goto put_anon;

        if (folio_mapped(src)) {
                enum ttu_flags ttu = 0;

                if (!folio_test_anon(src)) {
                        /*
                         * In shared mappings, try_to_unmap could potentially
                         * call huge_pmd_unshare. Because of this, take
                         * semaphore in write mode here and set TTU_RMAP_LOCKED
                         * to let lower levels know we have taken the lock.
                         */
                        mapping = hugetlb_page_mapping_lock_write(&src->page);
                        if (unlikely(!mapping))
                                goto unlock_put_anon;

                        ttu = TTU_RMAP_LOCKED;
                }

                try_to_migrate(src, ttu);
                page_was_mapped = 1;

                if (ttu & TTU_RMAP_LOCKED)
                        i_mmap_unlock_write(mapping);
        }

        if (!folio_mapped(src))
                rc = move_to_new_folio(dst, src, mode);

        if (page_was_mapped)
                remove_migration_ptes(src,
                        rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
        folio_unlock(dst);

put_anon:
        if (anon_vma)
                put_anon_vma(anon_vma);

        if (rc == MIGRATEPAGE_SUCCESS) {
                move_hugetlb_state(src, dst, reason);
                put_new_folio = NULL;
        }

out_unlock:
        folio_unlock(src);
out:
        if (rc == MIGRATEPAGE_SUCCESS)
                folio_putback_active_hugetlb(src);
        else if (rc != -EAGAIN)
                list_move_tail(&src->lru, ret);

        /*
         * If migration was not successful and there's a freeing callback, use
         * it. Otherwise, put_page() will drop the reference grabbed during
         * isolation.
         */
        if (put_new_folio)
                put_new_folio(dst, private);
        else
                folio_putback_active_hugetlb(dst);

        return rc;
}
static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
        int rc;

        folio_lock(folio);
        rc = split_folio_to_list(folio, split_folios);
        folio_unlock(folio);
        if (!rc)
                list_move_tail(&folio->lru, split_folios);

        return rc;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION        HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION        512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY      10
#define NR_MAX_MIGRATE_ASYNC_RETRY      3
#define NR_MAX_MIGRATE_SYNC_RETRY                                       \
        (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)

struct migrate_pages_stats {
        int nr_succeeded;       /* Normal and large folios migrated successfully, in
                                   units of base pages */
        int nr_failed_pages;    /* Normal and large folios failed to be migrated, in
                                   units of base pages. Untried folios aren't counted */
        int nr_thp_succeeded;   /* THP migrated successfully */
        int nr_thp_failed;      /* THP failed to be migrated */
        int nr_thp_split;       /* THP split before migrating */
        int nr_split;           /* Large folio (include THP) split before migrating */
};
/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
 * any more because the list has become empty or no retryable hugetlb folios
 * exist any more. It is caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
                            free_folio_t put_new_folio, unsigned long private,
                            enum migrate_mode mode, int reason,
                            struct migrate_pages_stats *stats,
                            struct list_head *ret_folios)
{
        int retry = 1;
        int nr_failed = 0;
        int nr_retry_pages = 0;
        int pass = 0;
        struct folio *folio, *folio2;
        int rc, nr_pages;

        for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
                retry = 0;
                nr_retry_pages = 0;

                list_for_each_entry_safe(folio, folio2, from, lru) {
                        if (!folio_test_hugetlb(folio))
                                continue;

                        nr_pages = folio_nr_pages(folio);

                        cond_resched();

                        /*
                         * Migratability of hugepages depends on architectures and
                         * their size. This check is necessary because some callers
                         * of hugepage migration like soft offline and memory
                         * hotremove don't walk through page tables or check whether
                         * the hugepage is pmd-based or not before kicking migration.
                         */
                        if (!hugepage_migration_supported(folio_hstate(folio))) {
                                nr_failed++;
                                stats->nr_failed_pages += nr_pages;
                                list_move_tail(&folio->lru, ret_folios);
                                continue;
                        }

                        rc = unmap_and_move_huge_page(get_new_folio,
                                                      put_new_folio, private,
                                                      folio, pass > 2, mode,
                                                      reason, ret_folios);
                        /*
                         * The rules are:
                         *      Success: hugetlb folio will be put back
                         *      -EAGAIN: stay on the from list
                         *      -ENOMEM: stay on the from list
                         *      Other errno: put on ret_folios list
                         */
                        switch (rc) {
                        case -ENOMEM:
                                /*
                                 * When memory is low, don't bother to try to migrate
                                 * other folios, just exit.
                                 */
                                stats->nr_failed_pages += nr_pages + nr_retry_pages;
                                return -ENOMEM;
                        case -EAGAIN:
                                retry++;
                                nr_retry_pages += nr_pages;
                                break;
                        case MIGRATEPAGE_SUCCESS:
                                stats->nr_succeeded += nr_pages;
                                break;
                        default:
                                /*
                                 * Permanent failure (-EBUSY, etc.):
                                 * unlike -EAGAIN case, the failed folio is
                                 * removed from migration folio list and not
                                 * retried in the next outer loop.
                                 */
                                nr_failed++;
                                stats->nr_failed_pages += nr_pages;
                                break;
                        }
                }
        }
        /*
         * nr_failed is number of hugetlb folios failed to be migrated. After
         * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
         * folios as failed.
         */
        nr_failed += retry;
        stats->nr_failed_pages += nr_retry_pages;

        return nr_failed;
}
/*
 * migrate_pages_batch() first unmaps folios in the from list as many as
 * possible, then move the unmapped folios.
 *
 * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a
 * lock or bit when we have locked more than one folio. Which may cause
 * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
 * length of the from list must be <= 1.
 */
static int migrate_pages_batch(struct list_head *from,
                new_folio_t get_new_folio, free_folio_t put_new_folio,
                unsigned long private, enum migrate_mode mode, int reason,
                struct list_head *ret_folios, struct list_head *split_folios,
                struct migrate_pages_stats *stats, int nr_pass)
{
        int retry = 1;
        int thp_retry = 1;
        int nr_failed = 0;
        int nr_retry_pages = 0;
        int pass = 0;
        bool is_thp = false;
        bool is_large = false;
        struct folio *folio, *folio2, *dst = NULL, *dst2;
        int rc, rc_saved = 0, nr_pages;
        LIST_HEAD(unmap_folios);
        LIST_HEAD(dst_folios);
        bool nosplit = (reason == MR_NUMA_MISPLACED);

        VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
                        !list_empty(from) && !list_is_singular(from));

        for (pass = 0; pass < nr_pass && retry; pass++) {
                retry = 0;
                thp_retry = 0;
                nr_retry_pages = 0;

                list_for_each_entry_safe(folio, folio2, from, lru) {
                        is_large = folio_test_large(folio);
                        is_thp = is_large && folio_test_pmd_mappable(folio);
                        nr_pages = folio_nr_pages(folio);

                        cond_resched();

                        /*
                         * Large folio migration might be unsupported or
                         * the allocation might be failed so we should retry
                         * on the same folio with the large folio split
                         * to normal folios.
                         *
                         * Split folios are put in split_folios, and
                         * we will migrate them after the rest of the
                         * list is processed.
                         */
                        if (!thp_migration_supported() && is_thp) {
                                nr_failed++;
                                stats->nr_thp_failed++;
                                if (!try_split_folio(folio, split_folios)) {
                                        stats->nr_thp_split++;
                                        stats->nr_split++;
                                        continue;
                                }
                                stats->nr_failed_pages += nr_pages;
                                list_move_tail(&folio->lru, ret_folios);
                                continue;
                        }

                        rc = migrate_folio_unmap(get_new_folio, put_new_folio,
                                        private, folio, &dst, mode, reason,
                                        ret_folios);
                        /*
                         * The rules are:
                         *      Success: folio will be freed
                         *      Unmap: folio will be put on unmap_folios list,
                         *             dst folio put on dst_folios list
                         *      -EAGAIN: stay on the from list
                         *      -ENOMEM: stay on the from list
                         *      Other errno: put on ret_folios list
                         */
                        switch (rc) {
                        case -ENOMEM:
                                /*
                                 * When memory is low, don't bother to try to migrate
                                 * other folios, move unmapped folios, then exit.
                                 */
                                nr_failed++;
                                stats->nr_thp_failed += is_thp;
                                /* Large folio NUMA faulting doesn't split to retry. */
                                if (is_large && !nosplit) {
                                        int ret = try_split_folio(folio, split_folios);

                                        if (!ret) {
                                                stats->nr_thp_split += is_thp;
                                                stats->nr_split++;
                                                break;
                                        } else if (reason == MR_LONGTERM_PIN &&
                                                   ret == -EAGAIN) {
                                                /*
                                                 * Try again to split large folio to
                                                 * mitigate the failure of longterm pinning.
                                                 */
                                                retry++;
                                                thp_retry += is_thp;
                                                nr_retry_pages += nr_pages;
                                                /* Undo duplicated failure counting. */
                                                nr_failed--;
                                                stats->nr_thp_failed -= is_thp;
                                                break;
                                        }
                                }

                                stats->nr_failed_pages += nr_pages + nr_retry_pages;
                                /* nr_failed isn't updated for not used */
                                stats->nr_thp_failed += thp_retry;
                                rc_saved = rc;
                                if (list_empty(&unmap_folios))
                                        goto out;
                                else
                                        goto move;
                        case -EAGAIN:
                                retry++;
                                thp_retry += is_thp;
                                nr_retry_pages += nr_pages;
                                break;
                        case MIGRATEPAGE_SUCCESS:
                                stats->nr_succeeded += nr_pages;
                                stats->nr_thp_succeeded += is_thp;
                                break;
                        case MIGRATEPAGE_UNMAP:
                                list_move_tail(&folio->lru, &unmap_folios);
                                list_add_tail(&dst->lru, &dst_folios);
                                break;
                        default:
                                /*
                                 * Permanent failure (-EBUSY, etc.):
                                 * unlike -EAGAIN case, the failed folio is
                                 * removed from migration folio list and not
                                 * retried in the next outer loop.
                                 */
                                nr_failed++;
                                stats->nr_thp_failed += is_thp;
                                stats->nr_failed_pages += nr_pages;
                                break;
                        }
                }
        }
        nr_failed += retry;
        stats->nr_thp_failed += thp_retry;
        stats->nr_failed_pages += nr_retry_pages;
move:
        /* Flush TLBs for all unmapped folios */
        try_to_unmap_flush();

        retry = 1;
        for (pass = 0; pass < nr_pass && retry; pass++) {
                retry = 0;
                thp_retry = 0;
                nr_retry_pages = 0;

                dst = list_first_entry(&dst_folios, struct folio, lru);
                dst2 = list_next_entry(dst, lru);
                list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
                        is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
                        nr_pages = folio_nr_pages(folio);

                        cond_resched();

                        rc = migrate_folio_move(put_new_folio, private,
                                                folio, dst, mode,
                                                reason, ret_folios);
                        /*
                         * The rules are:
                         *      Success: folio will be freed
                         *      -EAGAIN: stay on the unmap_folios list
                         *      Other errno: put on ret_folios list
                         */
                        switch (rc) {
                        case -EAGAIN:
                                retry++;
                                thp_retry += is_thp;
                                nr_retry_pages += nr_pages;
                                break;
                        case MIGRATEPAGE_SUCCESS:
                                stats->nr_succeeded += nr_pages;
                                stats->nr_thp_succeeded += is_thp;
                                break;
                        default:
                                nr_failed++;
                                stats->nr_thp_failed += is_thp;
                                stats->nr_failed_pages += nr_pages;
                                break;
                        }
                        dst = dst2;
                        dst2 = list_next_entry(dst, lru);
                }
        }
        nr_failed += retry;
        stats->nr_thp_failed += thp_retry;
        stats->nr_failed_pages += nr_retry_pages;

        rc = rc_saved ? : nr_failed;
out:
        /* Cleanup remaining folios */
        dst = list_first_entry(&dst_folios, struct folio, lru);
        dst2 = list_next_entry(dst, lru);
        list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
                int old_page_state = 0;
                struct anon_vma *anon_vma = NULL;

                __migrate_folio_extract(dst, &old_page_state, &anon_vma);
                migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
                                       anon_vma, true, ret_folios);
                list_del(&dst->lru);
                migrate_folio_undo_dst(dst, true, put_new_folio, private);
                dst = dst2;
                dst2 = list_next_entry(dst, lru);
        }

        return rc;
}
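
/*
 * Note: the two-phase structure above is what makes batching pay off: all
 * folios are unmapped first with TTU_BATCH_FLUSH, so a single
 * try_to_unmap_flush() amortizes one TLB flush over the whole batch instead
 * of flushing once per folio.
 */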
static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                enum migrate_mode mode, int reason,
                struct list_head *ret_folios, struct list_head *split_folios,
                struct migrate_pages_stats *stats)
{
        int rc, nr_failed = 0;
        LIST_HEAD(folios);
        struct migrate_pages_stats astats;

        memset(&astats, 0, sizeof(astats));
        /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
        rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
                                 reason, &folios, split_folios, &astats,
                                 NR_MAX_MIGRATE_ASYNC_RETRY);
        stats->nr_succeeded += astats.nr_succeeded;
        stats->nr_thp_succeeded += astats.nr_thp_succeeded;
        stats->nr_thp_split += astats.nr_thp_split;
        stats->nr_split += astats.nr_split;
        if (rc < 0) {
                stats->nr_failed_pages += astats.nr_failed_pages;
                stats->nr_thp_failed += astats.nr_thp_failed;
                list_splice_tail(&folios, ret_folios);
                return rc;
        }
        stats->nr_thp_failed += astats.nr_thp_split;
        /*
         * Do not count rc, as pages will be retried below.
         * Count nr_split only, since it includes nr_thp_split.
         */
        nr_failed += astats.nr_split;
        /*
         * Fall back to migrate all failed folios one by one synchronously. All
         * failed folios except split THPs will be retried, so their failure
         * isn't counted
         */
        list_splice_tail_init(&folios, from);
        while (!list_empty(from)) {
                list_move(from->next, &folios);
                rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
                                         private, mode, reason, ret_folios,
                                         split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
                list_splice_tail_init(&folios, ret_folios);
                if (rc < 0)
                        return rc;
                nr_failed += rc;
        }

        return nr_failed;
}
/*
 * migrate_pages - migrate the folios specified in a list, to the free folios
 *                 supplied as the target for the page migration
 *
 * @from:               The list of folios to be migrated.
 * @get_new_folio:      The function used to allocate free folios to be used
 *                      as the target of the folio migration.
 * @put_new_folio:      The function used to free target folios if migration
 *                      fails, or NULL if no special handling is necessary.
 * @private:            Private data to be passed on to get_new_folio()
 * @mode:               The migration mode that specifies the constraints for
 *                      folio migration, if any.
 * @reason:             The reason for folio migration.
 * @ret_succeeded:      Set to the number of folios migrated successfully if
 *                      the caller passes a non-NULL pointer.
 *
 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
 * are movable any more because the list has become empty or no retryable folios
 * exist any more. It is caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 *
 * Returns the number of {normal folio, large folio, hugetlb} that were not
 * migrated, or an error code. The number of large folio splits will be
 * considered as the number of non-migrated large folio, no matter how many
 * split folios of the large folio are migrated successfully.
 */
int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
                free_folio_t put_new_folio, unsigned long private,
                enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
        int rc, rc_gather;
        int nr_pages;
        struct folio *folio, *folio2;
        LIST_HEAD(folios);
        LIST_HEAD(ret_folios);
        LIST_HEAD(split_folios);
        struct migrate_pages_stats stats;

        trace_mm_migrate_pages_start(mode, reason);

        memset(&stats, 0, sizeof(stats));

        rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
                                     mode, reason, &stats, &ret_folios);
        if (rc_gather < 0)
                goto out;

again:
        nr_pages = 0;
        list_for_each_entry_safe(folio, folio2, from, lru) {
                /* Retried hugetlb folios will be kept in list */
                if (folio_test_hugetlb(folio)) {
                        list_move_tail(&folio->lru, &ret_folios);
                        continue;
                }

                nr_pages += folio_nr_pages(folio);
                if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
                        break;
        }
        if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
                list_cut_before(&folios, from, &folio2->lru);
        else
                list_splice_init(from, &folios);
        if (mode == MIGRATE_ASYNC)
                rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
                                private, mode, reason, &ret_folios,
                                &split_folios, &stats,
                                NR_MAX_MIGRATE_PAGES_RETRY);
        else
                rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
                                private, mode, reason, &ret_folios,
                                &split_folios, &stats);
        list_splice_tail_init(&folios, &ret_folios);
        if (rc < 0) {
                rc_gather = rc;
                list_splice_tail(&split_folios, &ret_folios);
                goto out;
        }
        if (!list_empty(&split_folios)) {
                /*
                 * Failure isn't counted since all split folios of a large folio
                 * is counted as 1 failure already. And, we only try to migrate
                 * with minimal effort, force MIGRATE_ASYNC mode and retry once.
                 */
                migrate_pages_batch(&split_folios, get_new_folio,
                                put_new_folio, private, MIGRATE_ASYNC, reason,
                                &ret_folios, NULL, &stats, 1);
                list_splice_tail_init(&split_folios, &ret_folios);
        }
        rc_gather += rc;
        if (!list_empty(from))
                goto again;
out:
        /*
         * Put the permanent failure folio back to migration list, they
         * will be put back to the right list by the caller.
         */
        list_splice(&ret_folios, from);

        /*
         * Return 0 in case all split folios of fail-to-migrate large folios
         * are migrated successfully.
         */
        if (list_empty(from))
                rc_gather = 0;

        count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
        count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
        count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
        count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
        count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
        trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
                               stats.nr_thp_succeeded, stats.nr_thp_failed,
                               stats.nr_thp_split, stats.nr_split, mode,
                               reason);

        if (ret_succeeded)
                *ret_succeeded = stats.nr_succeeded;

        return rc_gather;
}
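
/*
 * Caller sketch (informational): see do_move_pages_to_node() below for a
 * typical invocation, which passes alloc_migration_target() together with a
 * struct migration_target_control via @private, using MIGRATE_SYNC and
 * MR_SYSCALL.
 */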
struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{
        struct migration_target_control *mtc;
        gfp_t gfp_mask;
        unsigned int order = 0;
        int nid;
        int zidx;

        mtc = (struct migration_target_control *)private;
        gfp_mask = mtc->gfp_mask;
        nid = mtc->nid;
        if (nid == NUMA_NO_NODE)
                nid = folio_nid(src);

        if (folio_test_hugetlb(src)) {
                struct hstate *h = folio_hstate(src);

                gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
                return alloc_hugetlb_folio_nodemask(h, nid,
                                                mtc->nmask, gfp_mask);
        }

        if (folio_test_large(src)) {
                /*
                 * clear __GFP_RECLAIM to make the migration callback
                 * consistent with regular THP allocations.
                 */
                gfp_mask &= ~__GFP_RECLAIM;
                gfp_mask |= GFP_TRANSHUGE;
                order = folio_order(src);
        }
        zidx = zone_idx(folio_zone(src));
        if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
                gfp_mask |= __GFP_HIGHMEM;

        return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
}
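
/*
 * Note: @private carries a struct migration_target_control here; callers
 * fill in .nid, .nmask and .gfp_mask to steer where and how the destination
 * folio is allocated (see do_move_pages_to_node() below).
 */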
#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
        while (nr-- > 0) {
                if (put_user(value, status + start))
                        return -EFAULT;
                start++;
        }

        return 0;
}

static int do_move_pages_to_node(struct list_head *pagelist, int node)
{
        int err;
        struct migration_target_control mtc = {
                .nid = node,
                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
        };

        err = migrate_pages(pagelist, alloc_migration_target, NULL,
                (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
        if (err)
                putback_movable_pages(pagelist);
        return err;
}
/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it to the given pagelist.
 * Returns:
 *     errno - if the page cannot be found/isolated
 *     0 - when it doesn't have to be migrated because it is already on the
 *         target node
 *     1 - when it has been queued
 */
static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
                int node, struct list_head *pagelist, bool migrate_all)
{
        struct vm_area_struct *vma;
        unsigned long addr;
        struct page *page;
        struct folio *folio;
        int err;

        mmap_read_lock(mm);
        addr = (unsigned long)untagged_addr_remote(mm, p);

        err = -EFAULT;
        vma = vma_lookup(mm, addr);
        if (!vma || !vma_migratable(vma))
                goto out;

        /* FOLL_DUMP to ignore special (like zero) pages */
        page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

        err = PTR_ERR(page);
        if (IS_ERR(page))
                goto out;

        err = -ENOENT;
        if (!page)
                goto out;

        folio = page_folio(page);
        if (folio_is_zone_device(folio))
                goto out_putfolio;

        err = 0;
        if (folio_nid(folio) == node)
                goto out_putfolio;

        err = -EACCES;
        if (page_mapcount(page) > 1 && !migrate_all)
                goto out_putfolio;

        err = -EBUSY;
        if (folio_test_hugetlb(folio)) {
                if (isolate_hugetlb(folio, pagelist))
                        err = 1;
        } else {
                if (!folio_isolate_lru(folio))
                        goto out_putfolio;

                err = 1;
                list_add_tail(&folio->lru, pagelist);
                node_stat_mod_folio(folio,
                                    NR_ISOLATED_ANON + folio_is_file_lru(folio),
                                    folio_nr_pages(folio));
        }
out_putfolio:
        /*
         * Either remove the duplicate refcount from folio_isolate_lru()
         * or drop the folio ref if it was not isolated.
         */
        folio_put(folio);
out:
        mmap_read_unlock(mm);
        return err;
}
static int move_pages_and_store_status(int node,
                struct list_head *pagelist, int __user *status,
                int start, int i, unsigned long nr_pages)
{
        int err;

        if (list_empty(pagelist))
                return 0;

        err = do_move_pages_to_node(pagelist, node);
        if (err) {
                /*
                 * Positive err means the number of failed
                 * pages to migrate. Since we are going to
                 * abort and return the number of non-migrated
                 * pages, so need to include the rest of the
                 * nr_pages that have not been attempted as
                 * well.
                 */
                if (err > 0)
                        err += nr_pages - i;
                return err;
        }
        return store_status(status, start, node, i - start);
}
/*
 * Migrate an array of page address onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                         unsigned long nr_pages,
                         const void __user * __user *pages,
                         const int __user *nodes,
                         int __user *status, int flags)
{
        compat_uptr_t __user *compat_pages = (void __user *)pages;
        int current_node = NUMA_NO_NODE;
        LIST_HEAD(pagelist);
        int start, i;
        int err = 0, err1;

        lru_cache_disable();

        for (i = start = 0; i < nr_pages; i++) {
                const void __user *p;
                int node;

                err = -EFAULT;
                if (in_compat_syscall()) {
                        compat_uptr_t cp;

                        if (get_user(cp, compat_pages + i))
                                goto out_flush;

                        p = compat_ptr(cp);
                } else {
                        if (get_user(p, pages + i))
                                goto out_flush;
                }
                if (get_user(node, nodes + i))
                        goto out_flush;

                err = -ENODEV;
                if (node < 0 || node >= MAX_NUMNODES)
                        goto out_flush;
                if (!node_state(node, N_MEMORY))
                        goto out_flush;

                err = -EACCES;
                if (!node_isset(node, task_nodes))
                        goto out_flush;

                if (current_node == NUMA_NO_NODE) {
                        current_node = node;
                        start = i;
                } else if (node != current_node) {
                        err = move_pages_and_store_status(current_node,
                                        &pagelist, status, start, i, nr_pages);
                        if (err)
                                goto out;
                        start = i;
                        current_node = node;
                }

                /*
                 * Errors in the page lookup or isolation are not fatal and we simply
                 * report them via status
                 */
                err = add_page_for_migration(mm, p, current_node, &pagelist,
                                             flags & MPOL_MF_MOVE_ALL);

                if (err > 0) {
                        /* The page is successfully queued for migration */
                        continue;
                }

                /*
                 * The move_pages() man page does not have an -EEXIST choice, so
                 * use -EFAULT instead.
                 */
                if (err == -EEXIST)
                        err = -EFAULT;

                /*
                 * If the page is already on the target node (!err), store the
                 * node, otherwise, store the err.
                 */
                err = store_status(status, i, err ? : current_node, 1);
                if (err)
                        goto out_flush;

                err = move_pages_and_store_status(current_node, &pagelist,
                                status, start, i, nr_pages);
                if (err) {
                        /* We have accounted for page i */
                        if (err > 0)
                                err--;
                        goto out;
                }
                current_node = NUMA_NO_NODE;
        }
out_flush:
        /* Make sure we do not overwrite the existing error */
        err1 = move_pages_and_store_status(current_node, &pagelist,
                        status, start, i, nr_pages);
        if (err >= 0)
                err = err1;
out:
        lru_cache_enable();
        return err;
}
/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	mmap_read_lock(mm);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = vma_lookup(mm, addr);
		if (!vma)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (!is_zone_device_page(page))
			err = page_to_nid(page);

		put_page(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	mmap_read_unlock(mm);
}

static int get_compat_pages_array(const void __user *chunk_pages[],
				  const void __user * __user *pages,
				  unsigned long chunk_nr)
{
	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
	compat_uptr_t p;
	int i;

	for (i = 0; i < chunk_nr; i++) {
		if (get_user(p, pages32 + i))
			return -EFAULT;
		chunk_pages[i] = compat_ptr(p);
	}

	return 0;
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16UL
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);

		if (in_compat_syscall()) {
			if (get_compat_pages_array(chunk_pages, pages,
						   chunk_nr))
				break;
		} else {
			if (copy_from_user(chunk_pages, pages,
				      chunk_nr * sizeof(*chunk_pages)))
				break;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

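/*
 * Note (editorial): the 16-entry chunk bounds the on-stack buffers (16
 * user pointers plus 16 ints) while still batching the per-page
 * copy_from_user()/copy_to_user() traffic instead of copying one entry
 * at a time.
 */
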
static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
{
	struct task_struct *task;
	struct mm_struct *mm;

	/*
	 * There is no need to check if the current process has the right to
	 * modify the specified process when they are the same.
	 */
	if (!pid) {
		mmget(current->mm);
		*mem_nodes = cpuset_mems_allowed(current);
		return current->mm;
	}

	/* Find the mm_struct */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	get_task_struct(task);
	rcu_read_unlock();

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		mm = ERR_PTR(-EPERM);
		goto out;
	}

	mm = ERR_PTR(security_task_movememory(task));
	if (IS_ERR(mm))
		goto out;
	*mem_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
out:
	put_task_struct(task);

	if (!mm)
		mm = ERR_PTR(-EINVAL);
	return mm;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	mm = find_mm_struct(pid, &task_nodes);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;
}

SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}

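/*
 * Userspace sketch (illustrative only): with libnuma's <numaif.h>, a
 * process could move one of its own pages to node 1 like this:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * Passing nodes == NULL instead queries placement via do_pages_stat();
 * on return, each status[i] holds a node id or a negative errno.
 */
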
#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!managed_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       ZONE_MOVABLE, 0))
			continue;
		return true;
	}
	return false;
}

static struct folio *alloc_misplaced_dst_folio(struct folio *src,
					   unsigned long data)
{
	int nid = (int) data;
	int order = folio_order(src);
	gfp_t gfp = __GFP_THISNODE;

	if (order > 0)
		gfp |= GFP_TRANSHUGE_LIGHT;
	else {
		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
			__GFP_NOWARN;
		gfp &= ~__GFP_RECLAIM;
	}
	return __folio_alloc_node(gfp, order, nid);
}

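/*
 * Note (editorial, on the gfp choice above): GFP_TRANSHUGE_LIGHT and the
 * cleared __GFP_RECLAIM keep the NUMA hint fault path from stalling in
 * direct reclaim or compaction; if the target node has no free pages the
 * migration simply fails rather than blocking the faulting task.
 */
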
static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
		int z;

		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
			return 0;
		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
			if (managed_zone(pgdat->node_zones + z))
				break;
		}
		wakeup_kswapd(pgdat->node_zones + z, 0,
			      folio_order(folio), ZONE_MOVABLE);
		return 0;
	}

	if (!folio_isolate_lru(folio))
		return 0;

	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
			    nr_pages);

	/*
	 * Isolating the folio has taken another reference, so the
	 * caller's reference can be safely dropped without the folio
	 * disappearing underneath us during migration.
	 */
	return 1;
}

/*
 * Attempt to migrate a misplaced folio to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the folio that will be dropped by this function before returning.
 */
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			    int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	unsigned int nr_succeeded;
	LIST_HEAD(migratepages);
	int nr_pages = folio_nr_pages(folio);

	/*
	 * Don't migrate file folios that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 * To check if the folio is shared, ideally we want to make sure
	 * every page is mapped to the same process. Doing that is very
	 * expensive, so check the estimated mapcount of the folio instead.
	 */
	if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty folios as not all filesystems can move
	 * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
	 */
	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
		goto out;

	isolated = numamigrate_isolate_folio(pgdat, folio);
	if (!isolated)
		goto out;

	list_add(&folio->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED, &nr_succeeded);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&folio->lru);
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -nr_pages);
			folio_putback_lru(folio);
		}
		isolated = 0;
	}
	if (nr_succeeded) {
		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
					    nr_succeeded);
	}
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	folio_put(folio);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */