// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
                                    unsigned long end,
                                    struct mm_walk *walk)
{
        struct migrate_vma *migrate = walk->private;
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                migrate->dst[migrate->npages] = 0;
                migrate->src[migrate->npages++] = 0;
        }

        return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
                                    unsigned long end,
                                    __always_unused int depth,
                                    struct mm_walk *walk)
{
        struct migrate_vma *migrate = walk->private;
        unsigned long addr;

        /* Only allow populating anonymous memory. */
        if (!vma_is_anonymous(walk->vma))
                return migrate_vma_collect_skip(start, end, walk);

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
                migrate->dst[migrate->npages] = 0;
                migrate->npages++;
                migrate->cpages++;
        }

        return 0;
}

static int migrate_vma_collect_pmd(pmd_t *pmdp,
                                   unsigned long start,
                                   unsigned long end,
                                   struct mm_walk *walk)
{
        struct migrate_vma *migrate = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start, unmapped = 0;
        spinlock_t *ptl;
        pte_t *ptep;

again:
        if (pmd_none(*pmdp))
                return migrate_vma_collect_hole(start, end, -1, walk);

        if (pmd_trans_huge(*pmdp)) {
                struct page *page;

                ptl = pmd_lock(mm, pmdp);
                if (unlikely(!pmd_trans_huge(*pmdp))) {
                        spin_unlock(ptl);
                        goto again;
                }

                page = pmd_page(*pmdp);
                if (is_huge_zero_page(page)) {
                        spin_unlock(ptl);
                        split_huge_pmd(vma, pmdp, addr);
                } else {
                        int ret;

                        get_page(page);
                        spin_unlock(ptl);
                        if (unlikely(!trylock_page(page)))
                                return migrate_vma_collect_skip(start, end,
                                                                walk);
                        ret = split_huge_page(page);
                        unlock_page(page);
                        put_page(page);
                        if (ret)
                                return migrate_vma_collect_skip(start, end,
                                                                walk);
                }
        }

        ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
        if (!ptep)
                goto again;
        arch_enter_lazy_mmu_mode();

        for (; addr < end; addr += PAGE_SIZE, ptep++) {
                unsigned long mpfn = 0, pfn;
                struct page *page;
                swp_entry_t entry;
                pte_t pte;

                pte = ptep_get(ptep);

                if (pte_none(pte)) {
                        if (vma_is_anonymous(vma)) {
                                mpfn = MIGRATE_PFN_MIGRATE;
                                migrate->cpages++;
                        }
                        goto next;
                }

                if (!pte_present(pte)) {
                        /*
                         * Only care about unaddressable device page special
                         * page table entry. Other special swap entries are not
                         * migratable, and we ignore regular swapped page.
                         */
                        entry = pte_to_swp_entry(pte);
                        if (!is_device_private_entry(entry))
                                goto next;

                        page = pfn_swap_entry_to_page(entry);
                        if (!(migrate->flags &
                              MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
                            page->pgmap->owner != migrate->pgmap_owner)
                                goto next;

                        mpfn = migrate_pfn(page_to_pfn(page)) |
                                        MIGRATE_PFN_MIGRATE;
                        if (is_writable_device_private_entry(entry))
                                mpfn |= MIGRATE_PFN_WRITE;
                } else {
                        pfn = pte_pfn(pte);
                        if (is_zero_pfn(pfn) &&
                            (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
                                mpfn = MIGRATE_PFN_MIGRATE;
                                migrate->cpages++;
                                goto next;
                        }
                        page = vm_normal_page(migrate->vma, addr, pte);
                        if (page && !is_zone_device_page(page) &&
                            !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
                                goto next;
                        else if (page && is_device_coherent_page(page) &&
                            (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
                             page->pgmap->owner != migrate->pgmap_owner))
                                goto next;
                        mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
                        mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
                }

                /* FIXME support THP */
                if (!page || !page->mapping || PageTransCompound(page)) {
                        mpfn = 0;
                        goto next;
                }

                /*
                 * By getting a reference on the page we pin it and that blocks
                 * any kind of migration. Side effect is that it "freezes" the
                 * pte.
                 *
                 * We drop this reference after isolating the page from the lru
                 * for non device page (device page are not on the lru and thus
                 * can't be dropped from it).
                 */
                get_page(page);

                /*
                 * We rely on trylock_page() to avoid deadlock between
                 * concurrent migrations where each is waiting on the others
                 * page lock. If we can't immediately lock the page we fail this
                 * migration as it is only best effort anyway.
                 *
                 * If we can lock the page it's safe to set up a migration entry
                 * now. In the common case where the page is mapped once in a
                 * single process setting up the migration entry now is an
                 * optimisation to avoid walking the rmap later with
                 * try_to_migrate().
                 */
                if (trylock_page(page)) {
                        bool anon_exclusive;
                        pte_t swp_pte;

                        flush_cache_page(vma, addr, pte_pfn(pte));
                        anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
                        if (anon_exclusive) {
                                pte = ptep_clear_flush(vma, addr, ptep);

                                if (page_try_share_anon_rmap(page)) {
                                        set_pte_at(mm, addr, ptep, pte);
                                        unlock_page(page);
                                        put_page(page);
                                        mpfn = 0;
                                        goto next;
                                }
                        } else {
                                pte = ptep_get_and_clear(mm, addr, ptep);
                        }

                        migrate->cpages++;

                        /* Set the dirty flag on the folio now the pte is gone. */
                        if (pte_dirty(pte))
                                folio_mark_dirty(page_folio(page));

                        /* Setup special migration page table entry */
                        if (mpfn & MIGRATE_PFN_WRITE)
                                entry = make_writable_migration_entry(
                                                        page_to_pfn(page));
                        else if (anon_exclusive)
                                entry = make_readable_exclusive_migration_entry(
                                                        page_to_pfn(page));
                        else
                                entry = make_readable_migration_entry(
                                                        page_to_pfn(page));
                        if (pte_present(pte)) {
                                if (pte_young(pte))
                                        entry = make_migration_entry_young(entry);
                                if (pte_dirty(pte))
                                        entry = make_migration_entry_dirty(entry);
                        }
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_present(pte)) {
                                if (pte_soft_dirty(pte))
                                        swp_pte = pte_swp_mksoft_dirty(swp_pte);
                                if (pte_uffd_wp(pte))
                                        swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        } else {
                                if (pte_swp_soft_dirty(pte))
                                        swp_pte = pte_swp_mksoft_dirty(swp_pte);
                                if (pte_swp_uffd_wp(pte))
                                        swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        }
                        set_pte_at(mm, addr, ptep, swp_pte);

                        /*
                         * This is like regular unmap: we remove the rmap and
                         * drop page refcount. Page won't be freed, as we took
                         * a reference just above.
                         */
                        page_remove_rmap(page, vma, false);
                        put_page(page);

                        if (pte_present(pte))
                                unmapped++;
                } else {
                        put_page(page);
                        mpfn = 0;
                }

next:
                migrate->dst[migrate->npages] = 0;
                migrate->src[migrate->npages++] = mpfn;
        }

        /* Only flush the TLB if we actually modified any entries */
        if (unmapped)
                flush_tlb_range(walk->vma, start, end);

        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(ptep - 1, ptl);

        return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
        .pmd_entry      = migrate_vma_collect_pmd,
        .pte_hole       = migrate_vma_collect_hole,
        .walk_lock      = PGWALK_RDLOCK,
};

/**
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
        struct mmu_notifier_range range;

        /*
         * Note that the pgmap_owner is passed to the mmu notifier callback so
         * that the registered device driver can skip invalidating device
         * private page mappings that won't be migrated.
         */
        mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
                migrate->vma->vm_mm, migrate->start, migrate->end,
                migrate->pgmap_owner);
        mmu_notifier_invalidate_range_start(&range);

        walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
                        &migrate_vma_walk_ops, migrate);

        mmu_notifier_invalidate_range_end(&range);
        migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
        /*
         * One extra ref because caller holds an extra reference, either from
         * isolate_lru_page() for a regular page, or migrate_vma_collect() for
         * a device page.
         */
        int extra = 1 + (page == fault_page);

        /*
         * FIXME support THP (transparent huge page), it is bit more complex to
         * check them than regular pages, because they can be mapped with a pmd
         * or with a pte (split pte mapping).
         */
        if (PageCompound(page))
                return false;

        /* Page from ZONE_DEVICE have one extra reference */
        if (is_zone_device_page(page))
                extra++;

        /* For file back page */
        if (page_mapping(page))
                extra += 1 + page_has_private(page);

        if ((page_count(page) - extra) > page_mapcount(page))
                return false;

        return true;
}

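/*
 * Illustrative note (not part of the original source): the test above is a
 * refcount balance check. Each remaining mapping of the page holds one page
 * reference, and "extra" counts the references we already know about (the
 * caller's isolation/collect reference, the fault_page reference, the
 * ZONE_DEVICE reference and any page cache/private references). If
 * page_count() still exceeds page_mapcount() after subtracting those, some
 * other party, e.g. a get_user_pages() pin or in-flight O_DIRECT I/O, holds
 * the page and it must not be migrated.
 */
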
/*
 * Unmaps pages for migration. Returns number of source pfns marked as
 * migrating.
 */
static unsigned long migrate_device_unmap(unsigned long *src_pfns,
                                          unsigned long npages,
                                          struct page *fault_page)
{
        unsigned long i, restore = 0;
        bool allow_drain = true;
        unsigned long unmapped = 0;

        lru_add_drain();

        for (i = 0; i < npages; i++) {
                struct page *page = migrate_pfn_to_page(src_pfns[i]);
                struct folio *folio;

                if (!page) {
                        if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
                                unmapped++;
                        continue;
                }

                /* ZONE_DEVICE pages are not on LRU */
                if (!is_zone_device_page(page)) {
                        if (!PageLRU(page) && allow_drain) {
                                /* Drain CPU's lru cache */
                                lru_add_drain_all();
                                allow_drain = false;
                        }

                        if (!isolate_lru_page(page)) {
                                src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                                restore++;
                                continue;
                        }

                        /* Drop the reference we took in collect */
                        put_page(page);
                }

                folio = page_folio(page);
                if (folio_mapped(folio))
                        try_to_migrate(folio, 0);

                if (page_mapped(page) ||
                    !migrate_vma_check_page(page, fault_page)) {
                        if (!is_zone_device_page(page)) {
                                get_page(page);
                                putback_lru_page(page);
                        }

                        src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                        restore++;
                        continue;
                }

                unmapped++;
        }

        for (i = 0; i < npages && restore; i++) {
                struct page *page = migrate_pfn_to_page(src_pfns[i]);
                struct folio *folio;

                if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
                        continue;

                folio = page_folio(page);
                remove_migration_ptes(folio, folio, false);

                src_pfns[i] = 0;
                folio_unlock(folio);
                folio_put(folio);
                restore--;
        }

        return unmapped;
}

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages are
 * restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy contents of original page over to new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
        migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
                                        migrate->fault_page);
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a range of memory virtual address range by collecting all
 * the pages backing each virtual address in the range, saving them inside the
 * src array. Then lock those pages and unmap them. Once the pages are locked
 * and unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Then restores any pages that are pinned, by
 * remapping and unlocking those pages.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
 * device memory to system memory. If the caller cannot migrate a device page
 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
 * consequences for the userspace process, so it must be avoided if at all
 * possible.
 *
 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
 * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
 * allowing the caller to allocate device memory for those unbacked virtual
 * addresses. For this the caller simply has to allocate device memory and
 * properly set the destination entry like for regular migration. Note that
 * this can still fail, and thus inside the device driver you must check if the
 * migration was successful for those entries after calling migrate_vma_pages(),
 * just like for regular migration.
 *
 * After that, the callers must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
 * then migrate_vma_pages() migrates the struct page information from the source
 * struct page to the destination struct page. If it fails to migrate the
 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
 * src array entry for that page.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
        long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

        args->start &= PAGE_MASK;
        args->end &= PAGE_MASK;
        if (!args->vma || is_vm_hugetlb_page(args->vma) ||
            (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
                return -EINVAL;
        if (nr_pages <= 0)
                return -EINVAL;
        if (args->start < args->vma->vm_start ||
            args->start >= args->vma->vm_end)
                return -EINVAL;
        if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
                return -EINVAL;
        if (!args->src || !args->dst)
                return -EINVAL;
        if (args->fault_page && !is_device_private_page(args->fault_page))
                return -EINVAL;

        memset(args->src, 0, sizeof(*args->src) * nr_pages);
        args->cpages = 0;
        args->npages = 0;

        migrate_vma_collect(args);

        if (args->cpages)
                migrate_vma_unmap(args);

        /*
         * At this point pages are locked and unmapped, and thus they have
         * stable content and can safely be copied to destination memory that
         * is allocated by the drivers.
         */
        return 0;

}
EXPORT_SYMBOL(migrate_vma_setup);
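
/*
 * Illustrative driver-side sketch (not part of this file): a typical
 * migrate_vma user follows the setup/pages/finalize sequence documented
 * above. The my_driver_*() helpers and drvdata below are hypothetical.
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= start + (npages << PAGE_SHIFT),
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= drvdata->pgmap.owner,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	ret = migrate_vma_setup(&args);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = my_driver_alloc_device_page(drvdata);
 *		if (!dpage)
 *			continue;
 *		lock_page(dpage);
 *		my_driver_copy_to_device(dpage,
 *					 migrate_pfn_to_page(args.src[i]));
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *
 *	migrate_vma_pages(&args);
 *	// update device page tables here, while pages are still locked
 *	migrate_vma_finalize(&args);
 */
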
/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
                                    unsigned long addr,
                                    struct page *page,
                                    unsigned long *src)
{
        struct vm_area_struct *vma = migrate->vma;
        struct mm_struct *mm = vma->vm_mm;
        bool flush = false;
        spinlock_t *ptl;
        pte_t entry;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        pte_t orig_pte;

        /* Only allow populating anonymous memory */
        if (!vma_is_anonymous(vma))
                goto abort;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_alloc(mm, pgdp, addr);
        if (!p4dp)
                goto abort;
        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                goto abort;
        pmdp = pmd_alloc(mm, pudp, addr);
        if (!pmdp)
                goto abort;
        if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
                goto abort;
        if (pte_alloc(mm, pmdp))
                goto abort;
        if (unlikely(anon_vma_prepare(vma)))
                goto abort;
        if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
                goto abort;

        /*
         * The memory barrier inside __SetPageUptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);

        if (is_device_private_page(page)) {
                swp_entry_t swp_entry;

                if (vma->vm_flags & VM_WRITE)
                        swp_entry = make_writable_device_private_entry(
                                                page_to_pfn(page));
                else
                        swp_entry = make_readable_device_private_entry(
                                                page_to_pfn(page));
                entry = swp_entry_to_pte(swp_entry);
        } else {
                if (is_zone_device_page(page) &&
                    !is_device_coherent_page(page)) {
                        pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
                        goto abort;
                }
                entry = mk_pte(page, vma->vm_page_prot);
                if (vma->vm_flags & VM_WRITE)
                        entry = pte_mkwrite(pte_mkdirty(entry), vma);
        }

        ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
        if (!ptep)
                goto abort;
        orig_pte = ptep_get(ptep);

        if (check_stable_address_space(mm))
                goto unlock_abort;

        if (pte_present(orig_pte)) {
                unsigned long pfn = pte_pfn(orig_pte);

                if (!is_zero_pfn(pfn))
                        goto unlock_abort;
                flush = true;
        } else if (!pte_none(orig_pte))
                goto unlock_abort;

        /*
         * Check for userfaultfd but do not deliver the fault. Instead,
         * just back off.
         */
        if (userfaultfd_missing(vma))
                goto unlock_abort;

        inc_mm_counter(mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, addr);
        if (!is_zone_device_page(page))
                lru_cache_add_inactive_or_unevictable(page, vma);
        get_page(page);

        if (flush) {
                flush_cache_page(vma, addr, pte_pfn(orig_pte));
                ptep_clear_flush(vma, addr, ptep);
                set_pte_at_notify(mm, addr, ptep, entry);
                update_mmu_cache(vma, addr, ptep);
        } else {
                /* No need to invalidate - it was non-present before */
                set_pte_at(mm, addr, ptep, entry);
                update_mmu_cache(vma, addr, ptep);
        }

        pte_unmap_unlock(ptep, ptl);
        *src = MIGRATE_PFN_MIGRATE;
        return;

unlock_abort:
        pte_unmap_unlock(ptep, ptl);
abort:
        *src &= ~MIGRATE_PFN_MIGRATE;
}

static void __migrate_device_pages(unsigned long *src_pfns,
                                unsigned long *dst_pfns, unsigned long npages,
                                struct migrate_vma *migrate)
{
        struct mmu_notifier_range range;
        unsigned long i;
        bool notified = false;

        for (i = 0; i < npages; i++) {
                struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
                struct page *page = migrate_pfn_to_page(src_pfns[i]);
                struct address_space *mapping;
                int r;

                if (!newpage) {
                        src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                        continue;
                }

                if (!page) {
                        unsigned long addr;

                        if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
                                continue;

                        /*
                         * The only time there is no vma is when called from
                         * migrate_device_coherent_page(). However this isn't
                         * called if the page could not be unmapped.
                         */
                        VM_BUG_ON(!migrate);
                        addr = migrate->start + i*PAGE_SIZE;
                        if (!notified) {
                                notified = true;

                                mmu_notifier_range_init_owner(&range,
                                        MMU_NOTIFY_MIGRATE, 0,
                                        migrate->vma->vm_mm, addr, migrate->end,
                                        migrate->pgmap_owner);
                                mmu_notifier_invalidate_range_start(&range);
                        }
                        migrate_vma_insert_page(migrate, addr, newpage,
                                                &src_pfns[i]);
                        continue;
                }

                mapping = page_mapping(page);

                if (is_device_private_page(newpage) ||
                    is_device_coherent_page(newpage)) {
                        if (mapping) {
                                struct folio *folio;

                                folio = page_folio(page);

                                /*
                                 * For now only support anonymous memory migrating to
                                 * device private or coherent memory.
                                 *
                                 * Try to get rid of swap cache if possible.
                                 */
                                if (!folio_test_anon(folio) ||
                                    !folio_free_swap(folio)) {
                                        src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                                        continue;
                                }
                        }
                } else if (is_zone_device_page(newpage)) {
                        /*
                         * Other types of ZONE_DEVICE page are not supported.
                         */
                        src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                        continue;
                }

                if (migrate && migrate->fault_page == page)
                        r = migrate_folio_extra(mapping, page_folio(newpage),
                                                page_folio(page),
                                                MIGRATE_SYNC_NO_COPY, 1);
                else
                        r = migrate_folio(mapping, page_folio(newpage),
                                        page_folio(page), MIGRATE_SYNC_NO_COPY);
                if (r != MIGRATEPAGE_SUCCESS)
                        src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
        }

        if (notified)
                mmu_notifier_invalidate_range_end(&range);
}

/**
 * migrate_device_pages() - migrate meta-data from src page to dst page
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Equivalent to migrate_vma_pages(). This is called to migrate struct page
 * meta-data from source struct page to destination.
 */
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
                        unsigned long npages)
{
        __migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
}
EXPORT_SYMBOL(migrate_device_pages);

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
        __migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
}
EXPORT_SYMBOL(migrate_vma_pages);

/**
 * migrate_device_finalize() - complete page migration
 * @src_pfns: src_pfns returned from migrate_device_range()
 * @dst_pfns: array of pfns allocated by the driver to migrate memory to
 * @npages: number of pages in the range
 *
 * Completes migration of the page by removing special migration entries.
 * Drivers must ensure copying of page data is complete and visible to the CPU
 * before calling this.
 */
void migrate_device_finalize(unsigned long *src_pfns,
                        unsigned long *dst_pfns, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; i++) {
                struct folio *dst, *src;
                struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
                struct page *page = migrate_pfn_to_page(src_pfns[i]);

                if (!page) {
                        if (newpage) {
                                unlock_page(newpage);
                                put_page(newpage);
                        }
                        continue;
                }

                if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
                        if (newpage) {
                                unlock_page(newpage);
                                put_page(newpage);
                        }
                        newpage = page;
                }

                src = page_folio(page);
                dst = page_folio(newpage);
                remove_migration_ptes(src, dst, false);
                folio_unlock(src);

                if (is_zone_device_page(page))
                        put_page(page);
                else
                        putback_lru_page(page);

                if (newpage != page) {
                        unlock_page(newpage);
                        if (is_zone_device_page(newpage))
                                put_page(newpage);
                        else
                                putback_lru_page(newpage);
                }
        }
}
EXPORT_SYMBOL(migrate_device_finalize);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the extra
 * refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
        migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
}
EXPORT_SYMBOL(migrate_vma_finalize);

/**
 * migrate_device_range() - migrate device private pfns to normal memory.
 * @src_pfns: array large enough to hold migrating source device private pfns.
 * @start: starting pfn in the range to migrate.
 * @npages: number of pages to migrate.
 *
 * migrate_device_range() is similar in concept to migrate_vma_setup() except
 * that instead of looking up pages based on virtual address mappings a range
 * of device pfns that should be migrated to system memory is used instead.
 *
 * This is useful when a driver needs to free device memory but doesn't know the
 * virtual mappings of every page that may be in device memory. For example this
 * is often the case when a driver is being unloaded or unbound from a device.
 *
 * Like migrate_vma_setup() this function will take a reference and lock any
 * migrating pages that aren't free before unmapping them. Drivers may then
 * allocate destination pages and start copying data from the device to CPU
 * memory before calling migrate_device_pages().
 */
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
                        unsigned long npages)
{
        unsigned long i, pfn;

        for (pfn = start, i = 0; i < npages; pfn++, i++) {
                struct page *page = pfn_to_page(pfn);

                if (!get_page_unless_zero(page)) {
                        src_pfns[i] = 0;
                        continue;
                }

                if (!trylock_page(page)) {
                        src_pfns[i] = 0;
                        put_page(page);
                        continue;
                }

                src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
        }

        migrate_device_unmap(src_pfns, npages, NULL);

        return 0;
}
EXPORT_SYMBOL(migrate_device_range);
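
/*
 * Illustrative sketch (not part of this file): a driver evicting its device
 * memory, e.g. on unbind, might drive the reverse migration as below. The
 * my_driver_copy_from_device() helper and the pfn-range bookkeeping are
 * hypothetical; allocating and sizing src_pfns/dst_pfns is the driver's job.
 *
 *	migrate_device_range(src_pfns, chunk_start_pfn, npages);
 *	for (i = 0; i < npages; i++) {
 *		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		spage = migrate_pfn_to_page(src_pfns[i]);
 *		dpage = alloc_page(GFP_HIGHUSER);
 *		if (!dpage)
 *			continue;
 *		lock_page(dpage);
 *		my_driver_copy_from_device(dpage, spage);
 *		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *	migrate_device_pages(src_pfns, dst_pfns, npages);
 *	migrate_device_finalize(src_pfns, dst_pfns, npages);
 */
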
/*
 * Migrate a device coherent page back to normal memory. The caller should have
 * a reference on page which will be copied to the new page if migration is
 * successful or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
        unsigned long src_pfn, dst_pfn = 0;
        struct page *dpage;

        WARN_ON_ONCE(PageCompound(page));

        lock_page(page);
        src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;

        /*
         * We don't have a VMA and don't need to walk the page tables to find
         * the source page. So call migrate_vma_unmap() directly to unmap the
         * page as migrate_vma_setup() will fail if args.vma == NULL.
         */
        migrate_device_unmap(&src_pfn, 1, NULL);
        if (!(src_pfn & MIGRATE_PFN_MIGRATE))
                return -EBUSY;

        dpage = alloc_page(GFP_USER | __GFP_NOWARN);
        if (dpage) {
                lock_page(dpage);
                dst_pfn = migrate_pfn(page_to_pfn(dpage));
        }

        migrate_device_pages(&src_pfn, &dst_pfn, 1);
        if (src_pfn & MIGRATE_PFN_MIGRATE)
                copy_highpage(dpage, page);
        migrate_device_finalize(&src_pfn, &dst_pfn, 1);

        if (src_pfn & MIGRATE_PFN_MIGRATE)