// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
struct follow_page_context {
        struct dev_pagemap *pgmap;
        unsigned int page_mask;
};
static inline void sanity_check_pinned_pages(struct page **pages,
                                             unsigned long npages)
{
        if (!IS_ENABLED(CONFIG_DEBUG_VM))
                return;

        /*
         * We only pin anonymous pages if they are exclusive. Once pinned, we
         * can no longer turn them possibly shared and PageAnonExclusive() will
         * stick around until the page is freed.
         *
         * We'd like to verify that our pinned anonymous pages are still mapped
         * exclusively. The issue with anon THP is that we don't know how
         * they are/were mapped when pinning them. However, for anon
         * THP we can assume that either the given page (PTE-mapped THP) or
         * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
         * neither is the case, there is certainly something wrong.
         */
        for (; npages; npages--, pages++) {
                struct page *page = *pages;
                struct folio *folio = page_folio(page);

                if (is_zero_page(page) ||
                    !folio_test_anon(folio))
                        continue;
                if (!folio_test_large(folio) || folio_test_hugetlb(folio))
                        VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
                else
                        /* Either a PTE-mapped or a PMD-mapped THP. */
                        VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
                                       !PageAnonExclusive(page), page);
        }
}
/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
        struct folio *folio;

retry:
        folio = page_folio(page);
        if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
                return NULL;
        if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
                return NULL;

        /*
         * At this point we have a stable reference to the folio; but it
         * could be that between calling page_folio() and the refcount
         * increment, the folio was split, in which case we'd end up
         * holding a reference on a folio that has nothing to do with the page
         * we were given anymore.
         * So now that the folio is stable, recheck that the page still
         * belongs to this folio.
         */
        if (unlikely(page_folio(page) != folio)) {
                if (!put_devmap_managed_page_refs(&folio->page, refs))
                        folio_put_refs(folio, refs);
                goto retry;
        }

        return folio;
}
/**
 * try_grab_folio() - Attempt to get or pin a folio.
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: folio's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on large folios: folio's refcount will be incremented by
 *    @refs, and its pincount will be incremented by @refs.
 *
 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
 *    @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
{
        struct folio *folio;

        if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
                return NULL;

        if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
                return NULL;

        if (flags & FOLL_GET)
                return try_get_folio(page, refs);

        /* FOLL_PIN is set */

        /*
         * Don't take a pin on the zero page - it's not going anywhere
         * and it is used in a *lot* of places.
         */
        if (is_zero_page(page))
                return page_folio(page);

        folio = try_get_folio(page, refs);
        if (!folio)
                return NULL;

        /*
         * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
         * right zone, so fail and let the caller fall back to the slow
         * path.
         */
        if (unlikely((flags & FOLL_LONGTERM) &&
                     !folio_is_longterm_pinnable(folio))) {
                if (!put_devmap_managed_page_refs(&folio->page, refs))
                        folio_put_refs(folio, refs);
                return NULL;
        }

        /*
         * When pinning a large folio, use an exact count to track it.
         *
         * However, be sure to *also* increment the normal folio
         * refcount field at least once, so that the folio really
         * is pinned.  That's why the refcount from the earlier
         * try_get_folio() is left intact.
         */
        if (folio_test_large(folio))
                atomic_add(refs, &folio->_pincount);
        else
                folio_ref_add(folio,
                              refs * (GUP_PIN_COUNTING_BIAS - 1));
        /*
         * Adjust the pincount before re-checking the PTE for changes.
         * This is essentially a smp_mb() and is paired with a memory
         * barrier in page_try_share_anon_rmap().
         */
        smp_mb__after_atomic();

        node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

        return folio;
}
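/*
 * Illustrative sketch (not part of gup.c, kept disabled): how the two "grab"
 * modes above differ in the counters they touch. demo_grab_modes() is a
 * hypothetical helper, shown only to make the accounting concrete; error
 * handling and the large-folio case are simplified.
 */
#if 0
static void demo_grab_modes(struct page *page)
{
        struct folio *folio;

        /* FOLL_GET: a plain reference, folio refcount goes up by 1. */
        folio = try_grab_folio(page, 1, FOLL_GET);
        if (folio)
                gup_put_folio(folio, 1, FOLL_GET);

        /*
         * FOLL_PIN: for a single-page folio the refcount goes up by
         * GUP_PIN_COUNTING_BIAS; for a large folio the refcount goes up by 1
         * and _pincount by 1, so the pin can later be detected exactly.
         */
        folio = try_grab_folio(page, 1, FOLL_PIN);
        if (folio)
                gup_put_folio(folio, 1, FOLL_PIN);      /* undoes the pin */
}
#endif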
static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
        if (flags & FOLL_PIN) {
                if (is_zero_folio(folio))
                        return;
                node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
                if (folio_test_large(folio))
                        atomic_sub(refs, &folio->_pincount);
                else
                        refs *= GUP_PIN_COUNTING_BIAS;
        }

        if (!put_devmap_managed_page_refs(&folio->page, refs))
                folio_put_refs(folio, refs);
}
/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 * @page:  pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_folio() documentation, with
 * "refs=1".
 *
 * Return: 0 for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). A negative error code for failure:
 *
 *   -ENOMEM            FOLL_GET or FOLL_PIN was set, but the page could not
 *                      be grabbed.
 */
int __must_check try_grab_page(struct page *page, unsigned int flags)
{
        struct folio *folio = page_folio(page);

        if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
                return -ENOMEM;

        if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
                return -EREMOTEIO;

        if (flags & FOLL_GET)
                folio_ref_inc(folio);
        else if (flags & FOLL_PIN) {
                /*
                 * Don't take a pin on the zero page - it's not going anywhere
                 * and it is used in a *lot* of places.
                 */
                if (is_zero_page(page))
                        return 0;

                /*
                 * Similar to try_grab_folio(): be sure to *also*
                 * increment the normal page refcount field at least once,
                 * so that the page really is pinned.
                 */
                if (folio_test_large(folio)) {
                        folio_ref_add(folio, 1);
                        atomic_add(1, &folio->_pincount);
                } else {
                        folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
                }

                node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
        }

        return 0;
}
/**
 * unpin_user_page() - release a dma-pinned page
 * @page:            pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
        sanity_check_pinned_pages(&page, 1);
        gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
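/*
 * Illustrative sketch (hypothetical, kept disabled): the canonical
 * pin/use/unpin pattern the comment above refers to. demo_pin_user_buffer()
 * is not a real kernel function; real callers typically hand the pinned
 * pages to DMA or direct I/O before unpinning.
 */
#if 0
static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                struct page **pages)
{
        int pinned;

        pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
        if (pinned < 0)
                return pinned;          /* -errno, nothing was pinned */

        /* ... work on pages[0..pinned-1] here ... */

        unpin_user_pages(pages, pinned);
        return 0;
}
#endif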
/**
 * folio_add_pin - Try to get an additional pin on a pinned folio
 * @folio: The folio to be pinned
 *
 * Get an additional pin on a folio we already have a pin on.  Makes no change
 * if the folio is a zero_page.
 */
void folio_add_pin(struct folio *folio)
{
        if (is_zero_folio(folio))
                return;

        /*
         * Similar to try_grab_folio(): be sure to *also* increment the normal
         * page refcount field at least once, so that the page really is
         * pinned.
         */
        if (folio_test_large(folio)) {
                WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
                folio_ref_inc(folio);
                atomic_inc(&folio->_pincount);
        } else {
                WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
                folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
        }
}
static inline struct folio *gup_folio_range_next(struct page *start,
                unsigned long npages, unsigned long i, unsigned int *ntails)
{
        struct page *next = nth_page(start, i);
        struct folio *folio = page_folio(next);
        unsigned int nr = 1;

        if (folio_test_large(folio))
                nr = min_t(unsigned int, npages - i,
                           folio_nr_pages(folio) - folio_page_idx(folio, next));

        *ntails = nr;
        return folio;
}
static inline struct folio *gup_folio_next(struct page **list,
                unsigned long npages, unsigned long i, unsigned int *ntails)
{
        struct folio *folio = page_folio(list[i]);
        unsigned int nr;

        for (nr = i + 1; nr < npages; nr++) {
                if (page_folio(list[nr]) != folio)
                        break;
        }

        *ntails = nr - i;
        return folio;
}
/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                                 bool make_dirty)
{
        unsigned long i;
        struct folio *folio;
        unsigned int nr;

        if (!make_dirty) {
                unpin_user_pages(pages, npages);
                return;
        }

        sanity_check_pinned_pages(pages, npages);
        for (i = 0; i < npages; i += nr) {
                folio = gup_folio_next(pages, npages, i, &nr);
                /*
                 * Checking PageDirty at this point may race with
                 * clear_page_dirty_for_io(), but that's OK. Two key
                 * cases:
                 *
                 * 1) This code sees the page as already dirty, so it
                 * skips the call to set_page_dirty(). That could happen
                 * because clear_page_dirty_for_io() called
                 * page_mkclean(), followed by set_page_dirty().
                 * However, now the page is going to get written back,
                 * which meets the original intention of setting it
                 * dirty, so all is well: clear_page_dirty_for_io() goes
                 * on to call TestClearPageDirty(), and write the page
                 * back.
                 *
                 * 2) This code sees the page as clean, so it calls
                 * set_page_dirty(). The page stays dirty, despite being
                 * written back, so it gets written back again in the
                 * next writeback cycle. This is harmless.
                 */
                if (!folio_test_dirty(folio)) {
                        folio_lock(folio);
                        folio_mark_dirty(folio);
                        folio_unlock(folio);
                }
                gup_put_folio(folio, nr, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
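/*
 * Illustrative sketch (hypothetical, kept disabled): a receive path that let
 * a device DMA into previously pinned pages and now releases them, passing
 * @device_wrote_data so that folios the hardware modified are marked dirty
 * before the FOLL_PIN references are dropped.
 */
#if 0
static void demo_unpin_after_device_write(struct page **pages,
                                          unsigned long npages,
                                          bool device_wrote_data)
{
        unpin_user_pages_dirty_lock(pages, npages, device_wrote_data);
}
#endif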
/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
                                      bool make_dirty)
{
        unsigned long i;
        struct folio *folio;
        unsigned int nr;

        for (i = 0; i < npages; i += nr) {
                folio = gup_folio_range_next(page, npages, i, &nr);
                if (make_dirty && !folio_test_dirty(folio)) {
                        folio_lock(folio);
                        folio_mark_dirty(folio);
                        folio_unlock(folio);
                }
                gup_put_folio(folio, nr, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
{
        unsigned long i;
        struct folio *folio;
        unsigned int nr;

        /*
         * Don't perform any sanity checks because we might have raced with
         * fork() and some anonymous pages might now actually be shared --
         * which is why we're unpinning after all.
         */
        for (i = 0; i < npages; i += nr) {
                folio = gup_folio_next(pages, npages, i, &nr);
                gup_put_folio(folio, nr, FOLL_PIN);
        }
}
/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
        unsigned long i;
        struct folio *folio;
        unsigned int nr;

        /*
         * If this WARN_ON() fires, then the system *might* be leaking pages (by
         * leaving them pinned), but probably not. More likely, gup/pup returned
         * a hard -ERRNO error to the caller, who erroneously passed it here.
         */
        if (WARN_ON(IS_ERR_VALUE(npages)))
                return;

        sanity_check_pinned_pages(pages, npages);
        for (i = 0; i < npages; i += nr) {
                folio = gup_folio_next(pages, npages, i, &nr);
                gup_put_folio(folio, nr, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_pages);
/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle.  Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
        if (!test_bit(MMF_HAS_PINNED, mm_flags))
                set_bit(MMF_HAS_PINNED, mm_flags);
}
static struct page *no_page_table(struct vm_area_struct *vma,
                unsigned int flags)
{
        /*
         * When core dumping an enormous anonymous area that nobody
         * has touched so far, we don't want to allocate unnecessary pages or
         * page tables.  Return error instead of NULL to skip handle_mm_fault,
         * then get_dump_page() will return NULL to leave a hole in the dump.
         * But we can only make this optimization where a hole would surely
         * be zero-filled if handle_mm_fault() actually did handle it.
         */
        if ((flags & FOLL_DUMP) &&
            (vma_is_anonymous(vma) || !vma->vm_ops->fault))
                return ERR_PTR(-EFAULT);
        return NULL;
}
static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, unsigned int flags)
{
        if (flags & FOLL_TOUCH) {
                pte_t orig_entry = ptep_get(pte);
                pte_t entry = orig_entry;

                if (flags & FOLL_WRITE)
                        entry = pte_mkdirty(entry);
                entry = pte_mkyoung(entry);

                if (!pte_same(orig_entry, entry)) {
                        set_pte_at(vma->vm_mm, address, pte, entry);
                        update_mmu_cache(vma, address, pte);
                }
        }

        /* Proper page table entry exists, but no corresponding struct page */
        return -EEXIST;
}
/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
                                        struct vm_area_struct *vma,
                                        unsigned int flags)
{
        /* If the pte is writable, we can write to the page. */
        if (pte_write(pte))
                return true;

        /* Maybe FOLL_FORCE is set to override it? */
        if (!(flags & FOLL_FORCE))
                return false;

        /* But FOLL_FORCE has no effect on shared mappings */
        if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
                return false;

        /* ... or read-only private ones */
        if (!(vma->vm_flags & VM_MAYWRITE))
                return false;

        /* ... or already writable ones that just need to take a write fault */
        if (vma->vm_flags & VM_WRITE)
                return false;

        /*
         * See can_change_pte_writable(): we broke COW and could map the page
         * writable if we have an exclusive anonymous page ...
         */
        if (!page || !PageAnon(page) || !PageAnonExclusive(page))
                return false;

        /* ... and a write-fault isn't required for other reasons. */
        if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
                return false;
        return !userfaultfd_pte_wp(vma, pte);
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags,
                struct dev_pagemap **pgmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
        spinlock_t *ptl;
        pte_t *ptep, pte;
        int ret;

        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
                return no_page_table(vma, flags);
        pte = ptep_get(ptep);
        if (!pte_present(pte))
                goto no_page;
        if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
                goto no_page;

        page = vm_normal_page(vma, address, pte);

        /*
         * We only care about anon pages in can_follow_write_pte() and don't
         * have to worry about pte_devmap() because they are never anon.
         */
        if ((flags & FOLL_WRITE) &&
            !can_follow_write_pte(pte, page, vma, flags)) {
                page = NULL;
                goto out;
        }

        if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                /*
                 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
                 * case since they are only valid while holding the pgmap
                 * reference.
                 */
                *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
                if (*pgmap)
                        page = pte_page(pte);
                else
                        goto no_page;
        } else if (unlikely(!page)) {
                if (flags & FOLL_DUMP) {
                        /* Avoid special (like zero) pages in core dumps */
                        page = ERR_PTR(-EFAULT);
                        goto out;
                }

                if (is_zero_pfn(pte_pfn(pte))) {
                        page = pte_page(pte);
                } else {
                        ret = follow_pfn_pte(vma, address, ptep, flags);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }

        if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
                page = ERR_PTR(-EMLINK);
                goto out;
        }

        VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
                       !PageAnonExclusive(page), page);

        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
        ret = try_grab_page(page, flags);
        if (unlikely(ret)) {
                page = ERR_PTR(ret);
                goto out;
        }

        /*
         * We need to make the page accessible if and only if we are going
         * to access its content (the FOLL_PIN case).  Please see
         * Documentation/core-api/pin_user_pages.rst for details.
         */
        if (flags & FOLL_PIN) {
                ret = arch_make_page_accessible(page);
                if (ret) {
                        unpin_user_page(page);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
                /*
                 * pte_mkyoung() would be more correct here, but atomic care
                 * is needed to avoid losing the dirty bit: it is easier to use
                 * mark_page_accessed().
                 */
                mark_page_accessed(page);
        }
out:
        pte_unmap_unlock(ptep, ptl);
        return page;
no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return NULL;
        return no_page_table(vma, flags);
}
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                                    unsigned long address, pud_t *pudp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pmd_t *pmd, pmdval;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pmd = pmd_offset(pudp, address);
        pmdval = pmdp_get_lockless(pmd);
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
        if (!pmd_present(pmdval))
                return no_page_table(vma, flags);
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (likely(!pmd_trans_huge(pmdval)))
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

        if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
                return no_page_table(vma, flags);

        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
                return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        if (flags & FOLL_SPLIT_PMD) {
                spin_unlock(ptl);
                split_huge_pmd(vma, pmd, address);
                /* If pmd was left empty, stuff a page table in there quickly */
                return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
                        follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        page = follow_trans_huge_pmd(vma, address, pmd, flags);
        spin_unlock(ptl);
        ctx->page_mask = HPAGE_PMD_NR - 1;
        return page;
}
static struct page *follow_pud_mask(struct vm_area_struct *vma,
                                    unsigned long address, p4d_t *p4dp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pud_t *pud;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pud = pud_offset(p4dp, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
        if (pud_devmap(*pud)) {
                ptl = pud_lock(mm, pud);
                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (unlikely(pud_bad(*pud)))
                return no_page_table(vma, flags);

        return follow_pmd_mask(vma, address, pud, flags, ctx);
}
static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                                    unsigned long address, pgd_t *pgdp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        p4d_t *p4d;

        p4d = p4d_offset(pgdp, address);
        if (p4d_none(*p4d))
                return no_page_table(vma, flags);
        BUILD_BUG_ON(p4d_huge(*p4d));
        if (unlikely(p4d_bad(*p4d)))
                return no_page_table(vma, flags);

        return follow_pud_mask(vma, address, p4d, flags, ctx);
}
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int flags,
                              struct follow_page_context *ctx)
{
        pgd_t *pgd;
        struct mm_struct *mm = vma->vm_mm;

        ctx->page_mask = 0;

        /*
         * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
         * special hugetlb page table walking code.  This eliminates the
         * need to check for hugetlb entries in the general walking code.
         */
        if (is_vm_hugetlb_page(vma))
                return hugetlb_follow_page_mask(vma, address, flags,
                                                &ctx->page_mask);

        pgd = pgd_offset(mm, address);

        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);

        return follow_p4d_mask(vma, address, pgd, flags, ctx);
}
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int foll_flags)
{
        struct follow_page_context ctx = { NULL };
        struct page *page;

        if (vma_is_secretmem(vma))
                return NULL;

        if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
                return NULL;

        /*
         * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
         * to fail on PROT_NONE-mapped pages.
         */
        page = follow_page_mask(vma, address, foll_flags, &ctx);
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return page;
}
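/*
 * Illustrative sketch (hypothetical, kept disabled): how an in-kernel walker
 * might use follow_page(). The caller must hold mmap_lock and, per the
 * warning above, must not pass FOLL_PIN; FOLL_GET makes the returned page
 * ref-counted so it can be inspected safely.
 */
#if 0
static void demo_inspect_one_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        struct page *page;

        mmap_assert_locked(vma->vm_mm);
        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                return;
        /* ... look at the page here ... */
        put_page(page);         /* drop the FOLL_GET reference */
}
#endif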
static int get_gate_page(struct mm_struct *mm, unsigned long address,
                unsigned int gup_flags, struct vm_area_struct **vma,
                struct page **page)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        int ret = -EFAULT;

        /* user gate pages are read-only */
        if (gup_flags & FOLL_WRITE)
                return -EFAULT;
        if (address > TASK_SIZE)
                pgd = pgd_offset_k(address);
        else
                pgd = pgd_offset_gate(mm, address);
        if (pgd_none(*pgd))
                return -EFAULT;
        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d))
                return -EFAULT;
        pud = pud_offset(p4d, address);
        if (pud_none(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return -EFAULT;
        pte = pte_offset_map(pmd, address);
        if (!pte)
                return -EFAULT;
        entry = ptep_get(pte);
        if (pte_none(entry))
                goto unmap;
        *vma = get_gate_vma(mm);
        if (!page)
                goto out;
        *page = vm_normal_page(*vma, address, entry);
        if (!*page) {
                if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
                        goto unmap;
                *page = pte_page(entry);
        }
        ret = try_grab_page(*page, gup_flags);
        if (unlikely(ret))
                goto unmap;
out:
        pte_unmap(pte);
        return ret;
unmap:
        pte_unmap(pte);
        return -EFAULT;
}
/*
 * mmap_lock must be held on entry.  If @flags has FOLL_UNLOCKABLE but not
 * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
 * to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, bool unshare,
                int *locked)
{
        unsigned int fault_flags = 0;
        vm_fault_t ret;

        if (*flags & FOLL_NOFAULT)
                return -EFAULT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_REMOTE)
                fault_flags |= FAULT_FLAG_REMOTE;
        if (*flags & FOLL_UNLOCKABLE) {
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
                /*
                 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
                 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
                 * That's because some callers may not be prepared to
                 * handle early exits caused by non-fatal signals.
                 */
                if (*flags & FOLL_INTERRUPTIBLE)
                        fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
        }
        if (*flags & FOLL_NOWAIT)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
        if (*flags & FOLL_TRIED) {
                /*
                 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
                 * can co-exist
                 */
                fault_flags |= FAULT_FLAG_TRIED;
        }
        if (unshare) {
                fault_flags |= FAULT_FLAG_UNSHARE;
                /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
                VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
        }

        ret = handle_mm_fault(vma, address, fault_flags, NULL);

        if (ret & VM_FAULT_COMPLETED) {
                /*
                 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
                 * mmap lock in the page fault handler. Sanity check this.
                 */
                WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
                *locked = 0;

                /*
                 * We should do the same as VM_FAULT_RETRY, but let's not
                 * return -EBUSY since that's not reflecting the reality of
                 * what has happened - we've just fully completed a page
                 * fault, with the mmap lock released.  Use -EAGAIN to show
                 * that we want to take the mmap lock _again_.
                 */
                return -EAGAIN;
        }

        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, *flags);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                        *locked = 0;
                return -EBUSY;
        }

        return 0;
}
/*
 * Writing to file-backed mappings which require folio dirty tracking using GUP
 * is a fundamentally broken operation, as kernel write access to GUP mappings
 * do not adhere to the semantics expected by a file system.
 *
 * Consider the following scenario:-
 *
 * 1. A folio is written to via GUP which write-faults the memory, notifying
 *    the file system and dirtying the folio.
 * 2. Later, writeback is triggered, resulting in the folio being cleaned and
 *    the PTE being marked read-only.
 * 3. The GUP caller writes to the folio, as it is mapped read/write via the
 *    GUP-mapping.
 * 4. The GUP caller, now done with the page, unpins it and sets it dirty
 *    (though it does not have to).
 *
 * This results in both data being written to a folio without writenotify, and
 * the folio being dirtied unexpectedly (if the caller decides to do so).
 */
static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
                                          unsigned long gup_flags)
{
        /*
         * If we aren't pinning then no problematic write can occur. A long term
         * pin is the most egregious case so this is the case we disallow.
         */
        if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
            (FOLL_PIN | FOLL_LONGTERM))
                return true;

        /*
         * If the VMA does not require dirty tracking then no problematic write
         * can occur either.
         */
        return !vma_needs_dirty_tracking(vma);
}
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
        vm_flags_t vm_flags = vma->vm_flags;
        int write = (gup_flags & FOLL_WRITE);
        int foreign = (gup_flags & FOLL_REMOTE);
        bool vma_anon = vma_is_anonymous(vma);

        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;

        if ((gup_flags & FOLL_ANON) && !vma_anon)
                return -EFAULT;

        if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
                return -EOPNOTSUPP;

        if (vma_is_secretmem(vma))
                return -EFAULT;

        if (write) {
                if (!vma_anon &&
                    !writable_file_mapping_allowed(vma, gup_flags))
                        return -EFAULT;

                if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
                        if (!(gup_flags & FOLL_FORCE))
                                return -EFAULT;
                        /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
                        if (is_vm_hugetlb_page(vma))
                                return -EFAULT;
                        /*
                         * We used to let the write,force case do COW in a
                         * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
                         * set a breakpoint in a read-only mapping of an
                         * executable, without corrupting the file (yet only
                         * when that file had been opened for writing!).
                         * Anon pages in shared mappings are surprising: now
                         * just reject it.
                         */
                        if (!is_cow_mapping(vm_flags))
                                return -EFAULT;
                }
        } else if (!(vm_flags & VM_READ)) {
                if (!(gup_flags & FOLL_FORCE))
                        return -EFAULT;
                /*
                 * Is there actually any vma we can reach here which does not
                 * have VM_MAYREAD set?
                 */
                if (!(vm_flags & VM_MAYREAD))
                        return -EFAULT;
        }
        /*
         * gups are always data accesses, not instruction
         * fetches, so execute=false here
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return -EFAULT;
        return 0;
}
/*
 * This is "vma_lookup()", but with a warning if we would have
 * historically expanded the stack in the GUP code.
 */
static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
                                             unsigned long addr)
{
#ifdef CONFIG_STACK_GROWSUP
        return vma_lookup(mm, addr);
#else
        static volatile unsigned long next_warn;
        struct vm_area_struct *vma;
        unsigned long now, next;

        vma = find_vma(mm, addr);
        if (!vma || (addr >= vma->vm_start))
                return vma;

        /* Only warn for half-way relevant accesses */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
        if (vma->vm_start - addr > 65536)
                return NULL;

        /* Let's not warn more than once an hour.. */
        now = jiffies; next = next_warn;
        if (next && time_before(now, next))
                return NULL;
        next_warn = now + 60*60*HZ;

        /* Let people know things may have changed. */
        pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
                current->comm, task_pid_nr(current),
                vma->vm_start, vma->vm_end, addr);
        return NULL;
#endif
}
/**
 * __get_user_pages() - pin user pages in memory
 * @mm:         mm_struct of target mm
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @gup_flags:  flags modifying pin behaviour
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long. Or NULL, if caller
 *              only intends to ensure the pages are faulted in.
 * @locked:     whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * Must be called with mmap_lock held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
 * be released. If this happens *@locked will be set to 0 on return.
 *
 * A caller using such a combination of @gup_flags must therefore hold the
 * mmap_lock for reading only, and recognize when it's been released. Otherwise,
 * it must be held for either reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                int *locked)
{
        long ret = 0, i = 0;
        struct vm_area_struct *vma = NULL;
        struct follow_page_context ctx = { NULL };

        if (!nr_pages)
                return 0;

        start = untagged_addr_remote(mm, start);

        VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

        do {
                struct page *page;
                unsigned int foll_flags = gup_flags;
                unsigned int page_increm;

                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
                        vma = gup_vma_lookup(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                ret = get_gate_page(mm, start & PAGE_MASK,
                                                    gup_flags, &vma,
                                                    pages ? &page : NULL);
                                if (ret)
                                        goto out;
                                ctx.page_mask = 0;
                                goto next_page;
                        }

                        if (!vma) {
                                ret = -EFAULT;
                                goto out;
                        }
                        ret = check_vma_flags(vma, gup_flags);
                        if (ret)
                                goto out;
                }
retry:
                /*
                 * If we have a pending SIGKILL, don't keep faulting pages and
                 * potentially allocating memory.
                 */
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
                cond_resched();

                page = follow_page_mask(vma, start, foll_flags, &ctx);
                if (!page || PTR_ERR(page) == -EMLINK) {
                        ret = faultin_page(vma, start, &foll_flags,
                                           PTR_ERR(page) == -EMLINK, locked);
                        switch (ret) {
                        case 0:
                                goto retry;
                        case -EBUSY:
                        case -EAGAIN:
                                ret = 0;
                                fallthrough;
                        case -EFAULT:
                        case -ENOMEM:
                        case -EHWPOISON:
                                goto out;
                        }
                        BUG();
                } else if (PTR_ERR(page) == -EEXIST) {
                        /*
                         * Proper page table entry exists, but no corresponding
                         * struct page. If the caller expects **pages to be
                         * filled in, bail out now, because that can't be done
                         * for this page.
                         */
                        if (pages) {
                                ret = PTR_ERR(page);
                                goto out;
                        }
                } else if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
next_page:
                page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
                if (page_increm > nr_pages)
                        page_increm = nr_pages;

                if (pages) {
                        struct page *subpage;
                        unsigned int j;

                        /*
                         * This must be a large folio (and doesn't need to
                         * be the whole folio; it can be part of it), do
                         * the refcount work for all the subpages too.
                         *
                         * NOTE: here the page may not be the head page
                         * e.g. when start addr is not thp-size aligned.
                         * try_grab_folio() should have taken care of tail
                         * pages.
                         */
                        if (page_increm > 1) {
                                struct folio *folio;

                                /*
                                 * Since we already hold refcount on the
                                 * large folio, this should never fail.
                                 */
                                folio = try_grab_folio(page, page_increm - 1,
                                                       foll_flags);
                                if (WARN_ON_ONCE(!folio)) {
                                        /*
                                         * Release the 1st page ref if the
                                         * folio is problematic, fail hard.
                                         */
                                        gup_put_folio(page_folio(page), 1,
                                                      foll_flags);
                                        ret = -EFAULT;
                                        goto out;
                                }
                        }

                        for (j = 0; j < page_increm; j++) {
                                subpage = nth_page(page, j);
                                pages[i + j] = subpage;
                                flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
                                flush_dcache_page(subpage);
                        }
                }

                i += page_increm;
                start += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
        } while (nr_pages);
out:
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return i ? i : ret;
}
static bool vma_permits_fault(struct vm_area_struct *vma,
                              unsigned int fault_flags)
{
        bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
        bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
        vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

        if (!(vm_flags & vma->vm_flags))
                return false;

        /*
         * The architecture might have a hardware protection
         * mechanism other than read/write that can deny access.
         *
         * gup always represents data access, not instruction
         * fetches, so execute=false here:
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return false;

        return true;
}
/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:         mm_struct of target mm
 * @address:    user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:   did we unlock the mmap_lock while retrying, maybe NULL if caller
 *              does not allow retry. If NULL, the caller must guarantee
 *              that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it has not the
 * same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags,
                     bool *unlocked)
{
        struct vm_area_struct *vma;
        vm_fault_t ret;

        address = untagged_addr_remote(mm, address);

        if (unlocked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
        vma = gup_vma_lookup(mm, address);
        if (!vma)
                return -EFAULT;

        if (!vma_permits_fault(vma, fault_flags))
                return -EFAULT;

        if ((fault_flags & FAULT_FLAG_KILLABLE) &&
            fatal_signal_pending(current))
                return -EINTR;

        ret = handle_mm_fault(vma, address, fault_flags, NULL);

        if (ret & VM_FAULT_COMPLETED) {
                /*
                 * NOTE: it's a pity that we need to retake the lock here
                 * to pair with the unlock() in the callers. Ideally we
                 * could tell the callers so they do not need to unlock.
                 */
                mmap_read_lock(mm);
                *unlocked = true;
                return 0;
        }

        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, 0);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                mmap_read_lock(mm);
                *unlocked = true;
                fault_flags |= FAULT_FLAG_TRIED;
                goto retry;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
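/*
 * Illustrative sketch (hypothetical, kept disabled): the futex-style pattern
 * the kernel-doc above describes - attempt the access with page faults
 * disabled, and if that fails, resolve the fault with fixup_user_fault() and
 * retry. demo_read_user_word() is not a real kernel function.
 */
#if 0
static int demo_read_user_word(u32 __user *uaddr, u32 *val)
{
        struct mm_struct *mm = current->mm;
        int ret;

        for (;;) {
                pagefault_disable();
                ret = __get_user(*val, uaddr);
                pagefault_enable();
                if (!ret)
                        return 0;

                /* Resolve the fault outside the atomic section, then retry. */
                mmap_read_lock(mm);
                ret = fixup_user_fault(mm, (unsigned long)uaddr, 0, NULL);
                mmap_read_unlock(mm);
                if (ret)
                        return ret;
        }
}
#endif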
/*
 * GUP always responds to fatal signals.  When FOLL_INTERRUPTIBLE is
 * specified, it'll also respond to generic signals.  The caller of GUP
 * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
 */
static bool gup_signal_pending(unsigned int flags)
{
        if (fatal_signal_pending(current))
                return true;

        if (!(flags & FOLL_INTERRUPTIBLE))
                return false;

        return signal_pending(current);
}
/*
 * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
 * the caller. This function may drop the mmap_lock. If it does so, then it will
 * set (*locked = 0).
 *
 * (*locked == 0) means that the caller expects this function to acquire and
 * drop the mmap_lock. Therefore, the value of *locked will still be zero when
 * the function returns, even though it may have changed temporarily during
 * function execution.
 *
 * Please note that this function, unlike __get_user_pages(), will not return 0
 * for nr_pages > 0, unless FOLL_NOWAIT is used.
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long nr_pages,
                                                    struct page **pages,
                                                    int *locked,
                                                    unsigned int flags)
{
        long ret, pages_done;
        bool must_unlock = false;

        if (!nr_pages)
                return 0;

        /*
         * The internal caller expects GUP to manage the lock internally and the
         * lock must be released when this returns.
         */
        if (!*locked) {
                if (mmap_read_lock_killable(mm))
                        return -EAGAIN;
                must_unlock = true;
                *locked = 1;
        } else
                mmap_assert_locked(mm);

        if (flags & FOLL_PIN)
                mm_set_has_pinned_flag(&mm->flags);

        /*
         * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
         * is to set FOLL_GET if the caller wants pages[] filled in (but has
         * carelessly failed to specify FOLL_GET), so keep doing that, but only
         * for FOLL_GET, not for the newer FOLL_PIN.
         *
         * FOLL_PIN always expects pages to be non-null, but no need to assert
         * that here, as any failures will be obvious enough.
         */
        if (pages && !(flags & FOLL_PIN))
                flags |= FOLL_GET;

        pages_done = 0;
        for (;;) {
                ret = __get_user_pages(mm, start, nr_pages, flags, pages,
                                       locked);
                if (!(flags & FOLL_UNLOCKABLE)) {
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
                        pages_done = ret;
                        break;
                }

                /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
                if (!*locked) {
                        BUG_ON(ret < 0);
                        BUG_ON(ret >= nr_pages);
                }

                if (ret > 0) {
                        nr_pages -= ret;
                        pages_done += ret;
                        if (!nr_pages)
                                break;
                }
                if (*locked) {
                        /*
                         * VM_FAULT_RETRY didn't trigger or it was a
                         * FOLL_NOWAIT.
                         */
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                /*
                 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
                 * For the prefault case (!pages) we only update counts.
                 */
                if (likely(pages))
                        pages += ret;
                start += ret << PAGE_SHIFT;

                /* The lock was temporarily dropped, so we must unlock later */
                must_unlock = true;

retry:
                /*
                 * Repeat on the address that fired VM_FAULT_RETRY
                 * with both FAULT_FLAG_ALLOW_RETRY and
                 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
                 * by fatal signals of even common signals, depending on
                 * the caller's request. So we need to check it before we
                 * start trying again otherwise it can loop forever.
                 */
                if (gup_signal_pending(flags)) {
                        if (!pages_done)
                                pages_done = -EINTR;
                        break;
                }

                ret = mmap_read_lock_killable(mm);
                if (ret) {
                        BUG_ON(ret > 0);
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }

                *locked = 1;
                ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
                                       pages, locked);
                if (!*locked) {
                        /* Continue to retry until we succeeded */
                        BUG_ON(ret != 0);
                        goto retry;
                }
                if (ret != 1) {
                        BUG_ON(ret > 1);
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                nr_pages--;
                pages_done++;
                if (!nr_pages)
                        break;
                if (likely(pages))
                        pages++;
                start += PAGE_SIZE;
        }
        if (must_unlock && *locked) {
                /*
                 * We either temporarily dropped the lock, or the caller
                 * requested that we both acquire and drop the lock. Either way,
                 * we must now unlock, and notify the caller of that state.
                 */
                mmap_read_unlock(mm);
                *locked = 0;
        }

        /*
         * Failing to pin anything implies something has gone wrong (except when
         * FOLL_NOWAIT is specified).
         */
        if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
                return -EFAULT;

        return pages_done;
}
/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must held for read only and may be
 * released.  If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int local_locked = 1;
        int gup_flags;
        long ret;

        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
        VM_BUG_ON_VMA(start < vma->vm_start, vma);
        VM_BUG_ON_VMA(end > vma->vm_end, vma);
        mmap_assert_locked(mm);

        /*
         * Rightly or wrongly, the VM_LOCKONFAULT case has never used
         * faultin_page() to break COW, so it has no work to do here.
         */
        if (vma->vm_flags & VM_LOCKONFAULT)
                return nr_pages;

        gup_flags = FOLL_TOUCH;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
         * and we would not want to dirty them for nothing.
         */
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;

        /*
         * We want mlock to succeed for regions that have any permissions
         * other than PROT_NONE.
         */
        if (vma_is_accessible(vma))
                gup_flags |= FOLL_FORCE;

        if (locked)
                gup_flags |= FOLL_UNLOCKABLE;

        /*
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
        ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                               NULL, locked ? locked : &local_locked);
        lru_add_drain();
        return ret;
}
/*
 * faultin_vma_page_range() - populate (prefault) page tables inside the
 *                            given VMA range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
 *
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @write: whether to prefault readable or writable
 * @locked: whether the mmap_lock is still held
 *
 * Returns either number of processed pages in the vma, or a negative error
 * code on error (see __get_user_pages()).
 *
 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
 * covered by the VMA. If it's released, *@locked will be set to 0.
 */
long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, bool write, int *locked)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
        long ret;

        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
        VM_BUG_ON_VMA(start < vma->vm_start, vma);
        VM_BUG_ON_VMA(end > vma->vm_end, vma);
        mmap_assert_locked(mm);

        /*
         * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
         *             the page dirty with FOLL_WRITE -- which doesn't make a
         *             difference with !FOLL_FORCE, because the page is writable
         *             in the page table.
         * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
         *                a poisoned page.
         * !FOLL_FORCE: Require proper access permissions.
         */
        gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
        if (write)
                gup_flags |= FOLL_WRITE;

        /*
         * We want to report -EINVAL instead of -EFAULT for any permission
         * problems or incompatible mappings.
         */
        if (check_vma_flags(vma, gup_flags))
                return -EINVAL;

        ret = __get_user_pages(mm, start, nr_pages, gup_flags,
                               NULL, locked);
        lru_add_drain();
        return ret;
}
/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_lock must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
        struct mm_struct *mm = current->mm;
        unsigned long end, nstart, nend;
        struct vm_area_struct *vma = NULL;
        int locked = 0;
        long ret = 0;

        end = start + len;

        for (nstart = start; nstart < end; nstart = nend) {
                /*
                 * We want to fault in pages for [nstart; end) address range.
                 * Find first corresponding VMA.
                 */
                if (!locked) {
                        locked = 1;
                        mmap_read_lock(mm);
                        vma = find_vma_intersection(mm, nstart, end);
                } else if (nstart >= vma->vm_end)
                        vma = find_vma_intersection(mm, vma->vm_end, end);

                if (!vma)
                        break;
                /*
                 * Set [nstart; nend) to intersection of desired address
                 * range with the first VMA. Also, skip undesirable VMA types.
                 */
                nend = min(end, vma->vm_end);
                if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                        continue;
                if (nstart < vma->vm_start)
                        nstart = vma->vm_start;
                /*
                 * Now fault in a range of pages. populate_vma_page_range()
                 * double checks the vma flags, so that it won't mlock pages
                 * if the vma was already munlocked.
                 */
                ret = populate_vma_page_range(vma, nstart, nend, &locked);
                if (ret < 0) {
                        if (ignore_errors) {
                                ret = 0;
                                continue;       /* continue at next VMA */
                        }
                        break;
                }
                nend = nstart + ret * PAGE_SIZE;
                ret = 0;
        }
        if (locked)
                mmap_read_unlock(mm);
        return ret;     /* 0 or negative error code */
}
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
                unsigned long nr_pages, struct page **pages,
                int *locked, unsigned int foll_flags)
{
        struct vm_area_struct *vma;
        bool must_unlock = false;
        unsigned long vm_flags;
        long i;

        if (!nr_pages)
                return 0;

        /*
         * The internal caller expects GUP to manage the lock internally and the
         * lock must be released when this returns.
         */
        if (!*locked) {
                if (mmap_read_lock_killable(mm))
                        return -EAGAIN;
                must_unlock = true;
                *locked = 1;
        }

        /* calculate required read or write permissions.
         * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
        vm_flags  = (foll_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (foll_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

        for (i = 0; i < nr_pages; i++) {
                vma = find_vma(mm, start);
                if (!vma)
                        break;

                /* protect what we can, including chardevs */
                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        break;

                if (pages) {
                        pages[i] = virt_to_page((void *)start);
                        if (pages[i])
                                get_page(pages[i]);
                }

                start = (start + PAGE_SIZE) & PAGE_MASK;
        }

        if (must_unlock && *locked) {
                mmap_read_unlock(mm);
                *locked = 0;
        }

        return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */
/**
 * fault_in_writeable - fault in userspace address range for writing
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 */
size_t fault_in_writeable(char __user *uaddr, size_t size)
{
        char __user *start = uaddr, *end;

        if (unlikely(size == 0))
                return 0;
        if (!user_write_access_begin(uaddr, size))
                return size;
        if (!PAGE_ALIGNED(uaddr)) {
                unsafe_put_user(0, uaddr, out);
                uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
        }
        end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
        if (unlikely(end < start))
                end = NULL;
        while (uaddr != end) {
                unsafe_put_user(0, uaddr, out);
                uaddr += PAGE_SIZE;
        }

out:
        user_write_access_end();
        if (size > uaddr - start)
                return size - (uaddr - start);
        return 0;
}
EXPORT_SYMBOL(fault_in_writeable);
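/*
 * Illustrative sketch (hypothetical, kept disabled): the usual caller pattern
 * for the fault_in_*() helpers - attempt the copy with page faults disabled,
 * and if it comes up short, fault the remainder in and retry.
 * demo_copy_to_user_faultin() is not a real kernel API.
 */
#if 0
static size_t demo_copy_to_user_faultin(char __user *dst, const char *src,
                                        size_t len)
{
        size_t left;

        do {
                pagefault_disable();
                left = copy_to_user(dst, src, len);
                pagefault_enable();
                if (!left)
                        return 0;
                /* Give up once nothing more can be faulted in. */
        } while (fault_in_writeable(dst + len - left, left) != left);

        return left;    /* bytes that could not be copied */
}
#endif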
/**
 * fault_in_subpage_writeable - fault in an address range for writing
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Fault in a user address range for writing while checking for permissions at
 * sub-page granularity (e.g. arm64 MTE). This function should be used when
 * the caller cannot guarantee forward progress of a copy_to_user() loop.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 */
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
{
        size_t faulted_in;

        /*
         * Attempt faulting in at page granularity first for page table
         * permission checking. The arch-specific probe_subpage_writeable()
         * functions may not check for this.
         */
        faulted_in = size - fault_in_writeable(uaddr, size);
        if (faulted_in)
                faulted_in -= probe_subpage_writeable(uaddr, faulted_in);

        return size - faulted_in;
}
EXPORT_SYMBOL(fault_in_subpage_writeable);
/*
 * fault_in_safe_writeable - fault in an address range for writing
 * @uaddr: start of address range
 * @size: length of address range
 *
 * Faults in an address range for writing.  This is primarily useful when we
 * already know that some or all of the pages in the address range aren't in
 * memory.
 *
 * Unlike fault_in_writeable(), this function is non-destructive.
 *
 * Note that we don't pin or otherwise hold the pages referenced that we fault
 * in.  There's no guarantee that they'll stay in memory for any duration of
 * time.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 */
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
{
        unsigned long start = (unsigned long)uaddr, end;
        struct mm_struct *mm = current->mm;
        bool unlocked = false;

        if (unlikely(size == 0))
                return 0;
        end = PAGE_ALIGN(start + size);
        if (end < start)
                end = 0;

        mmap_read_lock(mm);
        do {
                if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
                        break;
                start = (start + PAGE_SIZE) & PAGE_MASK;
        } while (start != end);
        mmap_read_unlock(mm);

        if (size > (unsigned long)uaddr - start)
                return size - ((unsigned long)uaddr - start);
        return 0;
}
EXPORT_SYMBOL(fault_in_safe_writeable);
/**
 * fault_in_readable - fault in userspace address range for reading
 * @uaddr: start of user address range
 * @size: size of user address range
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 */
size_t fault_in_readable(const char __user *uaddr, size_t size)
{
        const char __user *start = uaddr, *end;
        volatile char c;

        if (unlikely(size == 0))
                return 0;
        if (!user_read_access_begin(uaddr, size))
                return size;
        if (!PAGE_ALIGNED(uaddr)) {
                unsafe_get_user(c, uaddr, out);
                uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
        }
        end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
        if (unlikely(end < start))
                end = NULL;
        while (uaddr != end) {
                unsafe_get_user(c, uaddr, out);
                uaddr += PAGE_SIZE;
        }

out:
        user_read_access_end();
        (void)c;
        if (size > uaddr - start)
                return size - (uaddr - start);
        return 0;
}
EXPORT_SYMBOL(fault_in_readable);
/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save disk space.
 *
 * Called without mmap_lock (takes and releases the mmap_lock by itself).
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
        struct page *page;
        int locked = 0;
        int ret;

        ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
                                      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
        return (ret == 1) ? page : NULL;
}
#endif /* CONFIG_ELF_CORE */
#ifdef CONFIG_MIGRATION
/*
 * Returns the number of collected pages. Return value is always >= 0.
 */
static unsigned long collect_longterm_unpinnable_pages(
                                        struct list_head *movable_page_list,
                                        unsigned long nr_pages,
                                        struct page **pages)
{
        unsigned long i, collected = 0;
        struct folio *prev_folio = NULL;
        bool drain_allow = true;

        for (i = 0; i < nr_pages; i++) {
                struct folio *folio = page_folio(pages[i]);

                if (folio == prev_folio)
                        continue;
                prev_folio = folio;

                if (folio_is_longterm_pinnable(folio))
                        continue;

                collected++;

                if (folio_is_device_coherent(folio))
                        continue;

                if (folio_test_hugetlb(folio)) {
                        isolate_hugetlb(folio, movable_page_list);
                        continue;
                }

                if (!folio_test_lru(folio) && drain_allow) {
                        lru_add_drain_all();
                        drain_allow = false;
                }

                if (!folio_isolate_lru(folio))
                        continue;

                list_add_tail(&folio->lru, movable_page_list);
                node_stat_mod_folio(folio,
                                    NR_ISOLATED_ANON + folio_is_file_lru(folio),
                                    folio_nr_pages(folio));
        }

        return collected;
}
/*
 * Unpins all pages and migrates device coherent pages and movable_page_list.
 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
 * (or partial success).
 */
static int migrate_longterm_unpinnable_pages(
                                        struct list_head *movable_page_list,
                                        unsigned long nr_pages,
                                        struct page **pages)
{
        int ret;
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                struct folio *folio = page_folio(pages[i]);

                if (folio_is_device_coherent(folio)) {
                        /*
                         * Migration will fail if the page is pinned, so convert
                         * the pin on the source page to a normal reference.
                         */
                        pages[i] = NULL;
                        folio_get(folio);
                        gup_put_folio(folio, 1, FOLL_PIN);

                        if (migrate_device_coherent_page(&folio->page)) {
                                ret = -EBUSY;
                                goto err;
                        }

                        continue;
                }

                /*
                 * We can't migrate pages with unexpected references, so drop
                 * the reference obtained by __get_user_pages_locked().
                 * Migrating pages have been added to movable_page_list after
                 * calling folio_isolate_lru() which takes a reference so the
                 * page won't be freed if it's migrating.
                 */
                unpin_user_page(pages[i]);
                pages[i] = NULL;
        }

        if (!list_empty(movable_page_list)) {
                struct migration_target_control mtc = {
                        .nid = NUMA_NO_NODE,
                        .gfp_mask = GFP_USER | __GFP_NOWARN,
                };

                if (migrate_pages(movable_page_list, alloc_migration_target,
                                  NULL, (unsigned long)&mtc, MIGRATE_SYNC,
                                  MR_LONGTERM_PIN, NULL)) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        putback_movable_pages(movable_page_list);

        return -EAGAIN;

err:
        for (i = 0; i < nr_pages; i++)
                if (pages[i])
                        unpin_user_page(pages[i]);
        putback_movable_pages(movable_page_list);

        return ret;
}
/*
 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
 * pages in the range are required to be pinned via FOLL_PIN, before calling
 * this routine.
 *
 * If any pages in the range are not allowed to be pinned, then this routine
 * will migrate those pages away, unpin all the pages in the range and return
 * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
 * call this routine again.
 *
 * If an error other than -EAGAIN occurs, this indicates a migration failure.
 * The caller should give up, and propagate the error back up the call stack.
 *
 * If everything is OK and all pages in the range are allowed to be pinned, then
 * this routine leaves all pages pinned and returns zero for success.
 */
static long check_and_migrate_movable_pages(unsigned long nr_pages,
                                            struct page **pages)
{
        unsigned long collected;
        LIST_HEAD(movable_page_list);

        collected = collect_longterm_unpinnable_pages(&movable_page_list,
                                                      nr_pages, pages);
        if (!collected)
                return 0;

        return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
                                                 pages);
}
#else
static long check_and_migrate_movable_pages(unsigned long nr_pages,
                                            struct page **pages)
{
        return 0;
}
#endif /* CONFIG_MIGRATION */
/*
 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
 * allows us to process the FOLL_LONGTERM flag.
 */
static long __gup_longterm_locked(struct mm_struct *mm,
				  unsigned long start,
				  unsigned long nr_pages,
				  struct page **pages,
				  int *locked,
				  unsigned int gup_flags)
{
	unsigned int flags;
	long rc, nr_pinned_pages;

	if (!(gup_flags & FOLL_LONGTERM))
		return __get_user_pages_locked(mm, start, nr_pages, pages,
					       locked, gup_flags);

	flags = memalloc_pin_save();
	do {
		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
							  pages, locked,
							  gup_flags);
		if (nr_pinned_pages <= 0) {
			rc = nr_pinned_pages;
			break;
		}

		/* FOLL_LONGTERM implies FOLL_PIN */
		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
	} while (rc == -EAGAIN);
	memalloc_pin_restore(flags);
	return rc ? rc : nr_pinned_pages;
}
/*
 * Check that the given flags are valid for the exported gup/pup interface, and
 * update them with the required flags that the caller must have set.
 */
static bool is_valid_gup_args(struct page **pages, int *locked,
			      unsigned int *gup_flags_p, unsigned int to_set)
{
	unsigned int gup_flags = *gup_flags_p;

	/*
	 * These flags are not allowed to be specified externally to the gup
	 * interfaces:
	 * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
	 * - FOLL_REMOTE is internal only and used on follow_page()
	 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
	 */
	if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
		return false;

	gup_flags |= to_set;
	if (locked) {
		/* At the external interface locked must be set */
		if (WARN_ON_ONCE(*locked != 1))
			return false;

		gup_flags |= FOLL_UNLOCKABLE;
	}

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return false;

	/* LONGTERM can only be specified when pinning */
	if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
		return false;

	/* Pages input must be given if using GET/PIN */
	if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
		return false;

	/* We want to allow the pgmap to be hot-unplugged at all times */
	if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
			 (gup_flags & FOLL_PCI_P2PDMA)))
		return false;

	*gup_flags_p = gup_flags;
	return true;
}
/**
 * get_user_pages_remote() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * Must be called with mmap_lock held for read or write.
 *
 * get_user_pages_remote walks a process's page tables and takes a reference
 * to each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages_remote returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages_remote is typically used for fewer-copy IO operations,
 * to get a handle on the memory by some means other than accesses
 * via the user virtual addresses. The pages may be submitted for
 * DMA to devices or accessed via their kernel linear mapping (via the
 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages_remote should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages_remote because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		int *locked)
{
	int local_locked = 1;

	if (!is_valid_gup_args(pages, locked, &gup_flags,
			       FOLL_TOUCH | FOLL_REMOTE))
		return -EINVAL;

	return __get_user_pages_locked(mm, start, nr_pages, pages,
				       locked ? locked : &local_locked,
				       gup_flags);
}
EXPORT_SYMBOL(get_user_pages_remote);
#else /* CONFIG_MMU */
long get_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return 0;
}
#endif /* !CONFIG_MMU */
/**
 * get_user_pages() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 *
 * This is the same as get_user_pages_remote(), just with a less-flexible
 * calling convention where we assume that the mm being operated on belongs to
 * the current task, and doesn't allow passing of a locked parameter. We also
 * obviously don't pass FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages)
{
	int locked = 1;

	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
		return -EINVAL;

	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
				       &locked, gup_flags);
}
EXPORT_SYMBOL(get_user_pages);
/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      mmap_read_lock(mm);
 *      get_user_pages(mm, ..., pages, NULL);
 *      mmap_read_unlock(mm);
 *
 *  with:
 *
 *      get_user_pages_unlocked(mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	int locked = 0;

	if (!is_valid_gup_args(pages, NULL, &gup_flags,
			       FOLL_TOUCH | FOLL_UNLOCKABLE))
		return -EINVAL;

	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
				       &locked, gup_flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
/*
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *  free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_FAST_GUP
/*
 * Used in the GUP-fast path to determine whether a pin is permitted for a
 * specific folio.
 *
 * This call assumes the caller has pinned the folio, that the lowest page table
 * level still points to this folio, and that interrupts have been disabled.
 *
 * Writing to pinned file-backed dirty tracked folios is inherently problematic
 * (see comment describing the writable_file_mapping_allowed() function). We
 * therefore try to avoid the most egregious case of a long-term mapping doing
 * so.
 *
 * This function cannot be as thorough as that one as the VMA is not available
 * in the fast path, so instead we whitelist known good cases and if in doubt,
 * fall back to the slow path.
 */
static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
{
	struct address_space *mapping;
	unsigned long mapping_flags;

	/*
	 * If we aren't pinning then no problematic write can occur. A long term
	 * pin is the most egregious case so this is the one we disallow.
	 */
	if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) !=
	    (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
		return true;

	/* The folio is pinned, so we can safely access folio fields. */

	if (WARN_ON_ONCE(folio_test_slab(folio)))
		return false;

	/* hugetlb mappings do not require dirty-tracking. */
	if (folio_test_hugetlb(folio))
		return true;

	/*
	 * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods
	 * cannot proceed, which means no actions performed under RCU can
	 * proceed either.
	 *
	 * inodes and thus their mappings are freed under RCU, which means the
	 * mapping cannot be freed beneath us and thus we can safely dereference
	 * it.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * However, there may be operations which _alter_ the mapping, so ensure
	 * we read it once and only once.
	 */
	mapping = READ_ONCE(folio->mapping);

	/*
	 * The mapping may have been truncated, in any case we cannot determine
	 * if this mapping is safe - fall back to the slow path to determine how
	 * to proceed.
	 */
	if (!mapping)
		return false;

	/* Anonymous folios pose no problem. */
	mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
	if (mapping_flags)
		return mapping_flags & PAGE_MAPPING_ANON;

	/*
	 * At this point, we know the mapping is non-null and points to an
	 * address_space object. The only remaining whitelisted file system is
	 * shmem.
	 */
	return shmem_mapping(mapping);
}
static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
					    unsigned int flags,
					    struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		if (flags & FOLL_PIN)
			unpin_user_page(page);
	}
}
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
/*
 * Fast-gup relies on pte change detection to avoid concurrent pgtable
 * operations.
 *
 * To pin the page, fast-gup needs to do below in order:
 * (1) pin the page (by prefetching pte), then (2) check pte not changed.
 *
 * For the rest of pgtable operations where pgtable updates can be racy
 * with fast-gup, we need to do (1) clear pte, then (2) check whether page
 * is pinned.
 *
 * Above will work for all pte-level operations, including THP split.
 *
 * For THP collapse, it's a bit more complicated because fast-gup may be
 * walking a pgtable page that is being freed (pte is still valid but pmd
 * can be cleared already). To avoid race in such condition, we need to
 * also check pmd here to make sure pmd doesn't change (corresponds to
 * pmdp_collapse_flush() in the THP collapse code path).
 */
static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
			 unsigned long end, unsigned int flags,
			 struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	if (!ptep)
		return 0;
	do {
		pte_t pte = ptep_get_lockless(ptep);
		struct page *page;
		struct folio *folio;

		/*
		 * Always fallback to ordinary GUP on PROT_NONE-mapped pages:
		 * pte_access_permitted() better should reject these pages
		 * either way: otherwise, GUP-fast might succeed in
		 * cases where ordinary GUP would fail due to VMA access
		 * permissions.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			if (unlikely(flags & FOLL_LONGTERM))
				goto pte_unmap;

			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, flags, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		folio = try_grab_folio(page, 1, flags);
		if (!folio)
			goto pte_unmap;

		if (unlikely(folio_is_secretmem(folio))) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
		    unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (!folio_fast_pin_allowed(folio, flags)) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		/*
		 * We need to make the page accessible if and only if we are
		 * going to access its content (the FOLL_PIN case). Please
		 * see Documentation/core-api/pin_user_pages.rst for
		 * details.
		 */
		if (flags & FOLL_PIN) {
			ret = arch_make_page_accessible(page);
			if (ret) {
				gup_put_folio(folio, 1, flags);
				goto pte_unmap;
			}
		}
		folio_set_referenced(folio);
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
			 unsigned long end, unsigned int flags,
			 struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
			     unsigned long end, unsigned int flags,
			     struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}

		if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
			undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}

		SetPageReferenced(page);
		pages[*nr] = page;
		if (unlikely(try_grab_page(page, flags))) {
			undo_dev_pagemap(nr, nr_start, flags, pages);
			break;
		}
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	put_dev_pagemap(pgmap);
	return addr == end;
}
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned int flags,
				 struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		undo_dev_pagemap(nr, nr_start, flags, pages);
		return 0;
	}
	return 1;
}
static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned int flags,
				 struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		undo_dev_pagemap(nr, nr_start, flags, pages);
		return 0;
	}
	return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned int flags,
				 struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}

static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned int flags,
				 struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif
static int record_subpages(struct page *page, unsigned long addr,
			   unsigned long end, struct page **pages)
{
	int nr;

	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
		pages[nr] = nth_page(page, nr);

	return nr;
}
#ifdef CONFIG_ARCH_HAS_HUGEPD
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, unsigned int flags,
		       struct page **pages, int *nr)
{
	unsigned long pte_end;
	struct page *page;
	struct folio *folio;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = huge_ptep_get(ptep);

	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
	refs = record_subpages(page, addr, end, pages + *nr);

	folio = try_grab_folio(page, refs, flags);
	if (!folio)
		return 0;

	if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned int pdshift, unsigned long end, unsigned int flags,
		       struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
#else
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned int pdshift, unsigned long end, unsigned int flags,
			      struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_HUGEPD */
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
			unsigned long end, unsigned int flags,
			struct page **pages, int *nr)
{
	struct page *page;
	struct folio *folio;
	int refs;

	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pmd_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
					     pages, nr);
	}

	page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
	refs = record_subpages(page, addr, end, pages + *nr);

	folio = try_grab_folio(page, refs, flags);
	if (!folio)
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}
	if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
			unsigned long end, unsigned int flags,
			struct page **pages, int *nr)
{
	struct page *page;
	struct folio *folio;
	int refs;

	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	if (pud_devmap(orig)) {
		if (unlikely(flags & FOLL_LONGTERM))
			return 0;
		return __gup_device_huge_pud(orig, pudp, addr, end, flags,
					     pages, nr);
	}

	page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
	refs = record_subpages(page, addr, end, pages + *nr);

	folio = try_grab_folio(page, refs, flags);
	if (!folio)
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, unsigned int flags,
			struct page **pages, int *nr)
{
	int refs;
	struct page *page;
	struct folio *folio;

	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));

	page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	refs = record_subpages(page, addr, end, pages + *nr);

	folio = try_grab_folio(page, refs, flags);
	if (!folio)
		return 0;

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	*nr += refs;
	folio_set_referenced(folio);
	return 1;
}
static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset_lockless(pudp, pud, addr);
	do {
		pmd_t pmd = pmdp_get_lockless(pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
			     pmd_devmap(pmd))) {
			/* See gup_pte_range() */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures can have a different pmd format for
			 * hugetlbfs than for THP.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset_lockless(p4dp, p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (unlikely(!pud_present(pud)))
			return 0;
		if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, flags,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}
static void gup_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, flags, pages, nr))
				return;
		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}
#else
static inline void gup_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
}
#endif /* CONFIG_HAVE_FAST_GUP */
#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
 * we need to fall back to the slow version:
 */
static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return true;
}
#endif
static unsigned long lockless_pages_from_mm(unsigned long start,
					    unsigned long end,
					    unsigned int gup_flags,
					    struct page **pages)
{
	unsigned long flags;
	int nr_pinned = 0;
	unsigned seq;

	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
	    !gup_fast_permitted(start, end))
		return 0;

	if (gup_flags & FOLL_PIN) {
		seq = raw_read_seqcount(&current->mm->write_protect_seq);
		if (seq & 1)
			return 0;
	}

	/*
	 * Disable interrupts. The nested form is used, in order to allow full,
	 * general purpose use of this routine.
	 *
	 * With interrupts disabled, we block page table pages from being freed
	 * from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
	 * that come from THPs splitting.
	 */
	local_irq_save(flags);
	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(flags);

	/*
	 * When pinning pages for DMA there could be a concurrent write protect
	 * from fork() via copy_page_range(), in this case always fail fast GUP.
	 */
	if (gup_flags & FOLL_PIN) {
		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
			unpin_user_pages_lockless(pages, nr_pinned);
			return 0;
		} else {
			sanity_check_pinned_pages(pages, nr_pinned);
		}
	}
	return nr_pinned;
}
static int internal_get_user_pages_fast(unsigned long start,
					unsigned long nr_pages,
					unsigned int gup_flags,
					struct page **pages)
{
	unsigned long len, end;
	unsigned long nr_pinned;
	int locked = 0;
	int ret;

	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
				       FOLL_FAST_ONLY | FOLL_NOFAULT |
				       FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
		return -EINVAL;

	if (gup_flags & FOLL_PIN)
		mm_set_has_pinned_flag(&current->mm->flags);

	if (!(gup_flags & FOLL_FAST_ONLY))
		might_lock_read(&current->mm->mmap_lock);

	start = untagged_addr(start) & PAGE_MASK;
	len = nr_pages << PAGE_SHIFT;
	if (check_add_overflow(start, len, &end))
		return -EOVERFLOW;
	if (end > TASK_SIZE_MAX)
		return -EFAULT;
	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
		return nr_pinned;

	/* Slow path: try to get the remaining pages with get_user_pages */
	start += nr_pinned << PAGE_SHIFT;
	pages += nr_pinned;
	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
				    pages, &locked,
				    gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
	if (ret < 0) {
		/*
		 * The caller has to unpin the pages we already pinned so
		 * returning -errno is not an option
		 */
		if (nr_pinned)
			return nr_pinned;
		return ret;
	}
	return ret + nr_pinned;
}
/**
 * get_user_pages_fast_only() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP.
 *
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 *
 * Careful, careful! COW breaking can go either way, so a non-write
 * access can get ambiguous page results. If you call this function without
 * 'write' set, you'd better be sure that you're ok with that ambiguity.
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages)
{
	/*
	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
	 * because gup fast is always a "pin with a +1 page refcount" request.
	 *
	 * FOLL_FAST_ONLY is required in order to match the API description of
	 * this routine: no fall back to regular ("slow") GUP.
	 */
	if (!is_valid_gup_args(pages, NULL, &gup_flags,
			       FOLL_GET | FOLL_FAST_ONLY))
		return -EINVAL;

	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_lock.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number requested.
 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
 * -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	/*
	 * The caller may or may not have explicitly set FOLL_GET; either way is
	 * OK. However, internally (within mm/gup.c), gup fast variants must set
	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
	 * request.
	 */
	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
		return -EINVAL;
	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
/**
 * pin_user_pages_fast() - pin user pages in memory without taking locks
 *
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
 * get_user_pages_fast() for documentation on the function arguments, because
 * the arguments here are identical.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for further details.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page() will not remove pins from it.
 */
int pin_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
		return -EINVAL;
	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(pin_user_pages_fast);
/**
 * pin_user_pages_remote() - pin pages of a remote process
 *
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
 * get_user_pages_remote() for documentation on the function arguments, because
 * the arguments here are identical.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for details.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page*() will not remove pins from it.
 */
long pin_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	int local_locked = 1;

	if (!is_valid_gup_args(pages, locked, &gup_flags,
			       FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
		return 0;
	return __gup_longterm_locked(mm, start, nr_pages, pages,
				     locked ? locked : &local_locked,
				     gup_flags);
}
EXPORT_SYMBOL(pin_user_pages_remote);
/**
 * pin_user_pages() - pin user pages in memory for use by other devices
 *
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
 * FOLL_PIN is set.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for details.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page*() will not remove pins from it.
 */
long pin_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages)
{
	int locked = 1;

	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
		return 0;
	return __gup_longterm_locked(current->mm, start, nr_pages,
				     pages, &locked, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages);
/*
 * pin_user_pages_unlocked() is the FOLL_PIN variant of
 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
 * FOLL_PIN and rejects FOLL_GET.
 *
 * Note that if a zero_page is amongst the returned pages, it will not have
 * pins in it and unpin_user_page*() will not remove pins from it.
 */
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	int locked = 0;

	if (!is_valid_gup_args(pages, NULL, &gup_flags,
			       FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
		return 0;

	return __gup_longterm_locked(current->mm, start, nr_pages, pages,
				     &locked, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages_unlocked);