// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"
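
/*
 * This file implements the page-filling side of userfaultfd: the
 * UFFDIO_COPY (mcopy_atomic), UFFDIO_ZEROPAGE (mfill_zeropage),
 * UFFDIO_CONTINUE (mcopy_continue) and UFFDIO_WRITEPROTECT
 * (mwriteprotect_range) operations.  All of them work on a destination
 * range under mmap_lock held for read.
 */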
static __always_inline struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
						      unsigned long dst_start,
						      unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}
/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
{
	int ret;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page->mapping;
	spinlock_t *ptl;
	struct inode *inode;
	pgoff_t offset, max_off;
	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;

	/*
	 * Always mark a PTE as write-protected when needed, regardless of
	 * VM_WRITE, which the user might change.
	 */
	if (wp_copy) {
		_dst_pte = pte_mkuffd_wp(_dst_pte);
		writable = false;
	}

	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte);
	else
		/*
		 * We need this to make sure write bit removed; as mk_pte()
		 * could return a pte with write bit set.
		 */
		_dst_pte = pte_wrprotect(_dst_pte);
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow to overwrite a pte marker: consider when both MISSING|WP
	 * registered, we firstly wr-protect a none pte which has no page cache
	 * page backing it, then access the page.
	 */
	if (!pte_none_mostly(*dst_pte))
		goto out_unlock;
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			lru_cache_add(page);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		lru_cache_add_inactive_or_unevictable(page, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}
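
/*
 * UFFDIO_COPY for anonymous memory: allocate a fresh page, copy the source
 * buffer into it, and install it with mfill_atomic_install_pte().  If the
 * atomic copy faults while mmap_lock is held, return -ENOENT and hand the
 * page back to the caller so the copy can be retried without the lock.
 */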
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}

		flush_dcache_page(page);
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	put_page(page);
	goto out;
}
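
/*
 * UFFDIO_ZEROPAGE for private mappings: map the empty zero page at dst_addr
 * instead of allocating and zeroing a new page.
 */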
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}
/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
				pmd_t *dst_pmd,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				bool wp_copy)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct page *page;
	int ret;

	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
	if (ret)
		goto out;

	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;

	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release:
	unlock_page(page);
	put_page(page);
	goto out;
}
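
/*
 * Walk the page tables for @address, allocating the p4d/pud/pmd levels as
 * needed, and return the pmd to install into.  The returned pmd may already
 * be populated, possibly by a transparent huge pmd; callers must check.
 */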
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we didn't run this because the pmd was
	 * missing, the *pmd may be already established and in
	 * turn it may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held, it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode,
					      bool wp_copy)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge pages may exist as used
	 * by THP.  Since we can not reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has a anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page,
					       wp_copy);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode,
				      bool wp_copy);
#endif /* CONFIG_HUGETLB_PAGE */
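
/*
 * Fill a single page at dst_addr: dispatch to the CONTINUE, anonymous-copy,
 * zeropage or shmem helper depending on the requested mode and on whether
 * the destination VMA is shared.
 */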
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						enum mcopy_atomic_mode mode,
						bool wp_copy)
{
	ssize_t err;

	if (mode == MCOPY_ATOMIC_CONTINUE) {
		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
					    wp_copy);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     wp_copy, page);
	}

	return err;
}
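
/*
 * Common implementation of UFFDIO_COPY, UFFDIO_ZEROPAGE and UFFDIO_CONTINUE:
 * validate the request, take mmap_lock for read and fill the destination
 * range one page at a time.  When the atomic copy from user space fails,
 * the lock is dropped, the copy is redone with a sleeping copy_from_user()
 * and the lookup is retried.
 */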
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      atomic_t *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, mcopy_mode,
					      wp_copy);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has a anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * parent process.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If an huge pmd materialized from under us fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_page(page);
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
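
/* Entry points called from the userfaultfd ioctl handlers. */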
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     atomic_t *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}
ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}
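
/*
 * UFFDIO_WRITEPROTECT: switch uffd-wp protection on or off across the given
 * range by calling change_protection() with MM_CP_UFFD_WP or
 * MM_CP_UFFD_WP_RESOLVE.
 */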
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	struct mmu_gather tlb;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);

	if (!dst_vma)
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_can_userfault(dst_vma, dst_vma->vm_flags))
		goto out_unlock;

	if (is_vm_hugetlb_page(dst_vma)) {
		err = -EINVAL;
		page_mask = vma_kernel_pagesize(dst_vma) - 1;
		if ((start & page_mask) || (len & page_mask))
			goto out_unlock;
	}

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	tlb_gather_mmu(&tlb, dst_mm);
	change_protection(&tlb, dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
	tlb_finish_mmu(&tlb);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}