userfaultfd: introduce vm_uffd_ops->alloc_folio()
author		Mike Rapoport (Microsoft) <rppt@kernel.org>
		Thu, 2 Apr 2026 04:11:50 +0000 (07:11 +0300)
committer	Andrew Morton <akpm@linux-foundation.org>
		Sat, 18 Apr 2026 07:10:54 +0000 (00:10 -0700)
Introduce vm_uffd_ops->alloc_folio() and use it to refactor
mfill_atomic_pte_zeroed_folio() and mfill_atomic_pte_copy().

mfill_atomic_pte_zeroed_folio() and mfill_atomic_pte_copy() perform
almost identical actions:
* allocate a folio
* update folio contents (either copy from userspace or fill with zeros)
* update page tables with the new folio

Split out a __mfill_atomic_pte() helper that handles both cases and uses
the newly introduced vm_uffd_ops->alloc_folio() to allocate the folio.
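
In outline, the helper performs the three steps above in one place.  The
following is an illustrative sketch condensed from the diff below, not the
literal kernel code: error handling, the __folio_mark_uptodate() barrier
and the copy-retry fallback are elided, and the uffd flags are assumed to
be reachable as state->flags.

	static int __mfill_atomic_pte(struct mfill_state *state,
				      const struct vm_uffd_ops *ops)
	{
		/* 1) allocate a folio via the per-VMA-type hook */
		struct folio *folio = ops->alloc_folio(state->vma,
						       state->dst_addr);

		if (!folio)
			return -ENOMEM;

		/* 2) fill it: copy from userspace or fill with zeros */
		if (uffd_flags_mode_is(state->flags, MFILL_ATOMIC_COPY))
			mfill_copy_folio_locked(folio, state->src_addr);
		else
			clear_user_highpage(&folio->page, state->dst_addr);

		/* 3) update the page tables with the new folio */
		return mfill_atomic_install_pte(state->pmd, state->vma,
						state->dst_addr, &folio->page,
						true, state->flags);
	}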

Pass the ops structure from the callers to __mfill_atomic_pte() to later
allow using anon_uffd_ops for MAP_PRIVATE mappings of file-backed VMAs.

Note that the new ops method is called alloc_folio() rather than
folio_alloc() to avoid a clash with the alloc_tag folio_alloc() macro.
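
For context, the alloc_tag instrumentation wraps folio_alloc() in a
function-like macro, roughly:

	/* from include/linux/gfp.h (alloc_tag instrumentation) */
	#define folio_alloc(...)	alloc_hooks(folio_alloc_noprof(__VA_ARGS__))

so a method named folio_alloc() could not be called through the ops
pointer: the preprocessor would rewrite ops->folio_alloc(vma, addr) into
ops->alloc_hooks(folio_alloc_noprof(vma, addr)), which does not compile.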

Link: https://lore.kernel.org/20260402041156.1377214-10-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: James Houghton <jthoughton@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrei Vagin <avagin@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Harry Yoo (Oracle) <harry@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Carlier <devnexen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/userfaultfd_k.h
mm/userfaultfd.c

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 4bda632dae8847b79f7b0ed2a4392493d210b5e2..0f508c752741582740a308e9fb9a2fc400e90624 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -94,6 +94,12 @@ struct vm_uffd_ops {
         * The returned folio is locked and with reference held.
         */
        struct folio *(*get_folio_noalloc)(struct inode *inode, pgoff_t pgoff);
+       /*
+        * Called during resolution of UFFDIO_COPY request.
+        * Should allocate and return a folio or NULL if allocation fails.
+        */
+       struct folio *(*alloc_folio)(struct vm_area_struct *vma,
+                                    unsigned long addr);
 };
 
 /* A combined operation mode + behavior flags. */
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 5b204c3ec986cc3120b7324502b9079a7896cf22..dd191703b32065e4dc89b94c1731135a985e29dc 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -42,8 +42,26 @@ static bool anon_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
        return true;
 }
 
+static struct folio *anon_alloc_folio(struct vm_area_struct *vma,
+                                     unsigned long addr)
+{
+       struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+                                             addr);
+
+       if (!folio)
+               return NULL;
+
+       if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
+               folio_put(folio);
+               return NULL;
+       }
+
+       return folio;
+}
+
 static const struct vm_uffd_ops anon_uffd_ops = {
        .can_userfault  = anon_can_userfault,
+       .alloc_folio    = anon_alloc_folio,
 };
 
 static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
@@ -456,7 +474,8 @@ static int mfill_copy_folio_retry(struct mfill_state *state, struct folio *folio
        return 0;
 }
 
-static int mfill_atomic_pte_copy(struct mfill_state *state)
+static int __mfill_atomic_pte(struct mfill_state *state,
+                             const struct vm_uffd_ops *ops)
 {
        unsigned long dst_addr = state->dst_addr;
        unsigned long src_addr = state->src_addr;
@@ -464,16 +483,12 @@ static int mfill_atomic_pte_copy(struct mfill_state *state)
        struct folio *folio;
        int ret;
 
-       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, state->vma, dst_addr);
+       folio = ops->alloc_folio(state->vma, state->dst_addr);
        if (!folio)
                return -ENOMEM;
 
-       ret = -ENOMEM;
-       if (mem_cgroup_charge(folio, state->vma->vm_mm, GFP_KERNEL))
-               goto out_release;
-
-       ret = mfill_copy_folio_locked(folio, src_addr);
-       if (unlikely(ret)) {
+       if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+               ret = mfill_copy_folio_locked(folio, src_addr);
                /*
                 * Fallback to copy_from_user outside mmap_lock.
                 * If retry is successful, mfill_copy_folio_locked() returns
@@ -481,9 +496,15 @@ static int mfill_atomic_pte_copy(struct mfill_state *state)
                 * If there was an error, we must mfill_put_vma() anyway and it
                 * will take care of unlocking if needed.
                 */
-               ret = mfill_copy_folio_retry(state, folio);
-               if (ret)
-                       goto out_release;
+               if (unlikely(ret)) {
+                       ret = mfill_copy_folio_retry(state, folio);
+                       if (ret)
+                               goto err_folio_put;
+               }
+       } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
+               clear_user_highpage(&folio->page, state->dst_addr);
+       } else {
+               VM_WARN_ONCE(1, "Unknown UFFDIO operation, flags: %x", flags);
        }
 
        /*
@@ -496,47 +517,30 @@ static int mfill_atomic_pte_copy(struct mfill_state *state)
        ret = mfill_atomic_install_pte(state->pmd, state->vma, dst_addr,
                                       &folio->page, true, flags);
        if (ret)
-               goto out_release;
-out:
-       return ret;
-out_release:
+               goto err_folio_put;
+
+       return 0;
+
+err_folio_put:
+       folio_put(folio);
        /* Don't return -ENOENT so that our caller won't retry */
        if (ret == -ENOENT)
                ret = -EFAULT;
-       folio_put(folio);
-       goto out;
+       return ret;
 }
 
-static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
-                                        struct vm_area_struct *dst_vma,
-                                        unsigned long dst_addr)
+static int mfill_atomic_pte_copy(struct mfill_state *state)
 {
-       struct folio *folio;
-       int ret = -ENOMEM;
-
-       folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
-       if (!folio)
-               return ret;
-
-       if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
-               goto out_put;
+       const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
 
-       /*
-        * The memory barrier inside __folio_mark_uptodate makes sure that
-        * zeroing out the folio become visible before mapping the page
-        * using set_pte_at(). See do_anonymous_page().
-        */
-       __folio_mark_uptodate(folio);
+       return __mfill_atomic_pte(state, ops);
+}
 
-       ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-                                      &folio->page, true, 0);
-       if (ret)
-               goto out_put;
+static int mfill_atomic_pte_zeroed_folio(struct mfill_state *state)
+{
+       const struct vm_uffd_ops *ops = vma_uffd_ops(state->vma);
 
-       return 0;
-out_put:
-       folio_put(folio);
-       return ret;
+       return __mfill_atomic_pte(state, ops);
 }
 
 static int mfill_atomic_pte_zeropage(struct mfill_state *state)
@@ -549,7 +553,7 @@ static int mfill_atomic_pte_zeropage(struct mfill_state *state)
        int ret;
 
        if (mm_forbids_zeropage(dst_vma->vm_mm))
-               return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
+               return mfill_atomic_pte_zeroed_folio(state);
 
        _dst_pte = pte_mkspecial(pfn_pte(zero_pfn(dst_addr),
                                         dst_vma->vm_page_prot));