]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
userfaultfd: introduce vm_uffd_ops
authorMike Rapoport (Microsoft) <rppt@kernel.org>
Thu, 2 Apr 2026 04:11:48 +0000 (07:11 +0300)
committerAndrew Morton <akpm@linux-foundation.org>
Sat, 18 Apr 2026 07:10:53 +0000 (00:10 -0700)
Current userfaultfd implementation works only with memory managed by core
MM: anonymous, shmem and hugetlb.

First, there is no fundamental reason to limit userfaultfd support only to
the core memory types, and userfaults can be handled similarly to regular
page faults provided a VMA owner implements appropriate callbacks.

Second, historically various code paths were conditioned on
vma_is_anonymous(), vma_is_shmem() and is_vm_hugetlb_page() and some of
these conditions can be expressed as operations implemented by a
particular memory type.

Introduce vm_uffd_ops extension to vm_operations_struct that will delegate
memory type specific operations to a VMA owner.

Operations for anonymous memory are handled internally in userfaultfd
using anon_uffd_ops, which is implicitly assigned to anonymous VMAs.

Start with a single operation, ->can_userfault(), which verifies that a
VMA meets the requirements for userfaultfd support at registration time.

Implement that method for anonymous, shmem and hugetlb and move relevant
parts of vma_can_userfault() into the new callbacks.

[rppt@kernel.org: relocate VM_DROPPABLE test, per Tal]
Link: https://lore.kernel.org/adffgfM5ANxtPIEF@kernel.org
Link: https://lore.kernel.org/20260402041156.1377214-8-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrei Vagin <avagin@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Harry Yoo <harry.yoo@oracle.com>
Cc: Harry Yoo (Oracle) <harry@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nikita Kalyazin <kalyazin@amazon.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Carlier <devnexen@gmail.com>
Cc: Tal Zussman <tz2294@columbia.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
include/linux/userfaultfd_k.h
mm/hugetlb.c
mm/shmem.c
mm/userfaultfd.c

index 8260e28205e905b74821bb299203b4a4d4f1754b..633bbf9a184a65b002e7728597bf39046e03ddb2 100644 (file)
@@ -758,6 +758,8 @@ struct vm_fault {
                                         */
 };
 
+struct vm_uffd_ops;
+
 /*
  * These are the virtual MM functions - opening of an area, closing and
  * unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -865,6 +867,9 @@ struct vm_operations_struct {
        struct page *(*find_normal_page)(struct vm_area_struct *vma,
                                         unsigned long addr);
 #endif /* CONFIG_FIND_NORMAL_PAGE */
+#ifdef CONFIG_USERFAULTFD
+       const struct vm_uffd_ops *uffd_ops;
+#endif
 };
 
 #ifdef CONFIG_NUMA_BALANCING
index ce0201c3dd822416e5837d899cfc139255a92503..6d445dbfe8ffd97526bc41cd2d59e06d2d9680f9 100644 (file)
@@ -83,6 +83,12 @@ struct userfaultfd_ctx {
 
 extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
 
+/* VMA userfaultfd operations */
+struct vm_uffd_ops {
+       /* Checks if a VMA can support userfaultfd */
+       bool (*can_userfault)(struct vm_area_struct *vma, vm_flags_t vm_flags);
+};
+
 /* A combined operation mode + behavior flags. */
 typedef unsigned int __bitwise uffd_flags_t;
 
index a786034ac95cbddaba370624e47c33b45607fe68..88009cd2a846c19d5ae56671ee13389e9be9c3b8 100644 (file)
@@ -4792,6 +4792,18 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
        return 0;
 }
 
+#ifdef CONFIG_USERFAULTFD
+static bool hugetlb_can_userfault(struct vm_area_struct *vma,
+                                 vm_flags_t vm_flags)
+{
+       return true;
+}
+
+static const struct vm_uffd_ops hugetlb_uffd_ops = {
+       .can_userfault = hugetlb_can_userfault,
+};
+#endif
+
 /*
  * When a new function is introduced to vm_operations_struct and added
  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
@@ -4805,6 +4817,9 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .close = hugetlb_vm_op_close,
        .may_split = hugetlb_vm_op_split,
        .pagesize = hugetlb_vm_op_pagesize,
+#ifdef CONFIG_USERFAULTFD
+       .uffd_ops = &hugetlb_uffd_ops,
+#endif
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
index 6fa1e8340c93fbc72ab1946cae5b4c675bc01905..389b2d76396e5025e2bf9be269f214c5bab96369 100644 (file)
@@ -3288,6 +3288,15 @@ out_unacct_blocks:
        shmem_inode_unacct_blocks(inode, 1);
        return ret;
 }
+
+static bool shmem_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+       return true;
+}
+
+static const struct vm_uffd_ops shmem_uffd_ops = {
+       .can_userfault  = shmem_can_userfault,
+};
 #endif /* CONFIG_USERFAULTFD */
 
 #ifdef CONFIG_TMPFS
@@ -5307,6 +5316,9 @@ static const struct vm_operations_struct shmem_vm_ops = {
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
 #endif
+#ifdef CONFIG_USERFAULTFD
+       .uffd_ops       = &shmem_uffd_ops,
+#endif
 };
 
 static const struct vm_operations_struct shmem_anon_vm_ops = {
@@ -5316,6 +5328,9 @@ static const struct vm_operations_struct shmem_anon_vm_ops = {
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
 #endif
+#ifdef CONFIG_USERFAULTFD
+       .uffd_ops       = &shmem_uffd_ops,
+#endif
 };
 
 int shmem_init_fs_context(struct fs_context *fc)
index ebdc6e24a2c70b40e02ffa98f950e6a8d633c30c..3a824e034a099df2a66f05c3fa10519a372e5a92 100644 (file)
@@ -34,6 +34,25 @@ struct mfill_state {
        pmd_t *pmd;
 };
 
+static bool anon_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags)
+{
+       /* anonymous memory does not support MINOR mode */
+       if (vm_flags & VM_UFFD_MINOR)
+               return false;
+       return true;
+}
+
+static const struct vm_uffd_ops anon_uffd_ops = {
+       .can_userfault  = anon_can_userfault,
+};
+
+static const struct vm_uffd_ops *vma_uffd_ops(struct vm_area_struct *vma)
+{
+       if (vma_is_anonymous(vma))
+               return &anon_uffd_ops;
+       return vma->vm_ops ? vma->vm_ops->uffd_ops : NULL;
+}
+
 static __always_inline
 bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
 {
@@ -2021,34 +2040,33 @@ out:
 bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
                       bool wp_async)
 {
-       vm_flags &= __VM_UFFD_FLAGS;
+       const struct vm_uffd_ops *ops = vma_uffd_ops(vma);
 
        if (vma->vm_flags & VM_DROPPABLE)
                return false;
 
-       if ((vm_flags & VM_UFFD_MINOR) &&
-           (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
-               return false;
+       vm_flags &= __VM_UFFD_FLAGS;
 
        /*
-        * If wp async enabled, and WP is the only mode enabled, allow any
+        * If WP is the only mode enabled and context is wp async, allow any
         * memory type.
         */
        if (wp_async && (vm_flags == VM_UFFD_WP))
                return true;
 
+       /* For any other mode reject VMAs that don't implement vm_uffd_ops */
+       if (!ops)
+               return false;
+
        /*
         * If user requested uffd-wp but not enabled pte markers for
-        * uffd-wp, then shmem & hugetlbfs are not supported but only
-        * anonymous.
+        * uffd-wp, then only anonymous memory is supported
         */
        if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) &&
            !vma_is_anonymous(vma))
                return false;
 
-       /* By default, allow any of anon|shmem|hugetlb */
-       return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
-           vma_is_shmem(vma);
+       return ops->can_userfault(vma, vm_flags);
 }
 
 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,