]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: have mmap_action_complete() handle the rmap lock and unmap
authorLorenzo Stoakes (Oracle) <ljs@kernel.org>
Fri, 20 Mar 2026 22:39:33 +0000 (22:39 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Sun, 5 Apr 2026 20:53:42 +0000 (13:53 -0700)
Rather than have the callers handle both the rmap lock release and the
unmapping of the VMA on error, handle them within the mmap_action_complete()
logic where it makes sense to, being careful not to unlock twice.

This simplifies the logic and makes it harder to make mistakes with this,
while retaining correct behaviour with regard to avoiding deadlocks.

Also replace the call_action_complete() function with a direct invocation
of mmap_action_complete() as the abstraction is no longer required.

Also update the VMA tests to reflect this change.

Link: https://lkml.kernel.org/r/8d1ee8ebd3542d006a47e8382fb80cf5b57ecf10.1774045440.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Bodo Stroesser <bostroesser@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Clemens Ladisch <clemens@ladisch.de>
Cc: David Hildenbrand <david@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Dexuan Cui <decui@microsoft.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Long Li <longli@microsoft.com>
Cc: Marc Dionne <marc.dionne@auristor.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Miquel Raynal <miquel.raynal@bootlin.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Richard Weinberger <richard@nod.at>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vignesh Raghavendra <vigneshr@ti.com>
Cc: Wei Liu <wei.liu@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/util.c
mm/vma.c
tools/testing/vma/include/dup.h

index 4dddd89153d4f3dbbf519addf4a4d63661ad8a45..241510e21f4bc813ca94b3269c23364c583289c2 100644 (file)
@@ -1863,6 +1863,25 @@ static inline int io_remap_pfn_range_prepare(struct vm_area_desc *desc)
        return 0;
 }
 
+/*
+ * When we succeed an mmap action or just before we unmap a VMA on error, we
+ * need to ensure any rmap lock held is released. On unmap it's required to
+ * avoid a deadlock.
+ */
+static inline void maybe_rmap_unlock_action(struct vm_area_struct *vma,
+               struct mmap_action *action)
+{
+       struct file *file;
+
+       if (!action->hide_from_rmap_until_complete)
+               return;
+
+       VM_WARN_ON_ONCE(vma_is_anonymous(vma));
+       file = vma->vm_file;
+       i_mmap_unlock_write(file->f_mapping);
+       action->hide_from_rmap_until_complete = false;
+}
+
 #ifdef CONFIG_MMU_NOTIFIER
 static inline bool clear_flush_young_ptes_notify(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep, unsigned int nr)
index 54eab29adb56f4792d68d532868532b030b595da..e272efca8c0e971e407f7f67478f29d935678f0e 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -1219,13 +1219,7 @@ int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
        action->hide_from_rmap_until_complete = false;
 
        set_vma_from_desc(vma, &desc);
-       err = mmap_action_complete(vma, action);
-       if (err) {
-               const size_t len = vma_pages(vma) << PAGE_SHIFT;
-
-               do_munmap(current->mm, vma->vm_start, len, NULL);
-       }
-       return err;
+       return mmap_action_complete(vma, action);
 }
 EXPORT_SYMBOL(compat_vma_mmap);
 
@@ -1320,26 +1314,30 @@ again:
 static int mmap_action_finish(struct vm_area_struct *vma,
                              struct mmap_action *action, int err)
 {
+       size_t len;
+
+       if (!err && action->success_hook)
+               err = action->success_hook(vma);
+
+       /* do_munmap() might take rmap lock, so release if held. */
+       maybe_rmap_unlock_action(vma, action);
+       if (!err)
+               return 0;
+
        /*
         * If an error occurs, unmap the VMA altogether and return an error. We
         * only clear the newly allocated VMA, since this function is only
         * invoked if we do NOT merge, so we only clean up the VMA we created.
         */
-       if (err) {
-               if (action->error_hook) {
-                       /* We may want to filter the error. */
-                       err = action->error_hook(err);
-
-                       /* The caller should not clear the error. */
-                       VM_WARN_ON_ONCE(!err);
-               }
-               return err;
+       len = vma_pages(vma) << PAGE_SHIFT;
+       do_munmap(current->mm, vma->vm_start, len, NULL);
+       if (action->error_hook) {
+               /* We may want to filter the error. */
+               err = action->error_hook(err);
+               /* The caller should not clear the error. */
+               VM_WARN_ON_ONCE(!err);
        }
-
-       if (action->success_hook)
-               return action->success_hook(vma);
-
-       return 0;
+       return err;
 }
 
 #ifdef CONFIG_MMU
@@ -1377,7 +1375,6 @@ EXPORT_SYMBOL(mmap_action_prepare);
  */
 int mmap_action_complete(struct vm_area_struct *vma,
                         struct mmap_action *action)
-
 {
        int err = 0;
 
index 8ad24be1654e582469ce3b9fb4a7741f7b92496a..e1950ae048e22f7528019505544383c81891ca99 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -2729,30 +2729,6 @@ static bool can_set_ksm_flags_early(struct mmap_state *map)
        return false;
 }
 
-static int call_action_complete(struct mmap_state *map,
-                               struct mmap_action *action,
-                               struct vm_area_struct *vma)
-{
-       int err;
-
-       err = mmap_action_complete(vma, action);
-
-       /* If we held the file rmap we need to release it. */
-       if (action->hide_from_rmap_until_complete) {
-               struct file *file = vma->vm_file;
-
-               i_mmap_unlock_write(file->f_mapping);
-       }
-
-       if (err) {
-               const size_t len = vma_pages(vma) << PAGE_SHIFT;
-
-               do_munmap(current->mm, vma->vm_start, len, NULL);
-       }
-
-       return err;
-}
-
 static unsigned long __mmap_region(struct file *file, unsigned long addr,
                unsigned long len, vma_flags_t vma_flags,
                unsigned long pgoff, struct list_head *uf)
@@ -2804,7 +2780,7 @@ static unsigned long __mmap_region(struct file *file, unsigned long addr,
        __mmap_complete(&map, vma);
 
        if (have_mmap_prepare && allocated_new) {
-               error = call_action_complete(&map, &desc.action, vma);
+               error = mmap_action_complete(vma, &desc.action);
 
                if (error)
                        return error;
index 64bb56980b9cc7d36ff2b91a6f6e236194d9fc08..a95a4b07f68bec17129f38fd7150d4df43435b28 100644 (file)
@@ -1300,13 +1300,7 @@ static inline int compat_vma_mmap(struct file *file, struct vm_area_struct *vma)
        action->hide_from_rmap_until_complete = false;
 
        set_vma_from_desc(vma, &desc);
-       err = mmap_action_complete(vma, action);
-       if (err) {
-               const size_t len = vma_pages(vma) << PAGE_SHIFT;
-
-               do_munmap(current->mm, vma->vm_start, len, NULL);
-       }
-       return err;
+       return mmap_action_complete(vma, action);
 }
 
 static inline void vma_iter_init(struct vma_iterator *vmi,