Rather than have the callers handle both the rmap lock release and
unmapping the VMA on error, handle it within the mmap_action_complete()
logic where it makes sense to, being careful not to unlock twice.
This simplifies the logic and makes it harder to make mistakes with this,
while retaining correct behaviour with regard to avoiding deadlocks.
Also replace the call_action_complete() function with a direct invocation
of mmap_action_complete() as the abstraction is no longer required.
Also update the VMA tests to reflect this change.
Link: https://lkml.kernel.org/r/8d1ee8ebd3542d006a47e8382fb80cf5b57ecf10.1774045440.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Bodo Stroesser <bostroesser@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Clemens Ladisch <clemens@ladisch.de>
Cc: David Hildenbrand <david@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Dexuan Cui <decui@microsoft.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Long Li <longli@microsoft.com>
Cc: Marc Dionne <marc.dionne@auristor.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Miquel Raynal <miquel.raynal@bootlin.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Richard Weinberger <richard@nod.at>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vignesh Raghavendra <vigneshr@ti.com>
Cc: Wei Liu <wei.liu@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
return 0;
}
+/*
+ * When we succeed an mmap action or just before we unmap a VMA on error, we
+ * need to ensure any rmap lock held is released. On unmap it's required to
+ * avoid a deadlock.
+ */
+static inline void maybe_rmap_unlock_action(struct vm_area_struct *vma,
+ struct mmap_action *action)
+{
+ struct file *file;
+
+ if (!action->hide_from_rmap_until_complete)
+ return;
+
+ VM_WARN_ON_ONCE(vma_is_anonymous(vma));
+ file = vma->vm_file;
+ i_mmap_unlock_write(file->f_mapping);
+ action->hide_from_rmap_until_complete = false;
+}
+
#ifdef CONFIG_MMU_NOTIFIER
static inline bool clear_flush_young_ptes_notify(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, unsigned int nr)
action->hide_from_rmap_until_complete = false;
set_vma_from_desc(vma, &desc);
- err = mmap_action_complete(vma, action);
- if (err) {
- const size_t len = vma_pages(vma) << PAGE_SHIFT;
-
- do_munmap(current->mm, vma->vm_start, len, NULL);
- }
- return err;
+ return mmap_action_complete(vma, action);
}
EXPORT_SYMBOL(compat_vma_mmap);
static int mmap_action_finish(struct vm_area_struct *vma,
struct mmap_action *action, int err)
{
+ size_t len;
+
+ if (!err && action->success_hook)
+ err = action->success_hook(vma);
+
+ /* do_munmap() might take rmap lock, so release if held. */
+ maybe_rmap_unlock_action(vma, action);
+ if (!err)
+ return 0;
+
/*
* If an error occurs, unmap the VMA altogether and return an error. We
* only clear the newly allocated VMA, since this function is only
* invoked if we do NOT merge, so we only clean up the VMA we created.
*/
- if (err) {
- if (action->error_hook) {
- /* We may want to filter the error. */
- err = action->error_hook(err);
-
- /* The caller should not clear the error. */
- VM_WARN_ON_ONCE(!err);
- }
- return err;
+ len = vma_pages(vma) << PAGE_SHIFT;
+ do_munmap(current->mm, vma->vm_start, len, NULL);
+ if (action->error_hook) {
+ /* We may want to filter the error. */
+ err = action->error_hook(err);
+ /* The caller should not clear the error. */
+ VM_WARN_ON_ONCE(!err);
}
-
- if (action->success_hook)
- return action->success_hook(vma);
-
- return 0;
+ return err;
}
#ifdef CONFIG_MMU
*/
int mmap_action_complete(struct vm_area_struct *vma,
struct mmap_action *action)
-
{
int err = 0;
return false;
}
-static int call_action_complete(struct mmap_state *map,
- struct mmap_action *action,
- struct vm_area_struct *vma)
-{
- int err;
-
- err = mmap_action_complete(vma, action);
-
- /* If we held the file rmap we need to release it. */
- if (action->hide_from_rmap_until_complete) {
- struct file *file = vma->vm_file;
-
- i_mmap_unlock_write(file->f_mapping);
- }
-
- if (err) {
- const size_t len = vma_pages(vma) << PAGE_SHIFT;
-
- do_munmap(current->mm, vma->vm_start, len, NULL);
- }
-
- return err;
-}
-
static unsigned long __mmap_region(struct file *file, unsigned long addr,
unsigned long len, vma_flags_t vma_flags,
unsigned long pgoff, struct list_head *uf)
__mmap_complete(&map, vma);
if (have_mmap_prepare && allocated_new) {
- error = call_action_complete(&map, &desc.action, vma);
+ error = mmap_action_complete(vma, &desc.action);
if (error)
return error;
action->hide_from_rmap_until_complete = false;
set_vma_from_desc(vma, &desc);
- err = mmap_action_complete(vma, action);
- if (err) {
- const size_t len = vma_pages(vma) << PAGE_SHIFT;
-
- do_munmap(current->mm, vma->vm_start, len, NULL);
- }
- return err;
+ return mmap_action_complete(vma, action);
}
static inline void vma_iter_init(struct vma_iterator *vmi,