git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.3-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jun 2023 18:03:46 +0000 (20:03 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jun 2023 18:03:46 +0000 (20:03 +0200)
added patches:
mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
series

queue-6.3/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch [new file with mode: 0644]
queue-6.3/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch [new file with mode: 0644]
queue-6.3/series [new file with mode: 0644]

diff --git a/queue-6.3/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch b/queue-6.3/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
new file mode 100644 (file)
index 0000000..26c52a2
--- /dev/null
@@ -0,0 +1,103 @@
+From 298524d6e772f47f7f3a521b71c15e6a56b153eb Mon Sep 17 00:00:00 2001
+From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Date: Sat, 17 Jun 2023 20:47:08 -0400
+Subject: mm/mmap: Fix error path in do_vmi_align_munmap()
+
+From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+
+commit 606c812eb1d5b5fb0dd9e330ca94b52d7c227830 upstream.
+
+The error unrolling was leaving the VMAs detached in many cases and
+leaving the locked_vm statistic altered, and skipping the unrolling
+entirely in the case of the vma tree write failing.
+
+Fix the error path by re-attaching the detached VMAs and adding the
+necessary goto for the failed vma tree write, and fix the locked_vm
+statistic by only updating after the vma tree write succeeds.
+
+Fixes: 763ecb035029 ("mm: remove the vma linked list")
+Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[ dwmw2: Strictly, the original patch wasn't *re-attaching* the
+         detached VMAs. They *were* still attached but just had
+         the 'detached' flag set, which is an optimisation. Which
+         doesn't exist in 6.3, so drop that. Also drop the call
+         to vma_start_write() which came in with the per-VMA
+         locking in 6.4. ]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mmap.c |   29 +++++++++++------------------
+ 1 file changed, 11 insertions(+), 18 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2280,19 +2280,6 @@ int split_vma(struct vma_iterator *vmi,
+       return __split_vma(vmi, vma, addr, new_below);
+ }
+-static inline int munmap_sidetree(struct vm_area_struct *vma,
+-                                 struct ma_state *mas_detach)
+-{
+-      mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
+-      if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
+-              return -ENOMEM;
+-
+-      if (vma->vm_flags & VM_LOCKED)
+-              vma->vm_mm->locked_vm -= vma_pages(vma);
+-
+-      return 0;
+-}
+-
+ /*
+  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+  * @vmi: The vma iterator
+@@ -2314,6 +2301,7 @@ do_vmi_align_munmap(struct vma_iterator
+       struct maple_tree mt_detach;
+       int count = 0;
+       int error = -ENOMEM;
++      unsigned long locked_vm = 0;
+       MA_STATE(mas_detach, &mt_detach, 0, 0);
+       mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+       mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+@@ -2359,9 +2347,11 @@ do_vmi_align_munmap(struct vma_iterator
+                       if (error)
+                               goto end_split_failed;
+               }
+-              error = munmap_sidetree(next, &mas_detach);
+-              if (error)
+-                      goto munmap_sidetree_failed;
++              mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
++              if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
++                      goto munmap_gather_failed;
++              if (next->vm_flags & VM_LOCKED)
++                      locked_vm += vma_pages(next);
+               count++;
+ #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+@@ -2407,10 +2397,12 @@ do_vmi_align_munmap(struct vma_iterator
+       }
+ #endif
+       /* Point of no return */
++      error = -ENOMEM;
+       vma_iter_set(vmi, start);
+       if (vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL))
+-              return -ENOMEM;
++              goto clear_tree_failed;
++      mm->locked_vm -= locked_vm;
+       mm->map_count -= count;
+       /*
+        * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+@@ -2440,8 +2432,9 @@ do_vmi_align_munmap(struct vma_iterator
+       validate_mm(mm);
+       return downgrade ? 1 : 0;
++clear_tree_failed:
+ userfaultfd_error:
+-munmap_sidetree_failed:
++munmap_gather_failed:
+ end_split_failed:
+       __mt_destroy(&mt_detach);
+ start_split_failed:
diff --git a/queue-6.3/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch b/queue-6.3/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
new file mode 100644 (file)
index 0000000..dfa7c97
--- /dev/null
@@ -0,0 +1,63 @@
+From fc0766af431cd7fe01d04231dfcc3458dafa792c Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Wed, 28 Jun 2023 10:55:03 +0100
+Subject: mm/mmap: Fix error return in do_vmi_align_munmap()
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 6c26bd4384da24841bac4f067741bbca18b0fb74 upstream.
+
+If mas_store_gfp() in the gather loop failed, the 'error' variable that
+ultimately gets returned was not being set. In many cases, its original
+value of -ENOMEM was still in place, and that was fine. But if VMAs had
+been split at the start or end of the range, then 'error' could be zero.
+
+Change to the 'error = foo(); if (error) goto …' idiom to fix the bug.
+
+Also clean up a later case which avoided the same bug by *explicitly*
+setting error = -ENOMEM right before calling the function that might
+return -ENOMEM.
+
+In a final cosmetic change, move the 'Point of no return' comment to
+*after* the goto. That's been in the wrong place since the preallocation
+was removed, and this new error path was added.
+
+Fixes: 606c812eb1d5 ("mm/mmap: Fix error path in do_vmi_align_munmap()")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mmap.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2348,7 +2348,8 @@ do_vmi_align_munmap(struct vma_iterator
+                               goto end_split_failed;
+               }
+               mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
+-              if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
++              error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
++              if (error)
+                       goto munmap_gather_failed;
+               if (next->vm_flags & VM_LOCKED)
+                       locked_vm += vma_pages(next);
+@@ -2396,12 +2397,12 @@ do_vmi_align_munmap(struct vma_iterator
+               BUG_ON(count != test_count);
+       }
+ #endif
+-      /* Point of no return */
+-      error = -ENOMEM;
+       vma_iter_set(vmi, start);
+-      if (vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL))
++      error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
++      if (error)
+               goto clear_tree_failed;
++      /* Point of no return */
+       mm->locked_vm -= locked_vm;
+       mm->map_count -= count;
+       /*
diff --git a/queue-6.3/series b/queue-6.3/series
new file mode 100644 (file)
index 0000000..eb7f63b
--- /dev/null
@@ -0,0 +1,2 @@
+mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
+mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch