git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jun 2023 18:03:29 +0000 (20:03 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jun 2023 18:03:29 +0000 (20:03 +0200)
added patches:
mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
series

queue-6.1/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch [new file with mode: 0644]
queue-6.1/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch [new file with mode: 0644]
queue-6.1/series [new file with mode: 0644]

diff --git a/queue-6.1/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch b/queue-6.1/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
new file mode 100644 (file)
index 0000000..052b3c1
--- /dev/null
@@ -0,0 +1,114 @@
+From eb2fcec387b998d8cb7c8de15c2b03f0c20d643a Mon Sep 17 00:00:00 2001
+From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Date: Sat, 17 Jun 2023 20:47:08 -0400
+Subject: mm/mmap: Fix error path in do_vmi_align_munmap()
+
+From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+
+commit 606c812eb1d5b5fb0dd9e330ca94b52d7c227830 upstream
+
+The error unrolling was leaving the VMAs detached in many cases and
+leaving the locked_vm statistic altered, and skipping the unrolling
+entirely in the case of the vma tree write failing.
+
+Fix the error path by re-attaching the detached VMAs and adding the
+necessary goto for the failed vma tree write, and fix the locked_vm
+statistic by only updating after the vma tree write succeeds.
+
+Fixes: 763ecb035029 ("mm: remove the vma linked list")
+Reported-by: Vegard Nossum <vegard.nossum@oracle.com>
+Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[ dwmw2: Strictly, the original patch wasn't *re-attaching* the
+         detached VMAs. They *were* still attached but just had
+         the 'detached' flag set, which is an optimisation. Which
+         doesn't exist in 6.3, so drop that. Also drop the call
+         to vma_start_write() which came in with the per-VMA
+         locking in 6.4. ]
+[ dwmw2 (6.1): It's do_mas_align_munmap() here. And has two call
+         sites for the now-removed munmap_sidetree() function.
+         Inline them both rather than trying to backport various
+         dependencies with potentially subtle interactions. ]
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mmap.c |   33 ++++++++++++++-------------------
+ 1 file changed, 14 insertions(+), 19 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2311,19 +2311,6 @@ int split_vma(struct mm_struct *mm, stru
+       return __split_vma(mm, vma, addr, new_below);
+ }
+-static inline int munmap_sidetree(struct vm_area_struct *vma,
+-                                 struct ma_state *mas_detach)
+-{
+-      mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
+-      if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
+-              return -ENOMEM;
+-
+-      if (vma->vm_flags & VM_LOCKED)
+-              vma->vm_mm->locked_vm -= vma_pages(vma);
+-
+-      return 0;
+-}
+-
+ /*
+  * do_mas_align_munmap() - munmap the aligned region from @start to @end.
+  * @mas: The maple_state, ideally set up to alter the correct tree location.
+@@ -2345,6 +2332,7 @@ do_mas_align_munmap(struct ma_state *mas
+       struct maple_tree mt_detach;
+       int count = 0;
+       int error = -ENOMEM;
++      unsigned long locked_vm = 0;
+       MA_STATE(mas_detach, &mt_detach, 0, 0);
+       mt_init_flags(&mt_detach, mas->tree->ma_flags & MT_FLAGS_LOCK_MASK);
+       mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+@@ -2403,18 +2391,23 @@ do_mas_align_munmap(struct ma_state *mas
+                       mas_set(mas, end);
+                       split = mas_prev(mas, 0);
+-                      error = munmap_sidetree(split, &mas_detach);
++                      mas_set_range(&mas_detach, split->vm_start, split->vm_end - 1);
++                      error = mas_store_gfp(&mas_detach, split, GFP_KERNEL);
+                       if (error)
+-                              goto munmap_sidetree_failed;
++                              goto munmap_gather_failed;
++                      if (next->vm_flags & VM_LOCKED)
++                              locked_vm += vma_pages(split);
+                       count++;
+                       if (vma == next)
+                               vma = split;
+                       break;
+               }
+-              error = munmap_sidetree(next, &mas_detach);
+-              if (error)
+-                      goto munmap_sidetree_failed;
++              mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
++              if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
++                      goto munmap_gather_failed;
++              if (next->vm_flags & VM_LOCKED)
++                      locked_vm += vma_pages(next);
+               count++;
+ #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+@@ -2464,6 +2457,8 @@ do_mas_align_munmap(struct ma_state *mas
+       }
+ #endif
+       mas_store_prealloc(mas, NULL);
++
++      mm->locked_vm -= locked_vm;
+       mm->map_count -= count;
+       /*
+        * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+@@ -2490,7 +2485,7 @@ do_mas_align_munmap(struct ma_state *mas
+       return downgrade ? 1 : 0;
+ userfaultfd_error:
+-munmap_sidetree_failed:
++munmap_gather_failed:
+ end_split_failed:
+       __mt_destroy(&mt_detach);
+ start_split_failed:
diff --git a/queue-6.1/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch b/queue-6.1/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
new file mode 100644 (file)
index 0000000..b1393d3
--- /dev/null
@@ -0,0 +1,55 @@
+From c308e1c3a7429681e6a8615421a68f14b0c345c0 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Wed, 28 Jun 2023 10:55:03 +0100
+Subject: mm/mmap: Fix error return in do_vmi_align_munmap()
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 6c26bd4384da24841bac4f067741bbca18b0fb74 upstream
+
+If mas_store_gfp() in the gather loop failed, the 'error' variable that
+ultimately gets returned was not being set. In many cases, its original
+value of -ENOMEM was still in place, and that was fine. But if VMAs had
+been split at the start or end of the range, then 'error' could be zero.
+
+Change to the 'error = foo(); if (error) goto …' idiom to fix the bug.
+
+Also clean up a later case which avoided the same bug by *explicitly*
+setting error = -ENOMEM right before calling the function that might
+return -ENOMEM.
+
+In a final cosmetic change, move the 'Point of no return' comment to
+*after* the goto. That's been in the wrong place since the preallocation
+was removed, and this new error path was added.
+
+Fixes: 606c812eb1d5 ("mm/mmap: Fix error path in do_vmi_align_munmap()")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mmap.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2404,7 +2404,8 @@ do_mas_align_munmap(struct ma_state *mas
+                       break;
+               }
+               mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
+-              if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
++              error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
++              if (error)
+                       goto munmap_gather_failed;
+               if (next->vm_flags & VM_LOCKED)
+                       locked_vm += vma_pages(next);
+@@ -2456,6 +2457,7 @@ do_mas_align_munmap(struct ma_state *mas
+               mas_set_range(mas, start, end - 1);
+       }
+ #endif
++      /* Point of no return */
+       mas_store_prealloc(mas, NULL);
+       mm->locked_vm -= locked_vm;
diff --git a/queue-6.1/series b/queue-6.1/series
new file mode 100644 (file)
index 0000000..eb7f63b
--- /dev/null
@@ -0,0 +1,2 @@
+mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
+mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch