From: Greg Kroah-Hartman
Date: Wed, 28 Jun 2023 18:03:29 +0000 (+0200)
Subject: 6.1-stable patches
X-Git-Tag: v6.4.1~54
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=114ad89dfa3ce9b7e69447a2ba949030e6ce1ff5;p=thirdparty%2Fkernel%2Fstable-queue.git

6.1-stable patches

added patches:
	mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
	mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
	series
---

diff --git a/queue-6.1/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch b/queue-6.1/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
new file mode 100644
index 00000000000..052b3c1d28e
--- /dev/null
+++ b/queue-6.1/mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
@@ -0,0 +1,114 @@
+From eb2fcec387b998d8cb7c8de15c2b03f0c20d643a Mon Sep 17 00:00:00 2001
+From: "Liam R. Howlett"
+Date: Sat, 17 Jun 2023 20:47:08 -0400
+Subject: mm/mmap: Fix error path in do_vmi_align_munmap()
+
+From: "Liam R. Howlett"
+
+commit 606c812eb1d5b5fb0dd9e330ca94b52d7c227830 upstream
+
+The error unrolling was leaving the VMAs detached in many cases and
+leaving the locked_vm statistic altered, and skipping the unrolling
+entirely in the case of the vma tree write failing.
+
+Fix the error path by re-attaching the detached VMAs and adding the
+necessary goto for the failed vma tree write, and fix the locked_vm
+statistic by only updating after the vma tree write succeeds.
+
+Fixes: 763ecb035029 ("mm: remove the vma linked list")
+Reported-by: Vegard Nossum
+Signed-off-by: Liam R. Howlett
+Signed-off-by: Linus Torvalds
+[ dwmw2: Strictly, the original patch wasn't *re-attaching* the
+         detached VMAs. They *were* still attached but just had
+         the 'detached' flag set, which is an optimisation. Which
+         doesn't exist in 6.3, so drop that. Also drop the call
+         to vma_start_write() which came in with the per-VMA
+         locking in 6.4. ]
+[ dwmw2 (6.1): It's do_mas_align_munmap() here. And has two call
+               sites for the now-removed munmap_sidetree() function.
+               Inline them both rather than trying to backport various
+               dependencies with potentially subtle interactions. ]
+Signed-off-by: David Woodhouse
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/mmap.c | 33 ++++++++++++++-------------------
+ 1 file changed, 14 insertions(+), 19 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2311,19 +2311,6 @@ int split_vma(struct mm_struct *mm, stru
+ 	return __split_vma(mm, vma, addr, new_below);
+ }
+ 
+-static inline int munmap_sidetree(struct vm_area_struct *vma,
+-				   struct ma_state *mas_detach)
+-{
+-	mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
+-	if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
+-		return -ENOMEM;
+-
+-	if (vma->vm_flags & VM_LOCKED)
+-		vma->vm_mm->locked_vm -= vma_pages(vma);
+-
+-	return 0;
+-}
+-
+ /*
+  * do_mas_align_munmap() - munmap the aligned region from @start to @end.
+  * @mas: The maple_state, ideally set up to alter the correct tree location.
+@@ -2345,6 +2332,7 @@ do_mas_align_munmap(struct ma_state *mas
+ 	struct maple_tree mt_detach;
+ 	int count = 0;
+ 	int error = -ENOMEM;
++	unsigned long locked_vm = 0;
+ 	MA_STATE(mas_detach, &mt_detach, 0, 0);
+ 	mt_init_flags(&mt_detach, mas->tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+@@ -2403,18 +2391,23 @@ do_mas_align_munmap(struct ma_state *mas
+ 
+ 			mas_set(mas, end);
+ 			split = mas_prev(mas, 0);
+-			error = munmap_sidetree(split, &mas_detach);
++			mas_set_range(&mas_detach, split->vm_start, split->vm_end - 1);
++			error = mas_store_gfp(&mas_detach, split, GFP_KERNEL);
+ 			if (error)
+-				goto munmap_sidetree_failed;
++				goto munmap_gather_failed;
++			if (next->vm_flags & VM_LOCKED)
++				locked_vm += vma_pages(split);
+ 
+ 			count++;
+ 			if (vma == next)
+ 				vma = split;
+ 			break;
+ 		}
+-		error = munmap_sidetree(next, &mas_detach);
+-		if (error)
+-			goto munmap_sidetree_failed;
++		mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
++		if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
++			goto munmap_gather_failed;
++		if (next->vm_flags & VM_LOCKED)
++			locked_vm += vma_pages(next);
+ 
+ 		count++;
+ #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+@@ -2464,6 +2457,8 @@ do_mas_align_munmap(struct ma_state *mas
+ 	}
+ #endif
+ 	mas_store_prealloc(mas, NULL);
++
++	mm->locked_vm -= locked_vm;
+ 	mm->map_count -= count;
+ 	/*
+ 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+@@ -2490,7 +2485,7 @@ do_mas_align_munmap(struct ma_state *mas
+ 	return downgrade ? 1 : 0;
+ 
+ userfaultfd_error:
+-munmap_sidetree_failed:
++munmap_gather_failed:
+ end_split_failed:
+ 	__mt_destroy(&mt_detach);
+ start_split_failed:
diff --git a/queue-6.1/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch b/queue-6.1/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
new file mode 100644
index 00000000000..b1393d30c27
--- /dev/null
+++ b/queue-6.1/mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
@@ -0,0 +1,55 @@
+From c308e1c3a7429681e6a8615421a68f14b0c345c0 Mon Sep 17 00:00:00 2001
+From: David Woodhouse
+Date: Wed, 28 Jun 2023 10:55:03 +0100
+Subject: mm/mmap: Fix error return in do_vmi_align_munmap()
+
+From: David Woodhouse
+
+commit 6c26bd4384da24841bac4f067741bbca18b0fb74 upstream.
+
+If mas_store_gfp() in the gather loop failed, the 'error' variable that
+ultimately gets returned was not being set. In many cases, its original
+value of -ENOMEM was still in place, and that was fine. But if VMAs had
+been split at the start or end of the range, then 'error' could be zero.
+
+Change to the 'error = foo(); if (error) goto …' idiom to fix the bug.
+
+Also clean up a later case which avoided the same bug by *explicitly*
+setting error = -ENOMEM right before calling the function that might
+return -ENOMEM.
+
+In a final cosmetic change, move the 'Point of no return' comment to
+*after* the goto. That's been in the wrong place since the preallocation
+was removed, and this new error path was added.
+
+Fixes: 606c812eb1d5 ("mm/mmap: Fix error path in do_vmi_align_munmap()")
+Signed-off-by: David Woodhouse
+Cc: stable@vger.kernel.org
+Reviewed-by: Greg Kroah-Hartman
+Reviewed-by: Liam R. Howlett
+Signed-off-by: David Woodhouse
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/mmap.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2404,7 +2404,8 @@ do_mas_align_munmap(struct ma_state *mas
+ 			break;
+ 		}
+ 		mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
+-		if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
++		error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
++		if (error)
+ 			goto munmap_gather_failed;
+ 		if (next->vm_flags & VM_LOCKED)
+ 			locked_vm += vma_pages(next);
+@@ -2456,6 +2457,7 @@ do_mas_align_munmap(struct ma_state *mas
+ 		mas_set_range(mas, start, end - 1);
+ 	}
+ #endif
++	/* Point of no return */
+ 	mas_store_prealloc(mas, NULL);
+ 
+ 	mm->locked_vm -= locked_vm;
diff --git a/queue-6.1/series b/queue-6.1/series
new file mode 100644
index 00000000000..eb7f63bed40
--- /dev/null
+++ b/queue-6.1/series
@@ -0,0 +1,2 @@
+mm-mmap-fix-error-path-in-do_vmi_align_munmap.patch
+mm-mmap-fix-error-return-in-do_vmi_align_munmap.patch
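
The two backports above converge on one error-handling shape: gather VMAs into a side structure with a store that can fail, capture that return value in 'error' before jumping to the unwind labels, and fold statistics such as locked_vm into the mm only after the final tree write has succeeded, so a failed gather leaves the accounting untouched. The stand-alone C sketch below only illustrates that 'error = foo(); if (error) goto ...' gather/unwind idiom; gather_one(), commit_all() and the locked counter are hypothetical stand-ins, not the kernel's maple-tree or mm/mmap API.

/*
 * Illustrative userspace sketch of the gather/unwind idiom used by the
 * fixes above. All names are made up for the example.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int pages;
	int locked;		/* analogue of a VM_LOCKED mapping */
};

/* Fallible store into a side buffer (stand-in for the sidetree store). */
static int gather_one(struct item **side, int *count, const struct item *it)
{
	struct item *tmp = realloc(*side, (*count + 1) * sizeof(**side));

	if (!tmp)
		return -ENOMEM;	/* caller must see why we failed */
	tmp[*count] = *it;
	*side = tmp;
	*count += 1;
	return 0;
}

/* Final "tree write"; pretend it can fail as well. */
static int commit_all(const struct item *side, int count)
{
	return (side && count) ? 0 : -EINVAL;
}

static int do_align_munmap(struct item *items, int n, long *locked_stat)
{
	struct item *side = NULL;
	int count = 0;
	long locked = 0;	/* deferred: folded in only after commit succeeds */
	int error;

	for (int i = 0; i < n; i++) {
		/* Capture the return value before the goto, per the second fix. */
		error = gather_one(&side, &count, &items[i]);
		if (error)
			goto gather_failed;
		if (items[i].locked)
			locked += items[i].pages;
	}

	error = commit_all(side, count);
	if (error)
		goto gather_failed;

	/* Point of no return: update statistics only on success. */
	*locked_stat -= locked;
	free(side);
	return 0;

gather_failed:
	free(side);		/* unwind the side structure, leave *locked_stat alone */
	return error;
}

int main(void)
{
	struct item items[] = { { 4, 1 }, { 8, 0 }, { 2, 1 } };
	long locked_stat = 100;
	int ret = do_align_munmap(items, 3, &locked_stat);

	/* Expected output: ret=0 locked_stat=94 */
	printf("ret=%d locked_stat=%ld\n", ret, locked_stat);
	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}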