git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.18-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 31 Mar 2026 10:54:43 +0000 (12:54 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 31 Mar 2026 10:54:43 +0000 (12:54 +0200)
added patches:
mm-mseal-update-vma-end-correctly-on-merge.patch

queue-6.18/mm-mseal-update-vma-end-correctly-on-merge.patch [new file with mode: 0644]
queue-6.18/series

diff --git a/queue-6.18/mm-mseal-update-vma-end-correctly-on-merge.patch b/queue-6.18/mm-mseal-update-vma-end-correctly-on-merge.patch
new file mode 100644 (file)
index 0000000..51bb575
--- /dev/null
@@ -0,0 +1,73 @@
+From 2697dd8ae721db4f6a53d4f4cbd438212a80f8dc Mon Sep 17 00:00:00 2001
+From: "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>
+Date: Fri, 27 Mar 2026 17:31:04 +0000
+Subject: mm/mseal: update VMA end correctly on merge
+
+From: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+
+commit 2697dd8ae721db4f6a53d4f4cbd438212a80f8dc upstream.
+
+Previously we stored the end of the current VMA in curr_end, and then upon
+iterating to the next VMA updated curr_start to curr_end to advance to the
+next VMA.
+
+However, this doesn't take into account the fact that a VMA might be
+updated due to a merge by vma_modify_flags(), which can result in curr_end
+being stale and thus, upon setting curr_start to curr_end, ending up with
+an incorrect curr_start on the next iteration.
+
+Resolve the issue by setting curr_end to vma->vm_end unconditionally to
+ensure this value remains updated should this occur.
+
+While we're here, eliminate this entire class of bug by simply setting
+const curr_[start/end] to be clamped to the input range and VMAs, which
+also happens to simplify the logic.
+
+Link: https://lkml.kernel.org/r/20260327173104.322405-1-ljs@kernel.org
+Fixes: 6c2da14ae1e0 ("mm/mseal: rework mseal apply logic")
+Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Reported-by: Antonius <antonius@bluedragonsec.com>
+Closes: https://lore.kernel.org/linux-mm/CAK8a0jwWGj9-SgFk0yKFh7i8jMkwKm5b0ao9=kmXWjO54veX2g@mail.gmail.com/
+Suggested-by: David Hildenbrand (ARM) <david@kernel.org>
+Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
+Reviewed-by: Pedro Falcato <pfalcato@suse.de>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Jeff Xu <jeffxu@chromium.org>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mseal.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/mm/mseal.c
++++ b/mm/mseal.c
+@@ -56,7 +56,6 @@ static int mseal_apply(struct mm_struct
+               unsigned long start, unsigned long end)
+ {
+       struct vm_area_struct *vma, *prev;
+-      unsigned long curr_start = start;
+       VMA_ITERATOR(vmi, mm, start);
+       /* We know there are no gaps so this will be non-NULL. */
+@@ -66,7 +65,8 @@ static int mseal_apply(struct mm_struct
+               prev = vma;
+       for_each_vma_range(vmi, vma, end) {
+-              unsigned long curr_end = MIN(vma->vm_end, end);
++              const unsigned long curr_start = MAX(vma->vm_start, start);
++              const unsigned long curr_end = MIN(vma->vm_end, end);
+               if (!(vma->vm_flags & VM_SEALED)) {
+                       vma = vma_modify_flags(&vmi, prev, vma,
+@@ -78,7 +78,6 @@ static int mseal_apply(struct mm_struct
+               }
+               prev = vma;
+-              curr_start = curr_end;
+       }
+       return 0;
index 9992caa07595c554b357560f7ef26ba219179c42..e79046a33a34820de2c0f597d736d396fc5d5eeb 100644 (file)
@@ -279,3 +279,4 @@ mm-damon-core-avoid-use-of-half-online-committed-context.patch
 mm-damon-sysfs-fix-param_ctx-leak-on-damon_sysfs_new_test_ctx-failure.patch
 mm-huge_memory-fix-folio-isn-t-locked-in-softleaf_to_folio.patch
 ksmbd-fix-use-after-free-and-null-deref-in-smb_grant_oplock.patch
+mm-mseal-update-vma-end-correctly-on-merge.patch