git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 20 Feb 2024 15:45:08 +0000 (16:45 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 20 Feb 2024 15:45:08 +0000 (16:45 +0100)
added patches:
userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch

queue-6.1/series
queue-6.1/userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch [new file with mode: 0644]

index 6d7c8fcc33b70662ad06fc840073af5695ba8be3..2a0440b33dcf965eec48cf24946fd0ce80c9b82b 100644 (file)
@@ -194,3 +194,4 @@ net-prevent-mss-overflow-in-skb_segment.patch
 bpf-add-struct-for-bin_args-arg-in-bpf_bprintf_prepare.patch
 bpf-do-cleanup-in-bpf_bprintf_cleanup-only-when-needed.patch
 bpf-remove-trace_printk_lock.patch
+userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch
diff --git a/queue-6.1/userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch b/queue-6.1/userfaultfd-fix-mmap_changing-checking-in-mfill_atomic_hugetlb.patch
new file mode 100644 (file)
index 0000000..2e783f9
--- /dev/null
@@ -0,0 +1,81 @@
+From 67695f18d55924b2013534ef3bdc363bc9e14605 Mon Sep 17 00:00:00 2001
+From: Lokesh Gidra <lokeshgidra@google.com>
+Date: Wed, 17 Jan 2024 14:37:29 -0800
+Subject: userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb
+
+From: Lokesh Gidra <lokeshgidra@google.com>
+
+commit 67695f18d55924b2013534ef3bdc363bc9e14605 upstream.
+
+In mfill_atomic_hugetlb(), mmap_changing isn't being checked
+again if we drop mmap_lock and reacquire it. When the lock is not held,
+mmap_changing could have been incremented. This is also inconsistent
+with the behavior in mfill_atomic().
+
+Link: https://lkml.kernel.org/r/20240117223729.1444522-1-lokeshgidra@google.com
+Fixes: df2cc96e77011 ("userfaultfd: prevent non-cooperative events vs mcopy_atomic races")
+Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Brian Geffon <bgeffon@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Kalesh Singh <kaleshsingh@google.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Nicolas Geoffray <ngeoffray@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/userfaultfd.c |   15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+--- a/mm/userfaultfd.c
++++ b/mm/userfaultfd.c
+@@ -327,6 +327,7 @@ static __always_inline ssize_t __mcopy_a
+                                             unsigned long dst_start,
+                                             unsigned long src_start,
+                                             unsigned long len,
++                                            atomic_t *mmap_changing,
+                                             enum mcopy_atomic_mode mode,
+                                             bool wp_copy)
+ {
+@@ -445,6 +446,15 @@ retry:
+                               goto out;
+                       }
+                       mmap_read_lock(dst_mm);
++                      /*
++                       * If memory mappings are changing because of non-cooperative
++                       * operation (e.g. mremap) running in parallel, bail out and
++                       * request the user to retry later
++                       */
++                      if (mmap_changing && atomic_read(mmap_changing)) {
++                              err = -EAGAIN;
++                              break;
++                      }
+                       dst_vma = NULL;
+                       goto retry;
+@@ -480,6 +490,7 @@ extern ssize_t __mcopy_atomic_hugetlb(st
+                                     unsigned long dst_start,
+                                     unsigned long src_start,
+                                     unsigned long len,
++                                    atomic_t *mmap_changing,
+                                     enum mcopy_atomic_mode mode,
+                                     bool wp_copy);
+ #endif /* CONFIG_HUGETLB_PAGE */
+@@ -601,8 +612,8 @@ retry:
+        */
+       if (is_vm_hugetlb_page(dst_vma))
+               return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
+-                                             src_start, len, mcopy_mode,
+-                                             wp_copy);
++                                             src_start, len, mmap_changing,
++                                             mcopy_mode, wp_copy);
+       if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+               goto out_unlock;