5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 7 Oct 2022 06:26:00 +0000 (08:26 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 7 Oct 2022 06:26:00 +0000 (08:26 +0200)
added patches:
mm-pagewalk-fix-race-between-unmap-and-page-walker.patch

queue-5.4/mm-pagewalk-fix-race-between-unmap-and-page-walker.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/mm-pagewalk-fix-race-between-unmap-and-page-walker.patch b/queue-5.4/mm-pagewalk-fix-race-between-unmap-and-page-walker.patch
new file mode 100644 (file)
index 0000000..f4c0797
--- /dev/null
@@ -0,0 +1,99 @@
+From 8782fb61cc848364e1e1599d76d3c9dd58a1cc06 Mon Sep 17 00:00:00 2001
+From: Steven Price <steven.price@arm.com>
+Date: Fri, 2 Sep 2022 12:26:12 +0100
+Subject: mm: pagewalk: Fix race between unmap and page walker
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steven Price <steven.price@arm.com>
+
+commit 8782fb61cc848364e1e1599d76d3c9dd58a1cc06 upstream.
+
+The mmap lock protects the page walker from changes to the page tables
+during the walk.  However, a read lock is insufficient to protect those
+areas which don't have a VMA, as munmap() detaches the VMAs before
+downgrading to a read lock and actually tearing down PTEs/page tables.
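+
+The window looks roughly like this (an illustrative interleaving, not
+taken verbatim from the report):
+
+  munmap() thread                      page walker thread
+  take mmap lock for write
+  detach the VMAs
+  downgrade mmap lock to read
+                                       take mmap lock for read
+                                       find no VMA covering the range,
+                                       but still descend into the
+                                       stale page tables
+  zap pages, free page tables   <-->   walker dereferences page
+                                       tables being freed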
+
+For users of walk_page_range() the solution is simply to call pte_hole()
+immediately, without checking the actual page tables, when a VMA is not
+present. We now never call __walk_page_range() without a valid vma.
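+
+As an illustration of the caller-visible behaviour (a hypothetical
+walker, not part of this patch), a pte_hole() callback now covers
+VMA-less ranges without the walk ever touching their page tables:
+
+	#include <linux/mm.h>
+	#include <linux/pagewalk.h>
+
+	/* Sum the bytes in [start, end) not covered by any VMA. */
+	static int count_hole(unsigned long addr, unsigned long next,
+			      struct mm_walk *walk)
+	{
+		unsigned long *holes = walk->private;
+
+		*holes += next - addr;
+		return 0;
+	}
+
+	static const struct mm_walk_ops hole_ops = {
+		.pte_hole = count_hole,
+	};
+
+	/* Caller must hold mmap_sem (5.4 naming) for read. */
+	static unsigned long unmapped_bytes(struct mm_struct *mm,
+					    unsigned long start,
+					    unsigned long end)
+	{
+		unsigned long holes = 0;
+
+		walk_page_range(mm, start, end, &hole_ops, &holes);
+		return holes;
+	}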
+
+For walk_page_range_novma() the locking requirements are tightened:
+the mmap write lock must be taken, and the pgd is then walked directly
+with 'no_vma' set.
+
+This in turn means that all page walkers either have a valid vma, or
+it's that special 'novma' case for page table debugging.  As a result,
+all the odd '(!walk->vma && !walk->no_vma)' tests can be removed.
+
+Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Steven Price <steven.price@arm.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[manually backported. backport note: walk_page_range_novma() does not exist in
+5.4, so I'm omitting it from the backport]
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/pagewalk.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -38,7 +38,7 @@ static int walk_pmd_range(pud_t *pud, un
+       do {
+ again:
+               next = pmd_addr_end(addr, end);
+-              if (pmd_none(*pmd) || !walk->vma) {
++              if (pmd_none(*pmd)) {
+                       if (ops->pte_hole)
+                               err = ops->pte_hole(addr, next, walk);
+                       if (err)
+@@ -84,7 +84,7 @@ static int walk_pud_range(p4d_t *p4d, un
+       do {
+  again:
+               next = pud_addr_end(addr, end);
+-              if (pud_none(*pud) || !walk->vma) {
++              if (pud_none(*pud)) {
+                       if (ops->pte_hole)
+                               err = ops->pte_hole(addr, next, walk);
+                       if (err)
+@@ -254,7 +254,7 @@ static int __walk_page_range(unsigned lo
+       int err = 0;
+       struct vm_area_struct *vma = walk->vma;
+
+-      if (vma && is_vm_hugetlb_page(vma)) {
++      if (is_vm_hugetlb_page(vma)) {
+               if (walk->ops->hugetlb_entry)
+                       err = walk_hugetlb_range(start, end, walk);
+       } else
+@@ -324,9 +324,13 @@ int walk_page_range(struct mm_struct *mm
+               if (!vma) { /* after the last vma */
+                       walk.vma = NULL;
+                       next = end;
++                      if (ops->pte_hole)
++                              err = ops->pte_hole(start, next, &walk);
+               } else if (start < vma->vm_start) { /* outside vma */
+                       walk.vma = NULL;
+                       next = min(end, vma->vm_start);
++                      if (ops->pte_hole)
++                              err = ops->pte_hole(start, next, &walk);
+               } else { /* inside vma */
+                       walk.vma = vma;
+                       next = min(end, vma->vm_end);
+@@ -344,9 +348,8 @@ int walk_page_range(struct mm_struct *mm
+                       }
+                       if (err < 0)
+                               break;
+-              }
+-              if (walk.vma || walk.ops->pte_hole)
+                       err = __walk_page_range(start, next, &walk);
++              }
+               if (err)
+                       break;
+       } while (start = next, start < end);
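
For readability, the walk_page_range() loop as restructured by this
backport looks roughly like this (a simplified sketch, not verbatim
kernel code; the test_walk()/skip handling is elided):

	do {
		struct vm_area_struct *vma = find_vma(mm, start);

		if (!vma) {				/* after the last vma */
			walk.vma = NULL;
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, &walk);
		} else if (start < vma->vm_start) {	/* hole before vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, &walk);
		} else {				/* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			err = __walk_page_range(start, next, &walk);
		}
		if (err)
			break;
	} while (start = next, start < end);

Reporting holes directly from the VMA loop means __walk_page_range() is
only ever entered with walk->vma set, which is what allows the hugetlb
check above to drop its 'vma &&' guard.
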
diff --git a/queue-5.4/series b/queue-5.4/series
index 914da3c60f94554f1ea4f5c42f0cb38cf8e6e7fe..61a9bce98e58237515205b50aa80804a4973b30a 100644 (file)
--- a/queue-5.4/series
@@ -1,2 +1,3 @@
+mm-pagewalk-fix-race-between-unmap-and-page-walker.patch
 wait_on_bit-add-an-acquire-memory-barrier.patch
 provide-arch_test_bit_acquire-for-architectures-that-define-test_bit.patch