git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 2 Oct 2022 10:28:38 +0000 (12:28 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 2 Oct 2022 10:28:38 +0000 (12:28 +0200)
added patches:
mm-migrate_device.c-flush-tlb-while-holding-ptl.patch

queue-4.19/mm-migrate_device.c-flush-tlb-while-holding-ptl.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/mm-migrate_device.c-flush-tlb-while-holding-ptl.patch b/queue-4.19/mm-migrate_device.c-flush-tlb-while-holding-ptl.patch
new file mode 100644
index 0000000..5c6ae8d
--- /dev/null
+++ b/queue-4.19/mm-migrate_device.c-flush-tlb-while-holding-ptl.patch
@@ -0,0 +1,74 @@
+From 60bae73708963de4a17231077285bd9ff2f41c44 Mon Sep 17 00:00:00 2001
+From: Alistair Popple <apopple@nvidia.com>
+Date: Fri, 2 Sep 2022 10:35:51 +1000
+Subject: mm/migrate_device.c: flush TLB while holding PTL
+
+From: Alistair Popple <apopple@nvidia.com>
+
+commit 60bae73708963de4a17231077285bd9ff2f41c44 upstream.
+
+When clearing a PTE the TLB should be flushed whilst still holding the PTL
+to avoid a potential race with madvise/munmap/etc.  For example consider
+the following sequence:
+
+  CPU0                          CPU1
+  ----                          ----
+
+  migrate_vma_collect_pmd()
+  pte_unmap_unlock()
+                                madvise(MADV_DONTNEED)
+                                -> zap_pte_range()
+                                pte_offset_map_lock()
+                                [ PTE not present, TLB not flushed ]
+                                pte_unmap_unlock()
+                                [ page is still accessible via stale TLB ]
+  flush_tlb_range()
+
+In this case the page may still be accessed via the stale TLB entry after
+madvise returns.  Fix this by flushing the TLB while holding the PTL.
+
+Fixes: 8c3328f1f36a ("mm/migrate: migrate_vma() unmap page from vma while collecting pages")
+Link: https://lkml.kernel.org/r/9f801e9d8d830408f2ca27821f606e09aa856899.1662078528.git-series.apopple@nvidia.com
+Signed-off-by: Alistair Popple <apopple@nvidia.com>
+Reported-by: Nadav Amit <nadav.amit@gmail.com>
+Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: Peter Xu <peterx@redhat.com>
+Cc: Alex Sierra <alex.sierra@amd.com>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: Felix Kuehling <Felix.Kuehling@amd.com>
+Cc: huang ying <huang.ying.caritas@gmail.com>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Karol Herbst <kherbst@redhat.com>
+Cc: Logan Gunthorpe <logang@deltatee.com>
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Paul Mackerras <paulus@ozlabs.org>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2359,13 +2359,14 @@ next:
+               migrate->dst[migrate->npages] = 0;
+               migrate->src[migrate->npages++] = mpfn;
+       }
+-      arch_leave_lazy_mmu_mode();
+-      pte_unmap_unlock(ptep - 1, ptl);
+       /* Only flush the TLB if we actually modified any entries */
+       if (unmapped)
+               flush_tlb_range(walk->vma, start, end);
++      arch_leave_lazy_mmu_mode();
++      pte_unmap_unlock(ptep - 1, ptl);
++
+       return 0;
+ }
diff --git a/queue-4.19/series b/queue-4.19/series
index 84e9a202e68a8893b2541d102d7407bda13bb789..f37d45235623a6469f677d860855419625983220 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -8,3 +8,4 @@ libata-add-ata_horkage_nolpm-for-pioneer-bdr-207m-and-bdr-205.patch
 mmc-moxart-fix-4-bit-bus-width-and-remove-8-bit-bus-width.patch
 mm-page_alloc-fix-race-condition-between-build_all_zonelists-and-page-allocation.patch
 mm-prevent-page_frag_alloc-from-corrupting-the-memory.patch
+mm-migrate_device.c-flush-tlb-while-holding-ptl.patch
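
For reference, a minimal sketch of the ordering change made by the patch above (illustrative only, simplified from the mm/migrate.c hunk; not the verbatim kernel code):

  /* Tail of migrate_vma_collect_pmd(), simplified.
   *
   * Racy ordering before the fix: the PTL is dropped first, so a
   * concurrent madvise(MADV_DONTNEED) -> zap_pte_range() can run in
   * the window and return to userspace while another CPU still holds
   * a stale TLB entry for the cleared PTE.
   */
  arch_leave_lazy_mmu_mode();
  pte_unmap_unlock(ptep - 1, ptl);        /* race window opens here */
  if (unmapped)
          flush_tlb_range(walk->vma, start, end);

  /* Fixed ordering from this patch: flush the TLB while the PTL is
   * still held, then leave lazy MMU mode and drop the lock.
   */
  if (unmapped)
          flush_tlb_range(walk->vma, start, end);
  arch_leave_lazy_mmu_mode();
  pte_unmap_unlock(ptep - 1, ptl);
  return 0;

The CPU0/CPU1 interleaving in the commit message corresponds to the window between pte_unmap_unlock() and flush_tlb_range() in the racy ordering; holding the PTL across the flush closes it.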