4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 24 Nov 2021 11:15:05 +0000 (12:15 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 24 Nov 2021 11:15:05 +0000 (12:15 +0100)
added patches:
hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch

queue-4.19/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch b/queue-4.19/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
new file mode 100644
index 0000000..957b42f
--- /dev/null
+++ b/queue-4.19/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
@@ -0,0 +1,100 @@
+From a4a118f2eead1d6c49e00765de89878288d4b890 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Sun, 21 Nov 2021 12:40:07 -0800
+Subject: hugetlbfs: flush TLBs correctly after huge_pmd_unshare
+
+From: Nadav Amit <namit@vmware.com>
+
+commit a4a118f2eead1d6c49e00765de89878288d4b890 upstream.
+
+When a call from __unmap_hugepage_range() to huge_pmd_unshare() succeeds,
+a TLB flush is missing.  This TLB flush must be performed before releasing
+the i_mmap_rwsem, in order to prevent the unshared PMDs page from being
+released and reused before the TLB flush takes place.
+
+Arguably, a comprehensive solution would use the mmu_gather interface to
+batch the TLB flushes and the PMDs page release; however, it is not an
+easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
+huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
+deferring the release of the page reference for the PMDs page until
+after i_mmap_rwsem is dropped can confuse huge_pmd_unshare() into
+thinking PMDs are shared when they are not.
+
+Fix __unmap_hugepage_range() by adding the missing TLB flush, and
+forcing a flush when unshare is successful.
+
+Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/asm-generic/tlb.h |    6 ++++++
+ mm/hugetlb.c              |   23 +++++++++++++++++++----
+ 2 files changed, 25 insertions(+), 4 deletions(-)
+
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -205,6 +205,12 @@ static inline void tlb_remove_check_page
+ #define tlb_end_vma   __tlb_end_vma
+ #endif
+
++static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
++                              unsigned long address, unsigned long size)
++{
++      __tlb_adjust_range(tlb, address, size);
++}
++
+ #ifndef __tlb_remove_tlb_entry
+ #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+ #endif
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3425,6 +3425,7 @@ void __unmap_hugepage_range(struct mmu_g
+       unsigned long sz = huge_page_size(h);
+       unsigned long mmun_start = start;       /* For mmu_notifiers */
+       unsigned long mmun_end   = end;         /* For mmu_notifiers */
++      bool force_flush = false;
+
+       WARN_ON(!is_vm_hugetlb_page(vma));
+       BUG_ON(start & ~huge_page_mask(h));
+@@ -3451,10 +3452,8 @@ void __unmap_hugepage_range(struct mmu_g
+               ptl = huge_pte_lock(h, mm, ptep);
+               if (huge_pmd_unshare(mm, &address, ptep)) {
+                       spin_unlock(ptl);
+-                      /*
+-                       * We just unmapped a page of PMDs by clearing a PUD.
+-                       * The caller's TLB flush range should cover this area.
+-                       */
++                      tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
++                      force_flush = true;
+                       continue;
+               }
+
+@@ -3511,6 +3510,22 @@ void __unmap_hugepage_range(struct mmu_g
+       }
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+       tlb_end_vma(tlb, vma);
++
++      /*
++       * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
++       * could defer the flush until now, since by holding i_mmap_rwsem we
++       * guaranteed that the last reference would not be dropped. But we must
++       * do the flushing before we return, as otherwise i_mmap_rwsem will be
++       * dropped and the last reference to the shared PMDs page might be
++       * dropped as well.
++       *
++       * In theory we could defer the freeing of the PMD pages as well, but
++       * huge_pmd_unshare() relies on the exact page_count for the PMD page to
++       * detect sharing, so we cannot defer the release of the page either.
++       * Instead, do the flush now.
++       */
++      if (force_flush)
++              tlb_flush_mmu_tlbonly(tlb);
+ }
+
+ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
diff --git a/queue-4.19/series b/queue-4.19/series
index eb6022bad78b66ca8ad2d010665464df248d52c7..957d88e4f1839334012930db06adf09ff2516cc9 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -320,3 +320,4 @@ batman-adv-don-t-always-reallocate-the-fragmentation-skb-head.patch
 rdma-netlink-add-__maybe_unused-to-static-inline-in-c-file.patch
 asoc-dapm-cover-regression-by-kctl-change-notification-fix.patch
 usb-max-3421-use-driver-data-instead-of-maintaining-a-list-of-bound-devices.patch
+hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
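
Note on the resulting control flow: the condensed shape of
__unmap_hugepage_range() after this patch is sketched below. This is an
illustrative reconstruction from the hunks above, not literal 4.19 source;
local variable setup and the per-page unmap work are elided.

void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page)
{
	bool force_flush = false;

	/* ... setup; the caller holds i_mmap_rwsem ... */
	for (address = start; address < end; address += sz) {
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			spin_unlock(ptl);
			/* Record the PUD-sized range the unshared PMDs page mapped. */
			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
			force_flush = true;
			continue;
		}
		/* ... clear and unmap the huge PTE at this address ... */
	}

	/*
	 * Flush before returning, while i_mmap_rwsem still pins the
	 * unshared PMDs page; once it is dropped, the last reference
	 * to that page may be dropped as well.
	 */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);
}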