From: Greg Kroah-Hartman
Date: Thu, 25 Nov 2021 11:38:39 +0000 (+0100)
Subject: 4.14-stable patches
X-Git-Tag: v5.10.82~5
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b51a5fa7147361ea3cbd8a25708c939053c45fd9;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
      hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
---

diff --git a/queue-4.14/hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch b/queue-4.14/hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
new file mode 100644
index 00000000000..6e435ef2de3
--- /dev/null
+++ b/queue-4.14/hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
@@ -0,0 +1,147 @@
+From dff11abe280b47c21b804a8ace318e0638bb9a49 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz
+Date: Fri, 5 Oct 2018 15:51:33 -0700
+Subject: hugetlb: take PMD sharing into account when flushing tlb/caches
+
+From: Mike Kravetz
+
+commit dff11abe280b47c21b804a8ace318e0638bb9a49 upstream.
+
+When fixing an issue with PMD sharing and migration, it was discovered via
+code inspection that other callers of huge_pmd_unshare potentially have an
+issue with cache and tlb flushing.
+
+Use the routine adjust_range_if_pmd_sharing_possible() to calculate worst
+case ranges for mmu notifiers.  Ensure that this range is flushed if
+huge_pmd_unshare succeeds and unmaps a PUD_SUZE area.
+
+Link: http://lkml.kernel.org/r/20180823205917.16297-3-mike.kravetz@oracle.com
+Signed-off-by: Mike Kravetz
+Acked-by: Kirill A. Shutemov
+Reviewed-by: Naoya Horiguchi
+Cc: Vlastimil Babka
+Cc: Davidlohr Bueso
+Cc: Michal Hocko
+Cc: Jerome Glisse
+Cc: Mike Kravetz
+Signed-off-by: Andrew Morton
+Cc: Nadav Amit
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/hugetlb.c |   53 ++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 44 insertions(+), 9 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3384,8 +3384,8 @@ void __unmap_hugepage_range(struct mmu_g
+ 	struct page *page;
+ 	struct hstate *h = hstate_vma(vma);
+ 	unsigned long sz = huge_page_size(h);
+-	const unsigned long mmun_start = start;	/* For mmu_notifiers */
+-	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
++	unsigned long mmun_start = start;	/* For mmu_notifiers */
++	unsigned long mmun_end   = end;		/* For mmu_notifiers */
+ 
+ 	WARN_ON(!is_vm_hugetlb_page(vma));
+ 	BUG_ON(start & ~huge_page_mask(h));
+@@ -3397,6 +3397,11 @@ void __unmap_hugepage_range(struct mmu_g
+ 	 */
+ 	tlb_remove_check_page_size_change(tlb, sz);
+ 	tlb_start_vma(tlb, vma);
++
++	/*
++	 * If sharing possible, alert mmu notifiers of worst case.
++	 */
++	adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
+ 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ 	address = start;
+ 	for (; address < end; address += sz) {
+@@ -3407,6 +3412,10 @@ void __unmap_hugepage_range(struct mmu_g
+ 		ptl = huge_pte_lock(h, mm, ptep);
+ 		if (huge_pmd_unshare(mm, &address, ptep)) {
+ 			spin_unlock(ptl);
++			/*
++			 * We just unmapped a page of PMDs by clearing a PUD.
++			 * The caller's TLB flush range should cover this area.
++			 */
+ 			continue;
+ 		}
+ 
+@@ -3489,12 +3498,23 @@ void unmap_hugepage_range(struct vm_area
+ {
+ 	struct mm_struct *mm;
+ 	struct mmu_gather tlb;
++	unsigned long tlb_start = start;
++	unsigned long tlb_end = end;
++
++	/*
++	 * If shared PMDs were possibly used within this vma range, adjust
++	 * start/end for worst case tlb flushing.
++	 * Note that we can not be sure if PMDs are shared until we try to
++	 * unmap pages.  However, we want to make sure TLB flushing covers
++	 * the largest possible range.
++	 */
++	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
+ 
+ 	mm = vma->vm_mm;
+ 
+-	tlb_gather_mmu(&tlb, mm, start, end);
++	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+ 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+-	tlb_finish_mmu(&tlb, start, end);
++	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+ }
+ 
+ /*
+@@ -4389,11 +4409,21 @@ unsigned long hugetlb_change_protection(
+ 	pte_t pte;
+ 	struct hstate *h = hstate_vma(vma);
+ 	unsigned long pages = 0;
++	unsigned long f_start = start;
++	unsigned long f_end = end;
++	bool shared_pmd = false;
++
++	/*
++	 * In the case of shared PMDs, the area to flush could be beyond
++	 * start/end.  Set f_start/f_end to cover the maximum possible
++	 * range if PMD sharing is possible.
++	 */
++	adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
+ 
+ 	BUG_ON(address >= end);
+-	flush_cache_range(vma, address, end);
++	flush_cache_range(vma, f_start, f_end);
+ 
+-	mmu_notifier_invalidate_range_start(mm, start, end);
++	mmu_notifier_invalidate_range_start(mm, f_start, f_end);
+ 	i_mmap_lock_write(vma->vm_file->f_mapping);
+ 	for (; address < end; address += huge_page_size(h)) {
+ 		spinlock_t *ptl;
+@@ -4404,6 +4434,7 @@ unsigned long hugetlb_change_protection(
+ 		if (huge_pmd_unshare(mm, &address, ptep)) {
+ 			pages++;
+ 			spin_unlock(ptl);
++			shared_pmd = true;
+ 			continue;
+ 		}
+ 		pte = huge_ptep_get(ptep);
+@@ -4439,12 +4470,16 @@ unsigned long hugetlb_change_protection(
+ 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
+ 	 * may have cleared our pud entry and done put_page on the page table:
+ 	 * once we release i_mmap_rwsem, another task can do the final put_page
+-	 * and that page table be reused and filled with junk.
++	 * and that page table be reused and filled with junk.  If we actually
++	 * did unshare a page of pmds, flush the range corresponding to the pud.
+ 	 */
+-	flush_hugetlb_tlb_range(vma, start, end);
++	if (shared_pmd)
++		flush_hugetlb_tlb_range(vma, f_start, f_end);
++	else
++		flush_hugetlb_tlb_range(vma, start, end);
+ 	mmu_notifier_invalidate_range(mm, start, end);
+ 	i_mmap_unlock_write(vma->vm_file->f_mapping);
+-	mmu_notifier_invalidate_range_end(mm, start, end);
++	mmu_notifier_invalidate_range_end(mm, f_start, f_end);
+ 
+ 	return pages << h->order;
+ }
diff --git a/queue-4.14/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch b/queue-4.14/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
index acb58842c41..08bb3737cb3 100644
--- a/queue-4.14/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
+++ b/queue-4.14/hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch
@@ -31,7 +31,6 @@ Cc: KAMEZAWA Hiroyuki
 Cc: Andrew Morton
 Signed-off-by: Linus Torvalds
 Signed-off-by: Greg Kroah-Hartman
-
 ---
  arch/arm/include/asm/tlb.h | 8 ++++++++
  arch/ia64/include/asm/tlb.h | 10 ++++++++++
@@ -156,22 +155,22 @@ Signed-off-by: Greg Kroah-Hartman
 +++ b/mm/hugetlb.c
 @@ -3386,6 +3386,7 @@ void __unmap_hugepage_range(struct mmu_g
  	unsigned long sz = huge_page_size(h);
- 	const unsigned long mmun_start = start;	/* For mmu_notifiers */
- 	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
+ 	unsigned long mmun_start = start;	/* For mmu_notifiers */
+ 	unsigned long mmun_end   = end;		/* For mmu_notifiers */
 +	bool force_flush = false;
  
  	WARN_ON(!is_vm_hugetlb_page(vma));
  	BUG_ON(start & ~huge_page_mask(h));
-@@ -3407,6 +3408,8 @@ void __unmap_hugepage_range(struct mmu_g
- 		ptl = huge_pte_lock(h, mm, ptep);
- 		if (huge_pmd_unshare(mm, &address, ptep)) {
+@@ -3422,6 +3423,8 @@ void __unmap_hugepage_range(struct mmu_g
+ 		pte = huge_ptep_get(ptep);
+ 		if (huge_pte_none(pte)) {
  			spin_unlock(ptl);
 +			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
 +			force_flush = true;
  			continue;
  		}
  
-@@ -3463,6 +3466,22 @@ void __unmap_hugepage_range(struct mmu_g
+@@ -3472,6 +3475,22 @@ void __unmap_hugepage_range(struct mmu_g
  	}
  	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  	tlb_end_vma(tlb, vma);
diff --git a/queue-4.14/series b/queue-4.14/series
index a1483b3fd7b..d948826f2e7 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -246,4 +246,5 @@ rdma-netlink-add-__maybe_unused-to-static-inline-in-c-file.patch
 asoc-dapm-cover-regression-by-kctl-change-notification-fix.patch
 usb-max-3421-use-driver-data-instead-of-maintaining-a-list-of-bound-devices.patch
 soc-tegra-pmc-fix-imbalanced-clock-disabling-in-error-code-path.patch
+hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
 hugetlbfs-flush-tlbs-correctly-after-huge_pmd_unshare.patch