From 45b9713165a8615cab8267197230253539d1aba0 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 2 Dec 2021 16:38:22 +0100
Subject: [PATCH] 4.9-stable patches

added patches:
      hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
---
 ...nto-account-when-flushing-tlb-caches.patch | 139 ++++++++++++++++++
 queue-4.9/series                              |   1 +
 2 files changed, 140 insertions(+)
 create mode 100644 queue-4.9/hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch

diff --git a/queue-4.9/hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch b/queue-4.9/hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
new file mode 100644
index 00000000000..4670e27f817
--- /dev/null
+++ b/queue-4.9/hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
@@ -0,0 +1,139 @@
+From dff11abe280b47c21b804a8ace318e0638bb9a49 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz
+Date: Fri, 5 Oct 2018 15:51:33 -0700
+Subject: hugetlb: take PMD sharing into account when flushing tlb/caches
+
+From: Mike Kravetz
+
+commit dff11abe280b47c21b804a8ace318e0638bb9a49 upstream.
+
+When fixing an issue with PMD sharing and migration, it was discovered via
+code inspection that other callers of huge_pmd_unshare potentially have an
+issue with cache and tlb flushing.
+
+Use the routine adjust_range_if_pmd_sharing_possible() to calculate worst
+case ranges for mmu notifiers.  Ensure that this range is flushed if
+huge_pmd_unshare succeeds and unmaps a PUD_SIZE area.
+
+Link: http://lkml.kernel.org/r/20180823205917.16297-3-mike.kravetz@oracle.com
+Signed-off-by: Mike Kravetz
+Acked-by: Kirill A. Shutemov
+Reviewed-by: Naoya Horiguchi
+Cc: Vlastimil Babka
+Cc: Davidlohr Bueso
+Cc: Michal Hocko
+Cc: Jerome Glisse
+Cc: Mike Kravetz
+Signed-off-by: Andrew Morton
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/hugetlb.c |   53 +++++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 43 insertions(+), 10 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3393,8 +3393,8 @@ void __unmap_hugepage_range(struct mmu_g
+ 	struct page *page;
+ 	struct hstate *h = hstate_vma(vma);
+ 	unsigned long sz = huge_page_size(h);
+-	const unsigned long mmun_start = start;	/* For mmu_notifiers */
+-	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
++	unsigned long mmun_start = start;	/* For mmu_notifiers */
++	unsigned long mmun_end   = end;	/* For mmu_notifiers */
+ 	bool force_flush = false;
+ 
+ 	WARN_ON(!is_vm_hugetlb_page(vma));
+@@ -3402,6 +3402,11 @@ void __unmap_hugepage_range(struct mmu_g
+ 	BUG_ON(end & ~huge_page_mask(h));
+ 
+ 	tlb_start_vma(tlb, vma);
++
++	/*
++	 * If sharing possible, alert mmu notifiers of worst case.
++	 */
++	adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
+ 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ 	address = start;
+ 	for (; address < end; address += sz) {
+@@ -3512,12 +3517,23 @@ void unmap_hugepage_range(struct vm_area
+ {
+ 	struct mm_struct *mm;
+ 	struct mmu_gather tlb;
++	unsigned long tlb_start = start;
++	unsigned long tlb_end = end;
++
++	/*
++	 * If shared PMDs were possibly used within this vma range, adjust
++	 * start/end for worst case tlb flushing.
++	 * Note that we can not be sure if PMDs are shared until we try to
++	 * unmap pages.  However, we want to make sure TLB flushing covers
++	 * the largest possible range.
++	 */
++	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
+ 
+ 	mm = vma->vm_mm;
+ 
+-	tlb_gather_mmu(&tlb, mm, start, end);
++	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+ 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+-	tlb_finish_mmu(&tlb, start, end);
++	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+ }
+ 
+ /*
+@@ -4205,11 +4221,21 @@ unsigned long hugetlb_change_protection(
+ 	pte_t pte;
+ 	struct hstate *h = hstate_vma(vma);
+ 	unsigned long pages = 0;
++	unsigned long f_start = start;
++	unsigned long f_end = end;
++	bool shared_pmd = false;
++
++	/*
++	 * In the case of shared PMDs, the area to flush could be beyond
++	 * start/end.  Set f_start/f_end to cover the maximum possible
++	 * range if PMD sharing is possible.
++	 */
++	adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
+ 
+ 	BUG_ON(address >= end);
+-	flush_cache_range(vma, address, end);
++	flush_cache_range(vma, f_start, f_end);
+ 
+-	mmu_notifier_invalidate_range_start(mm, start, end);
++	mmu_notifier_invalidate_range_start(mm, f_start, f_end);
+ 	i_mmap_lock_write(vma->vm_file->f_mapping);
+ 	for (; address < end; address += huge_page_size(h)) {
+ 		spinlock_t *ptl;
+@@ -4220,6 +4246,7 @@ unsigned long hugetlb_change_protection(
+ 		if (huge_pmd_unshare(mm, &address, ptep)) {
+ 			pages++;
+ 			spin_unlock(ptl);
++			shared_pmd = true;
+ 			continue;
+ 		}
+ 		pte = huge_ptep_get(ptep);
+@@ -4254,12 +4281,18 @@ unsigned long hugetlb_change_protection(
+ 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
+ 	 * may have cleared our pud entry and done put_page on the page table:
+ 	 * once we release i_mmap_rwsem, another task can do the final put_page
+-	 * and that page table be reused and filled with junk.
++	 * and that page table be reused and filled with junk.  If we actually
++	 * did unshare a page of pmds, flush the range corresponding to the pud.
+ 	 */
+-	flush_hugetlb_tlb_range(vma, start, end);
+-	mmu_notifier_invalidate_range(mm, start, end);
++	if (shared_pmd) {
++		flush_hugetlb_tlb_range(vma, f_start, f_end);
++		mmu_notifier_invalidate_range(mm, f_start, f_end);
++	} else {
++		flush_hugetlb_tlb_range(vma, start, end);
++		mmu_notifier_invalidate_range(mm, start, end);
++	}
+ 	i_mmap_unlock_write(vma->vm_file->f_mapping);
+-	mmu_notifier_invalidate_range_end(mm, start, end);
++	mmu_notifier_invalidate_range_end(mm, f_start, f_end);
+ 
+ 	return pages << h->order;
+ }
diff --git a/queue-4.9/series b/queue-4.9/series
index d6dfabe04e6..9399875a6ba 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -36,3 +36,4 @@ xen-netfront-don-t-trust-the-backend-response-data-blindly.patch
 tty-hvc-replace-bug_on-with-negative-return-value.patch
 shm-extend-forced-shm-destroy-to-support-objects-from-several-ipc-nses.patch
 nfsv42-fix-pagecache-invalidation-after-copy-clone.patch
+hugetlb-take-pmd-sharing-into-account-when-flushing-tlb-caches.patch
-- 
2.47.2
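
For readers reviewing the backport, the pattern repeated in all three hunks is the same: before flushing caches/TLBs and notifying mmu notifiers, the start/end range is widened so it also covers any PUD-sized region whose shared PMD page huge_pmd_unshare() may tear down. The snippet below is only an illustrative, userspace-only sketch of that rounding step, not the kernel's adjust_range_if_pmd_sharing_possible(): the helper name widen_range_for_pmd_sharing(), the example addresses, and the assumption that sharing is always possible are hypothetical, and PUD_SIZE is taken to be the x86_64 value of 1 GiB.

/*
 * Illustrative, userspace-only model of the range widening used by the
 * patch above.  It assumes PMD sharing is possible across the whole range
 * and uses the x86_64 PUD_SIZE of 1 GiB; the real kernel helper performs
 * additional VMA checks before widening.
 */
#include <stdio.h>

#define PUD_SIZE (1UL << 30)            /* span covered by one PUD entry */
#define PUD_MASK (~(PUD_SIZE - 1))

/* Widen [*start, *end) to the PUD-aligned blocks it touches. */
static void widen_range_for_pmd_sharing(unsigned long *start, unsigned long *end)
{
	*start &= PUD_MASK;                      /* round start down */
	*end = (*end + PUD_SIZE - 1) & PUD_MASK; /* round end up */
}

int main(void)
{
	unsigned long start = 0x40200000UL;      /* hypothetical hugetlb range */
	unsigned long end   = 0x40600000UL;

	widen_range_for_pmd_sharing(&start, &end);

	/* Flushing this widened range also covers a PUD entry that
	 * huge_pmd_unshare() may have cleared. */
	printf("flush 0x%lx - 0x%lx\n", start, end);
	return 0;
}

The kernel helper additionally checks VM_MAYSHARE and that each PUD-aligned block lies entirely within the VMA before widening; only the rounding arithmetic is modeled here.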