--- /dev/null
+From a4a118f2eead1d6c49e00765de89878288d4b890 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Sun, 21 Nov 2021 12:40:07 -0800
+Subject: hugetlbfs: flush TLBs correctly after huge_pmd_unshare
+
+From: Nadav Amit <namit@vmware.com>
+
+commit a4a118f2eead1d6c49e00765de89878288d4b890 upstream.
+
+When a call from __unmap_hugepage_range() to huge_pmd_unshare() succeeds,
+a TLB flush is missing. This TLB flush must be performed before releasing
+the i_mmap_rwsem, in order to prevent an unshared PMDs page from being
+released and reused before the TLB flush takes place.
+
+Arguably, a comprehensive solution would use the mmu_gather interface to
+batch the TLB flushes and the PMDs page release, however it is not an
+easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
+huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
+deferring the release of the page reference for the PMDs page until
+after i_mmap_rwsem is dropped can confuse huge_pmd_unshare() into
+thinking PMDs are shared when they are not.
+
+Fix __unmap_hugepage_range() by adding the missing TLB flush, and
+forcing a flush when unshare is successful.
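+
+In condensed form, the resulting flow is roughly the following (a sketch
+assembled from the hunks below; the enclosing unmap loop, the locking and
+the declaration of force_flush are omitted):
+
+	/* include/asm-generic/tlb.h: widen the range the gather will flush */
+	static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+					       unsigned long address, unsigned long size)
+	{
+		tlb->start = min(tlb->start, address);
+		tlb->end = max(tlb->end, address + size);
+	}
+
+	/* mm/hugetlb.c, __unmap_hugepage_range(): record the PUD-sized
+	 * region covered by the unshared PMDs page and force a flush,
+	 * so stale TLB entries are gone before i_mmap_rwsem is released.
+	 */
+	if (huge_pmd_unshare(mm, &address, ptep)) {
+		tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+		force_flush = 1;
+		goto unlock;
+	}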
+
+Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/asm-generic/tlb.h | 7 +++++++
+ mm/hugetlb.c | 5 ++++-
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -165,6 +165,13 @@ static inline void __tlb_reset_range(str
+ #define tlb_end_vma __tlb_end_vma
+ #endif
+
++static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
++ unsigned long address, unsigned long size)
++{
++ tlb->start = min(tlb->start, address);
++ tlb->end = max(tlb->end, address + size);
++}
++
+ #ifndef __tlb_remove_tlb_entry
+ #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+ #endif
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3290,8 +3290,11 @@ again:
+ continue;
+
+ ptl = huge_pte_lock(h, mm, ptep);
+- if (huge_pmd_unshare(mm, &address, ptep))
++ if (huge_pmd_unshare(mm, &address, ptep)) {
++ tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
++ force_flush = 1;
+ goto unlock;
++ }
+
+ pte = huge_ptep_get(ptep);
+ if (huge_pte_none(pte))