tlb_end_vma(tlb, vma);
/*
- * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
- * could defer the flush until now, since by holding i_mmap_rwsem we
- * guaranteed that the last reference would not be dropped. But we must
- * do the flushing before we return, as otherwise i_mmap_rwsem will be
- * dropped and the last reference to the shared PMDs page might be
- * dropped as well.
- *
- * In theory we could defer the freeing of the PMD pages as well, but
- * huge_pmd_unshare() relies on the exact page_count for the PMD page to
- * detect sharing, so we cannot defer the release of the page either.
- * Instead, do flush now.
+ * There is nothing protecting a previously-shared page table that we
+ * unshared through huge_pmd_unshare() from getting freed once
+ * i_mmap_rwsem is released, so the TLB must be flushed before we
+ * return. If huge_pmd_unshare() succeeded, force_flush was set so that
+ * the flush covering the unshared pud range happens here.
*/
if (force_flush)
	tlb_flush_mmu_tlbonly(tlb);
cond_resched();
}
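For context, the flush above is driven by force_flush, which the unmap walk earlier in the function sets whenever huge_pmd_unshare() succeeds. The following is only an illustrative, condensed sketch of that walk, not the verbatim mm/hugetlb.c code; locals (address, sz, ptep, ptl, h, mm, tlb) are those of the surrounding function, and helper signatures such as huge_pmd_unshare()'s vary between kernel versions:

	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address, sz);
		if (!ptep)
			continue;

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			spin_unlock(ptl);
			/*
			 * A pud-sized range was mapped through the shared
			 * page table; record it in the mmu_gather and make
			 * sure it gets flushed before i_mmap_rwsem is
			 * dropped.
			 */
			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
			force_flush = true;
			continue;
		}
		/* ... clear the pte and accumulate it in the mmu_gather ... */
		spin_unlock(ptl);
	}

tlb_flush_mmu_tlbonly() then flushes the range accumulated in the mmu_gather, which includes the pud-sized range recorded above, without yet freeing the batched pages.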
/*
- * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
- * may have cleared our pud entry and done put_page on the page table:
- * once we release i_mmap_rwsem, another task can do the final put_page
- * and that page table be reused and filled with junk. If we actually
- * did unshare a page of pmds, flush the range corresponding to the pud.
+ * There is nothing protecting a previously-shared page table that we
+ * unshared through huge_pmd_unshare() from getting freed after we
+ * release i_mmap_rwsem, so flush the TLB now. If huge_pmd_unshare()
+ * succeeded, flush the range corresponding to the pud.
*/
if (shared_pmd)
	flush_hugetlb_tlb_range(vma, range.start, range.end);
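Similarly, shared_pmd is set by the protection-change walk when huge_pmd_unshare() succeeds, and the flush has to land before i_mmap_rwsem is released. A condensed sketch of that ordering follows; it is illustrative only (locals such as psize, ptep and ptl come from the surrounding function, and it assumes range was initialized earlier to cover any pud that might have been shared):

	bool shared_pmd = false;

	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += psize) {
		ptep = huge_pte_offset(mm, address, psize);
		if (!ptep)
			continue;

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			/* Dropped our reference to a shared page table. */
			spin_unlock(ptl);
			shared_pmd = true;
			continue;
		}
		/* ... update protection on the pte ... */
		spin_unlock(ptl);
	}

	/* Flush before the unlock, per the comment above. */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, start, end);

	i_mmap_unlock_write(vma->vm_file->f_mapping);

The wider range.start/range.end flush in the shared_pmd case reflects that the unshared page table mapped a full pud worth of address space, not just the start..end span being changed.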