pte_t new_pte;
bool new_folio, new_pagecache_folio = false;
u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
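+ /* Tracks whether we still hold the folio lock for the final unlock. */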
+ bool folio_locked = true;
/*
* Currently, we are forced to kill the process in the event the
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+ /*
+ * No need to keep file folios locked. See comment in
+ * hugetlb_fault().
+ */
+ if (!anon_rmap) {
+ folio_locked = false;
+ folio_unlock(folio);
+ }
/* Optimization, do the COW without a second fault */
ret = hugetlb_wp(vmf);
}
if (new_folio)
folio_set_hugetlb_migratable(folio);
- folio_unlock(folio);
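+ /* File folios may already have been unlocked before hugetlb_wp(). */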
+ if (folio_locked)
+ folio_unlock(folio);
out:
hugetlb_vma_unlock_read(vma);
if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
if (!huge_pte_write(vmf.orig_pte)) {
- /* hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) */
+ /*
+ * Anonymous folios need to be locked since hugetlb_wp()
+ * checks whether we can re-use the folio exclusively
+ * for ourselves in case we are its only user.
+ */
folio = page_folio(pte_page(vmf.orig_pte));
- if (!folio_trylock(folio)) {
+ if (folio_test_anon(folio) && !folio_trylock(folio)) {
need_wait_lock = true;
goto out_ptl;
}
folio_get(folio);
ret = hugetlb_wp(&vmf);
- folio_unlock(folio);
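+ /* Only anonymous folios were locked above. */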
+ if (folio_test_anon(folio))
+ folio_unlock(folio);
folio_put(folio);
goto out_ptl;
} else if (likely(flags & FAULT_FLAG_WRITE)) {