static inline bool should_try_to_free_swap(struct swap_info_struct *si,
struct folio *folio,
struct vm_area_struct *vma,
+ unsigned int extra_refs,
unsigned int fault_flags)
{
if (!folio_test_swapcache(folio))
return false;
/*
* If we want to map a page that's in the swapcache writable, we
* have to detect via the refcount if we're really the exclusive
* user. Try freeing the swapcache to get rid of the swapcache
* reference only in case it's likely that we'll be the exclusive user.
*/
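+ /*
+ * extra_refs is the number of folio references expected besides the
+ * swap cache's own (one per page): the single reference the fault
+ * handler holds when this runs before mapping, or nr_pages once the
+ * new PTEs have been installed.
+ */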
return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
- folio_ref_count(folio) == (1 + folio_nr_pages(folio));
+ folio_ref_count(folio) == (extra_refs + folio_nr_pages(folio));
}
/*
* Some architectures may have to restore extra metadata to the page
* when reading from swap. This metadata may be indexed by swap entry
* so this must be called before swap_free().
*/
arch_swap_restore(folio_swap(entry, folio), folio);
- /*
- * Remove the swap entry and conditionally try to free up the swapcache.
- * We're already holding a reference on the page but haven't mapped it
- * yet.
- */
- swap_free_nr(entry, nr_pages);
- if (should_try_to_free_swap(si, folio, vma, vmf->flags))
- folio_free_swap(folio);
-
add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
pte = mk_pte(page, vma->vm_page_prot);
arch_do_swap_page_nr(vma->vm_mm, vma, address,
pte, pte, nr_pages);
+ /*
+ * Remove the swap entry and conditionally try to free up the swapcache.
+ * Do it after mapping, so racing page faults will likely see the folio
+ * in the swap cache and wait on the folio lock.
+ */
+ swap_free_nr(entry, nr_pages);
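+ /*
+ * The folio is already mapped at this point, so the references backing
+ * the new PTEs (nr_pages of them) are the only ones expected on top of
+ * the swap cache's; hence nr_pages is passed as extra_refs.
+ */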
+ if (should_try_to_free_swap(si, folio, vma, nr_pages, vmf->flags))
+ folio_free_swap(folio);
+
folio_unlock(folio);
if (unlikely(folio != swapcache)) {
/*