* Try to allocate it some swap space here.
* Lazyfree folio could be freed directly
*/
- if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
- if (!folio_test_swapcache(folio)) {
- if (!(sc->gfp_mask & __GFP_IO))
- goto keep_locked;
- if (folio_maybe_dma_pinned(folio))
- goto keep_locked;
- if (folio_test_large(folio)) {
- /* cannot split folio, skip it */
- if (folio_expected_ref_count(folio) !=
- folio_ref_count(folio) - 1)
- goto activate_locked;
- /*
- * Split partially mapped folios right away.
- * We can free the unmapped pages without IO.
- */
- if (data_race(!list_empty(&folio->_deferred_list) &&
- folio_test_partially_mapped(folio)) &&
- split_folio_to_list(folio, folio_list))
- goto activate_locked;
- }
- if (folio_alloc_swap(folio)) {
- int __maybe_unused order = folio_order(folio);
-
- if (!folio_test_large(folio))
- goto activate_locked_split;
- /* Fallback to swap normal pages */
- if (split_folio_to_list(folio, folio_list))
- goto activate_locked;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (nr_pages >= HPAGE_PMD_NR) {
- count_memcg_folio_events(folio,
- THP_SWPOUT_FALLBACK, 1);
- count_vm_event(THP_SWPOUT_FALLBACK);
- }
-#endif
- count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
- if (folio_alloc_swap(folio))
- goto activate_locked_split;
- }
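
The inner swap-cache test above was the only statement in the outer "anon && swapbacked" block, so the two tests can be merged into a single condition with no behavioural change, which is what the replacement below does. A toy userspace model, not kernel code, checking that equivalence over all inputs (the three booleans stand in for the folio_test_*() predicates):

	/*
	 * Toy model, not kernel code: nested vs. flattened condition.
	 * They agree on all eight inputs because the inner "if" was
	 * the only statement in the outer one.
	 */
	#include <assert.h>
	#include <stdbool.h>

	static bool nested(bool anon, bool swapbacked, bool swapcache)
	{
		if (anon && swapbacked) {
			if (!swapcache)
				return true;	/* body would run */
		}
		return false;
	}

	static bool flattened(bool anon, bool swapbacked, bool swapcache)
	{
		return anon && swapbacked && !swapcache;
	}

	int main(void)
	{
		for (int i = 0; i < 8; i++)
			assert(nested(i & 1, i & 2, i & 4) ==
			       flattened(i & 1, i & 2, i & 4));
		return 0;
	}
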
+ if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_test_swapcache(folio)) {
+ if (!(sc->gfp_mask & __GFP_IO))
+ goto keep_locked;
+ if (folio_maybe_dma_pinned(folio))
+ goto keep_locked;
+ if (folio_test_large(folio)) {
+ /* cannot split folio, skip it */
+ if (folio_expected_ref_count(folio) !=
+ folio_ref_count(folio) - 1)
+ goto activate_locked;
/*
- * Normally the folio will be dirtied in unmap because its
- * pte should be dirty. A special case is MADV_FREE page. The
- * page's pte could have dirty bit cleared but the folio's
- * SwapBacked flag is still set because clearing the dirty bit
- * and SwapBacked flag has no lock protected. For such folio,
- * unmap will not set dirty bit for it, so folio reclaim will
- * not write the folio out. This can cause data corruption when
- * the folio is swapped in later. Always setting the dirty flag
- * for the folio solves the problem.
+ * Split partially mapped folios right away.
+ * We can free the unmapped pages without IO.
*/
- folio_mark_dirty(folio);
+ if (data_race(!list_empty(&folio->_deferred_list) &&
+ folio_test_partially_mapped(folio)) &&
+ split_folio_to_list(folio, folio_list))
+ goto activate_locked;
+ }
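
Two gates guard large folios here. A folio is only worth splitting when every reference other than reclaim's own isolation reference is accounted for by its mappings; if folio_expected_ref_count() diverges from folio_ref_count() - 1 (say, because of a concurrent pin), the split would fail anyway and the folio is skipped. A folio that is already queued on the deferred-split list and is only partially mapped is split right away, since its unmapped pages can be freed with no swap I/O. A minimal sketch of the reference arithmetic (struct toy_folio and can_split() are illustrative, not kernel API):

	/*
	 * Toy illustration, not kernel API: reclaim has isolated the
	 * folio and therefore owns one reference itself, which is what
	 * the "- 1" discounts.  Any further unexpected reference makes
	 * the counts diverge and the split is skipped.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_folio {
		int ref_count;		/* total references taken            */
		int expected_refs;	/* references explained by mappings  */
	};

	static bool can_split(const struct toy_folio *f)
	{
		return f->expected_refs == f->ref_count - 1;
	}

	int main(void)
	{
		struct toy_folio ok     = { .ref_count = 4, .expected_refs = 3 };
		struct toy_folio pinned = { .ref_count = 5, .expected_refs = 3 };

		printf("ok: %d, pinned: %d\n", can_split(&ok), can_split(&pinned));
		return 0;
	}
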
+ if (folio_alloc_swap(folio)) {
+ int __maybe_unused order = folio_order(folio);
+
+ if (!folio_test_large(folio))
+ goto activate_locked_split;
+ /* Fallback to swap normal pages */
+ if (split_folio_to_list(folio, folio_list))
+ goto activate_locked;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (nr_pages >= HPAGE_PMD_NR) {
+ count_memcg_folio_events(folio,
+ THP_SWPOUT_FALLBACK, 1);
+ count_vm_event(THP_SWPOUT_FALLBACK);
+ }
+#endif
+ count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
+ if (folio_alloc_swap(folio))
+ goto activate_locked_split;
}
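
If swap slots cannot be allocated for the whole large folio, it is split and swap is allocated per base page instead; PMD-sized fallbacks are counted in THP_SWPOUT_FALLBACK, and every order is also counted in the per-order mTHP statistics. Assuming a Linux kernel that publishes these counters in /proc/vmstat (thp_swpout, thp_swpout_fallback), the fallback rate can be watched from userspace:

	/*
	 * Userspace observer: print the THP swapout counters from
	 * /proc/vmstat, including the fallback counter this hunk bumps.
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "thp_swpout", 10))
				fputs(line, stdout);	/* both counters match */
		fclose(f);
		return 0;
	}

Where the per-order stats are available, they also appear under /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/swpout_fallback.
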
+		/*
+		 * Normally the folio will be dirtied in unmap because
+		 * its pte should be dirty. A special case is a
+		 * MADV_FREE page: its pte can have the dirty bit
+		 * cleared while the folio's SwapBacked flag is still
+		 * set, because the dirty bit and the SwapBacked flag
+		 * are cleared without any common lock held. For such
+		 * a folio, unmap will not set the dirty bit, so folio
+		 * reclaim will not write the folio out. This can cause
+		 * data corruption when the folio is swapped in later.
+		 * Always setting the dirty flag for the folio solves
+		 * the problem.
+		 */
+ folio_mark_dirty(folio);
}
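
The unconditional folio_mark_dirty() closes the MADV_FREE race described in the comment: madvise(MADV_FREE) clears pte dirty bits without synchronizing against the folio's SwapBacked flag, so reclaim can meet a clean pte on a folio still marked swap-backed, skip the writeback, and later swap stale data back in. For reference, the userspace half of lazyfree, as a small demo assuming Linux 4.5+ where MADV_FREE exists (it only exercises the API; whether the page is actually discarded depends on memory pressure):

	/*
	 * Userspace side of lazyfree.  After MADV_FREE the kernel may
	 * discard the page under memory pressure; a later write, as
	 * done below, cancels the pending free.
	 */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4096;
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		memset(p, 0xaa, len);		/* dirty the anonymous page     */
		madvise(p, len, MADV_FREE);	/* mark it lazily freeable      */
		p[0] = 0x55;			/* re-dirty: free is cancelled  */
		printf("byte after re-dirty: 0x%02x\n", (unsigned char)p[0]);
		munmap(p, len);
		return 0;
	}
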
/*