From: Andrew Morton
Date: Mon, 15 Dec 2025 19:05:56 +0000 (-0800)
Subject: mm/vmscan.c:shrink_folio_list(): save a tabstop
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7adc97bc93946e55fc6af30a03d296fb833a28df;p=thirdparty%2Fkernel%2Flinux.git

mm/vmscan.c:shrink_folio_list(): save a tabstop

We have some needlessly deep indentation in this huge function due to

	if (expr1) {
		if (expr2) {
			...
		}
	}

Convert this to

	if (expr1 && expr2) {
		...
	}

Also, reflow that big block comment to fit in 80 cols.

Cc: Johannes Weiner
Cc: David Hildenbrand
Cc: Michal Hocko
Cc: Qi Zheng
Cc: Shakeel Butt
Cc: Lorenzo Stoakes
Cc: Axel Rasmussen
Cc: Yuanchu Xie
Cc: Wei Xu
Signed-off-by: Andrew Morton
---

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6cf5ee94be7ac..67234613fbfff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1276,58 +1276,58 @@ retry:
 		 * Try to allocate it some swap space here.
 		 * Lazyfree folio could be freed directly
 		 */
-		if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
-			if (!folio_test_swapcache(folio)) {
-				if (!(sc->gfp_mask & __GFP_IO))
-					goto keep_locked;
-				if (folio_maybe_dma_pinned(folio))
-					goto keep_locked;
-				if (folio_test_large(folio)) {
-					/* cannot split folio, skip it */
-					if (folio_expected_ref_count(folio) !=
-					    folio_ref_count(folio) - 1)
-						goto activate_locked;
-					/*
-					 * Split partially mapped folios right away.
-					 * We can free the unmapped pages without IO.
-					 */
-					if (data_race(!list_empty(&folio->_deferred_list) &&
-					    folio_test_partially_mapped(folio)) &&
-					    split_folio_to_list(folio, folio_list))
-						goto activate_locked;
-				}
-				if (folio_alloc_swap(folio)) {
-					int __maybe_unused order = folio_order(folio);
-
-					if (!folio_test_large(folio))
-						goto activate_locked_split;
-					/* Fallback to swap normal pages */
-					if (split_folio_to_list(folio, folio_list))
-						goto activate_locked;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-					if (nr_pages >= HPAGE_PMD_NR) {
-						count_memcg_folio_events(folio,
-							THP_SWPOUT_FALLBACK, 1);
-						count_vm_event(THP_SWPOUT_FALLBACK);
-					}
-#endif
-					count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
-					if (folio_alloc_swap(folio))
-						goto activate_locked_split;
-				}
+		if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+		    !folio_test_swapcache(folio)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
+			if (folio_maybe_dma_pinned(folio))
+				goto keep_locked;
+			if (folio_test_large(folio)) {
+				/* cannot split folio, skip it */
+				if (folio_expected_ref_count(folio) !=
+				    folio_ref_count(folio) - 1)
+					goto activate_locked;
 				/*
-				 * Normally the folio will be dirtied in unmap because its
-				 * pte should be dirty. A special case is MADV_FREE page. The
-				 * page's pte could have dirty bit cleared but the folio's
-				 * SwapBacked flag is still set because clearing the dirty bit
-				 * and SwapBacked flag has no lock protected. For such folio,
-				 * unmap will not set dirty bit for it, so folio reclaim will
-				 * not write the folio out. This can cause data corruption when
-				 * the folio is swapped in later. Always setting the dirty flag
-				 * for the folio solves the problem.
+				 * Split partially mapped folios right away.
+				 * We can free the unmapped pages without IO.
 				 */
-				folio_mark_dirty(folio);
+				if (data_race(!list_empty(&folio->_deferred_list) &&
+				    folio_test_partially_mapped(folio)) &&
+				    split_folio_to_list(folio, folio_list))
+					goto activate_locked;
+			}
+			if (folio_alloc_swap(folio)) {
+				int __maybe_unused order = folio_order(folio);
+
+				if (!folio_test_large(folio))
+					goto activate_locked_split;
+				/* Fallback to swap normal pages */
+				if (split_folio_to_list(folio, folio_list))
+					goto activate_locked;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+				if (nr_pages >= HPAGE_PMD_NR) {
+					count_memcg_folio_events(folio,
+						THP_SWPOUT_FALLBACK, 1);
+					count_vm_event(THP_SWPOUT_FALLBACK);
+				}
+#endif
+				count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
+				if (folio_alloc_swap(folio))
+					goto activate_locked_split;
 			}
+			/*
+			 * Normally the folio will be dirtied in unmap because
+			 * its pte should be dirty. A special case is MADV_FREE
+			 * page. The page's pte could have dirty bit cleared but
+			 * the folio's SwapBacked flag is still set because
+			 * clearing the dirty bit and SwapBacked flag has no
+			 * lock protected. For such folio, unmap will not set
+			 * dirty bit for it, so folio reclaim will not write the
+			 * folio out. This can cause data corruption when the
+			 * folio is swapped in later. Always setting the dirty
+			 * flag for the folio solves the problem.
+			 */
+			folio_mark_dirty(folio);
 		}
 
 		/*
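As a standalone illustration, not part of the patch itself: the conversion
described in the commit message is behaviour-preserving because && short-circuits,
so the second test is still evaluated only when the first one passes, and the body
still runs only when both pass. The sketch below uses hypothetical helpers
(cond_a, cond_b, handle) rather than the folio tests above.

	#include <stdbool.h>
	#include <stdio.h>

	/* hypothetical stand-ins for the two folio tests in the patch */
	static bool cond_a(int v) { return v > 0; }
	static bool cond_b(int v) { return (v % 2) == 0; }

	static void handle(int v) { printf("handled %d\n", v); }

	/* before: the body sits one tabstop deeper than necessary */
	static void nested(int v)
	{
		if (cond_a(v)) {
			if (cond_b(v)) {
				handle(v);
			}
		}
	}

	/* after: same logic and same evaluation order, shallower indentation */
	static void merged(int v)
	{
		if (cond_a(v) && cond_b(v)) {
			handle(v);
		}
	}

	int main(void)
	{
		nested(4);	/* prints "handled 4" */
		merged(4);	/* prints "handled 4" */
		return 0;
	}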