git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/vmscan.c:shrink_folio_list(): save a tabstop
author Andrew Morton <akpm@linux-foundation.org>
Mon, 15 Dec 2025 19:05:56 +0000 (11:05 -0800)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 21 Jan 2026 03:24:36 +0000 (19:24 -0800)
We have some needlessly deep indentation in this huge function due to

	if (expr1) {
		if (expr2) {
			...
		}
	}

Convert this to

	if (expr1 && expr2) {
		...
	}

Also, reflow that big block comment to fit in 80 cols.
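
As a minimal standalone sketch of why the transformation is safe (the
flags and helpers below are hypothetical, not the vmscan code): the
inner body only ever ran when both tests passed, and there is no else
branch and no code between the two ifs, so folding the inner test into
the outer condition preserves behaviour while saving a tabstop:

	#include <stdbool.h>
	#include <stdio.h>

	static bool expr1(int flags) { return flags & 1; }
	static bool expr2(int flags) { return flags & 2; }

	/* Before: nested ifs, body indented one extra tabstop. */
	static void nested(int flags)
	{
		if (expr1(flags)) {
			if (expr2(flags)) {
				printf("nested: body runs for %d\n", flags);
			}
		}
	}

	/* After: one combined condition, same body, one less tabstop. */
	static void flat(int flags)
	{
		if (expr1(flags) && expr2(flags)) {
			printf("flat:   body runs for %d\n", flags);
		}
	}

	int main(void)
	{
		/* Both variants fire for exactly the same inputs (only 3). */
		for (int flags = 0; flags < 4; flags++) {
			nested(flags);
			flat(flags);
		}
		return 0;
	}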

Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Wei Xu <weixugc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 6cf5ee94be7accc30ebf3b315d2a344ced6cbf70..67234613fbfff34913bbcadb58263044e476729c 100644
@@ -1276,58 +1276,58 @@ retry:
                 * Try to allocate it some swap space here.
                 * Lazyfree folio could be freed directly
                 */
-               if (folio_test_anon(folio) && folio_test_swapbacked(folio)) {
-                       if (!folio_test_swapcache(folio)) {
-                               if (!(sc->gfp_mask & __GFP_IO))
-                                       goto keep_locked;
-                               if (folio_maybe_dma_pinned(folio))
-                                       goto keep_locked;
-                               if (folio_test_large(folio)) {
-                                       /* cannot split folio, skip it */
-                                       if (folio_expected_ref_count(folio) !=
-                                           folio_ref_count(folio) - 1)
-                                               goto activate_locked;
-                                       /*
-                                        * Split partially mapped folios right away.
-                                        * We can free the unmapped pages without IO.
-                                        */
-                                       if (data_race(!list_empty(&folio->_deferred_list) &&
-                                           folio_test_partially_mapped(folio)) &&
-                                           split_folio_to_list(folio, folio_list))
-                                               goto activate_locked;
-                               }
-                               if (folio_alloc_swap(folio)) {
-                                       int __maybe_unused order = folio_order(folio);
-
-                                       if (!folio_test_large(folio))
-                                               goto activate_locked_split;
-                                       /* Fallback to swap normal pages */
-                                       if (split_folio_to_list(folio, folio_list))
-                                               goto activate_locked;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                                       if (nr_pages >= HPAGE_PMD_NR) {
-                                               count_memcg_folio_events(folio,
-                                                       THP_SWPOUT_FALLBACK, 1);
-                                               count_vm_event(THP_SWPOUT_FALLBACK);
-                                       }
-#endif
-                                       count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
-                                       if (folio_alloc_swap(folio))
-                                               goto activate_locked_split;
-                               }
+               if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+                               !folio_test_swapcache(folio)) {
+                       if (!(sc->gfp_mask & __GFP_IO))
+                               goto keep_locked;
+                       if (folio_maybe_dma_pinned(folio))
+                               goto keep_locked;
+                       if (folio_test_large(folio)) {
+                               /* cannot split folio, skip it */
+                               if (folio_expected_ref_count(folio) !=
+                                   folio_ref_count(folio) - 1)
+                                       goto activate_locked;
                                /*
-                                * Normally the folio will be dirtied in unmap because its
-                                * pte should be dirty. A special case is MADV_FREE page. The
-                                * page's pte could have dirty bit cleared but the folio's
-                                * SwapBacked flag is still set because clearing the dirty bit
-                                * and SwapBacked flag has no lock protected. For such folio,
-                                * unmap will not set dirty bit for it, so folio reclaim will
-                                * not write the folio out. This can cause data corruption when
-                                * the folio is swapped in later. Always setting the dirty flag
-                                * for the folio solves the problem.
+                                * Split partially mapped folios right away.
+                                * We can free the unmapped pages without IO.
                                 */
-                               folio_mark_dirty(folio);
+                               if (data_race(!list_empty(&folio->_deferred_list) &&
+                                   folio_test_partially_mapped(folio)) &&
+                                   split_folio_to_list(folio, folio_list))
+                                       goto activate_locked;
+                       }
+                       if (folio_alloc_swap(folio)) {
+                               int __maybe_unused order = folio_order(folio);
+
+                               if (!folio_test_large(folio))
+                                       goto activate_locked_split;
+                               /* Fallback to swap normal pages */
+                               if (split_folio_to_list(folio, folio_list))
+                                       goto activate_locked;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+                               if (nr_pages >= HPAGE_PMD_NR) {
+                                       count_memcg_folio_events(folio,
+                                               THP_SWPOUT_FALLBACK, 1);
+                                       count_vm_event(THP_SWPOUT_FALLBACK);
+                               }
+#endif
+                               count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
+                               if (folio_alloc_swap(folio))
+                                       goto activate_locked_split;
                        }
+                       /*
+                        * Normally the folio will be dirtied in unmap because
+                        * its pte should be dirty. A special case is MADV_FREE
+                        * page. The page's pte could have dirty bit cleared but
+                        * the folio's SwapBacked flag is still set because
+                        * clearing the dirty bit and SwapBacked flag has no
+                        * lock protected. For such folio, unmap will not set
+                        * dirty bit for it, so folio reclaim will not write the
+                        * folio out. This can cause data corruption when the
+                        * folio is swapped in later. Always setting the dirty
+                        * flag for the folio solves the problem.
+                        */
+                       folio_mark_dirty(folio);
                }
 
                /*