mm/huge_memory: update folio stat after successful split
author     Wei Yang <richard.weiyang@gmail.com>
           Tue, 21 Oct 2025 21:21:40 +0000 (21:21 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 17 Nov 2025 01:28:19 +0000 (17:28 -0800)
Updating a folio's statistics after a split should be straightforward, but
the current implementation complicates the process:

  * It iterates over the resulting new folios.
  * It uses a flag (@stop_split) to conditionally skip updating the stat
    for the folio at @split_at during the loop.
  * It then attempts to update the skipped stat on a subsequent failure
    path.

This logic is unnecessarily hard to follow.
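
In outline, the pre-patch flow has the following shape (a minimal,
self-contained userspace C sketch; try_split() and mod_stat() are
hypothetical stand-ins for the kernel helpers, and the buddy-style split
is reduced to one binary split per pass):

  #include <stdio.h>

  static int try_split(int order) { (void)order; return 0; } /* pretend success */

  static void mod_stat(int order, int delta)
  {
      printf("order %d: %+d\n", order, delta);
  }

  /* Old shape: a failure sets @stop_split instead of returning, so the
   * stat logic runs once more to account for the folio whose update was
   * deferred on earlier passes. */
  static int split_old(int order, int new_order)
  {
      int ret = 0, stop_split = 0;

      mod_stat(order, -1);                /* decremented up front */

      for (int so = order - 1; so >= new_order && !stop_split; so--) {
          ret = try_split(so);
          if (ret)
              stop_split = 1;             /* flag the error, keep going */

          if (!stop_split)
              mod_stat(so, +1);           /* the buddy half */

          if (stop_split)
              mod_stat(so + 1, +1);       /* failure path: deferred folio */
          else if (so == new_order)
              mod_stat(so, +1);           /* last pass: deferred half */
      }
      return ret;
  }

  int main(void)
  {
      return split_old(4, 2);
  }

Tracing split_old(4, 2) shows the awkwardness: the top-order counter is
decremented before anything has been split, and the half containing
@split_at is only counted on the final pass or on the failure path.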

This commit refactors the code to update the folio statistics only after a
successful split.  This makes the logic much cleaner and sets the stage
for further simplification of the stat-handling code.
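
After the refactor, each pass is self-contained: stats are touched only
once that pass's split has succeeded, so an error can simply return. A
matching sketch (same hypothetical stubs, repeated so the example stands
alone):

  #include <stdio.h>

  static int try_split(int order) { (void)order; return 0; } /* pretend success */

  static void mod_stat(int order, int delta)
  {
      printf("order %d: %+d\n", order, delta);
  }

  /* New shape: on success, decrement the folio that was just split and
   * count both halves; on failure, return at once with nothing deferred. */
  static int split_new(int order, int new_order)
  {
      for (int so = order - 1; so >= new_order; so--) {
          int err = try_split(so);

          if (err)
              return err;       /* earlier passes are already consistent */

          mod_stat(so + 1, -1); /* the folio that just split... */
          mod_stat(so, +2);     /* ...and the two halves it produced */
      }
      return 0;
  }

  int main(void)
  {
      return split_new(4, 2);
  }

Both versions leave the counters in the same final state on full success;
the difference is that split_new() never needs @stop_split or a deferred
update.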

Link: https://lkml.kernel.org/r/20251021212142.25766-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5d95e3462c43311bb0d04ddf785e0374d1bfd3f5..85c472fbcbfa51df188de89f35c99a6dc3aea6b5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3404,20 +3404,15 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
        const bool is_anon = folio_test_anon(folio);
        int order = folio_order(folio);
        int start_order = uniform_split ? new_order : order - 1;
-       bool stop_split = false;
        struct folio *next;
        int split_order;
-       int ret = 0;
-
-       if (is_anon)
-               mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 
        /*
         * split to new_order one order at a time. For uniform split,
         * folio is split to new_order directly.
         */
        for (split_order = start_order;
-            split_order >= new_order && !stop_split;
+            split_order >= new_order;
             split_order--) {
                struct folio *end_folio = folio_next(folio);
                int old_order = folio_order(folio);
@@ -3440,49 +3435,32 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                        else {
                                xas_set_order(xas, folio->index, split_order);
                                xas_try_split(xas, folio, old_order);
-                               if (xas_error(xas)) {
-                                       ret = xas_error(xas);
-                                       stop_split = true;
-                               }
+                               if (xas_error(xas))
+                                       return xas_error(xas);
                        }
                }
 
-               if (!stop_split) {
-                       folio_split_memcg_refs(folio, old_order, split_order);
-                       split_page_owner(&folio->page, old_order, split_order);
-                       pgalloc_tag_split(folio, old_order, split_order);
-
-                       __split_folio_to_order(folio, old_order, split_order);
-               }
+               folio_split_memcg_refs(folio, old_order, split_order);
+               split_page_owner(&folio->page, old_order, split_order);
+               pgalloc_tag_split(folio, old_order, split_order);
+               __split_folio_to_order(folio, old_order, split_order);
 
+               if (is_anon)
+                       mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
                /*
                 * Iterate through after-split folios and update folio stats.
-                * But in buddy allocator like split, the folio
-                * containing the specified page is skipped until its order
-                * is new_order, since the folio will be worked on in next
-                * iteration.
                 */
                for (new_folio = folio; new_folio != end_folio; new_folio = next) {
                        next = folio_next(new_folio);
-                       /*
-                        * for buddy allocator like split, new_folio containing
-                        * @split_at page could be split again, thus do not
-                        * change stats yet. Wait until new_folio's order is
-                        * @new_order or stop_split is set to true by the above
-                        * xas_split() failure.
-                        */
-                       if (new_folio == page_folio(split_at)) {
+                       if (new_folio == page_folio(split_at))
                                folio = new_folio;
-                               if (split_order != new_order && !stop_split)
-                                       continue;
-                       }
                        if (is_anon)
                                mod_mthp_stat(folio_order(new_folio),
                                              MTHP_STAT_NR_ANON, 1);
                }
        }
 
-       return ret;
+       return 0;
 }
 
 bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,