mm/huge_memory: optimize old_order derivation during folio splitting
author Wei Yang <richard.weiyang@gmail.com>
Tue, 21 Oct 2025 21:21:42 +0000 (21:21 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 17 Nov 2025 01:28:19 +0000 (17:28 -0800)
Folio splitting requires both the folio's original order (@old_order) and
the new target order (@split_order).

In the current implementation, @old_order is re-derived on every loop
iteration by calling folio_order().

However, for every iteration after the first, the folio being split is the
result of the previous split, meaning its order is already known to be
equal to the previous iteration's @split_order.
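
For instance (a standalone sketch, not kernel code, ignoring the anon
order-1 skip for brevity), a non-uniform split from order 4 down to
order 0 shows the invariant directly:

  #include <stdio.h>

  int main(void)
  {
          int old_order = 4;      /* read once, before the loop */
          int new_order = 0;

          for (int split_order = old_order - 1; split_order >= new_order;
               split_order--) {
                  printf("split order-%d -> %d folios of order %d\n",
                         old_order, 1 << (old_order - split_order),
                         split_order);
                  old_order = split_order;  /* carry the invariant forward */
          }
          return 0;
  }

Every step halves the remaining folio, so its order after the step
equals @split_order, which is exactly the next iteration's @old_order.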

This commit optimizes the logic:

  * Instead of calling folio_order(), we now set @old_order directly to
    the value of @split_order from the previous iteration.

This change avoids unnecessary function calls and simplifies the loop
setup.

It also removes a check for a case that cannot occur: for a uniform
split, @start_order is initialized to @new_order, so the loop only ever
runs with @split_order == @new_order.
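
As a standalone sketch (made-up helper name, not kernel code), tracing
both modes shows the removed branch could never fire:

  #include <stdbool.h>
  #include <stdio.h>

  static void trace_split(int old_order, int new_order, bool uniform_split)
  {
          int start_order = uniform_split ? new_order : old_order - 1;

          for (int split_order = start_order; split_order >= new_order;
               split_order--) {
                  /* the removed check: reachable only if split_order
                   * could differ from new_order while uniform_split is
                   * set, which start_order rules out */
                  if (uniform_split && split_order != new_order)
                          printf("dead branch\n");  /* never printed */
                  printf("%s iteration: split_order=%d\n",
                         uniform_split ? "uniform" : "non-uniform",
                         split_order);
          }
  }

  int main(void)
  {
          trace_split(4, 0, true);   /* uniform: exactly one iteration */
          trace_split(4, 0, false);  /* non-uniform: orders 3, 2, 1, 0 */
          return 0;
  }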

Link: https://lkml.kernel.org/r/20251021212142.25766-5-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 52c4114a17f291e3fe5e2fc4c9668bc537f195c2..0a521cf9b10a473435651da86573602eb842d6f4 100644
@@ -3402,8 +3402,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                struct address_space *mapping, bool uniform_split)
 {
        const bool is_anon = folio_test_anon(folio);
-       int order = folio_order(folio);
-       int start_order = uniform_split ? new_order : order - 1;
+       int old_order = folio_order(folio);
+       int start_order = uniform_split ? new_order : old_order - 1;
        int split_order;
 
        /*
@@ -3413,14 +3413,11 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
        for (split_order = start_order;
             split_order >= new_order;
             split_order--) {
-               int old_order = folio_order(folio);
                int nr_new_folios = 1UL << (old_order - split_order);
 
                /* order-1 anonymous folio is not supported */
                if (is_anon && split_order == 1)
                        continue;
-               if (uniform_split && split_order != new_order)
-                       continue;
 
                if (mapping) {
                        /*
@@ -3447,7 +3444,13 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
                        mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
                        mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios);
                }
+               /*
+                * If uniform split, the process is complete.
+                * If non-uniform, continue splitting the folio at @split_at
+                * as long as the next @split_order is >= @new_order.
+                */
                folio = page_folio(split_at);
+               old_order = split_order;
        }
 
        return 0;