mm/huge_memory: merge uniform_split_supported() and non_uniform_split_supported()
author     Wei Yang <richard.weiyang@gmail.com>
           Thu, 6 Nov 2025 03:41:55 +0000 (03:41 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Mon, 24 Nov 2025 23:08:50 +0000 (15:08 -0800)
uniform_split_supported() and non_uniform_split_supported() share
largely identical logic.

The only functional difference is that uniform_split_supported() includes
an additional check on the requested @new_order.

This check exists for two reasons:

  * some file systems and the swap cache only support order-0 folios
  * the behavioral difference between uniform/non-uniform split

The behavioral difference between uniform and non-uniform split:

  * uniform split splits folio directly to @new_order
  * non-uniform split creates after-split folios with orders from
    folio_order(folio) - 1 down to @new_order, ending with two folios
    at @new_order

This means the file system and swap cache checks must apply to any
non-uniform split, and to any uniform split with a non-zero @new_order.
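
To make this concrete, here is a minimal userspace sketch (illustrative
only, not kernel code; folios are modeled as plain integer orders) that
prints the after-split folio orders produced by each split type, e.g.
for an order-9 (PMD-sized on x86-64) folio split down to order 3:

/*
 * Illustrative model of the two split behaviors; "folios" are just
 * integer orders here and no kernel API is involved.
 */
#include <stdio.h>

static void uniform_split(int old_order, int new_order)
{
	/* Uniform split: 2^(old - new) folios, all at @new_order. */
	printf("uniform:     %d folios of order-%d\n",
	       1 << (old_order - new_order), new_order);
}

static void non_uniform_split(int old_order, int new_order)
{
	int order;

	/*
	 * Non-uniform split: one folio at each order from
	 * old_order - 1 down to new_order, plus a second folio at
	 * new_order holding the page of interest.
	 */
	printf("non-uniform:");
	for (order = old_order - 1; order >= new_order; order--)
		printf(" order-%d", order);
	printf(" order-%d\n", new_order);
}

int main(void)
{
	uniform_split(9, 3);		/* 64 folios of order-3 */
	non_uniform_split(9, 3);	/* orders 8 7 6 5 4 3 3 */
	return 0;
}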

This commit unifies the logic and merges the two functions into a single
helper, folio_split_supported(), removing redundant code and simplifying
the split-support check.
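
The combined decision logic can be modeled with a small standalone
sketch. Only the SPLIT_TYPE_* names come from the kernel; struct
folio_model and split_supported() are invented for illustration, and
the CONFIG_READ_ONLY_THP_FOR_FS gate on the mapping check is omitted:

#include <stdbool.h>
#include <stdio.h>

enum split_type { SPLIT_TYPE_UNIFORM, SPLIT_TYPE_NON_UNIFORM };

/* The three folio properties the merged check cares about. */
struct folio_model {
	bool anon;			/* anonymous THP */
	bool mapping_large_folio;	/* mapping supports large folios */
	bool swapcache;			/* folio sits in the swap cache */
};

static bool split_supported(const struct folio_model *f,
			    unsigned int new_order, enum split_type type)
{
	if (f->anon) {
		/* order-1 is not supported for anonymous THP */
		if (new_order == 1)
			return false;
	} else if (type == SPLIT_TYPE_NON_UNIFORM || new_order) {
		/*
		 * Everything except a uniform split to order 0 needs a
		 * file mapping that really supports large folios.
		 */
		if (!f->mapping_large_folio)
			return false;
	}

	/* A swapcache folio can only be split uniformly to order 0. */
	if ((type == SPLIT_TYPE_NON_UNIFORM || new_order) && f->swapcache)
		return false;

	return true;
}

int main(void)
{
	struct folio_model swap = { .mapping_large_folio = true,
				    .swapcache = true };

	printf("%d\n", split_supported(&swap, 0, SPLIT_TYPE_UNIFORM));     /* 1 */
	printf("%d\n", split_supported(&swap, 0, SPLIT_TYPE_NON_UNIFORM)); /* 0 */
	return 0;
}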

Link: https://lkml.kernel.org/r/20251106034155.21398-3-richard.weiyang@gmail.com
Fixes: c010d47f107f ("mm: thp: split huge page to any lower order pages")
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: "David Hildenbrand (Red Hat)" <david@kernel.org>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c

index b74708dc5b5f6d166e187aad7f0c78a6a035a7b8..19d4a5f52ca2e80e76c43525350320b8083ad1f8 100644 (file)
@@ -374,10 +374,8 @@ int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list
                unsigned int new_order, bool unmapped);
 int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-               bool warns);
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-               bool warns);
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+               enum split_type split_type, bool warns);
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
                struct list_head *list);
 
@@ -408,7 +406,7 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_o
 static inline int try_folio_split_to_order(struct folio *folio,
                struct page *page, unsigned int new_order)
 {
-       if (!non_uniform_split_supported(folio, new_order, /* warns= */ false))
+       if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
                return split_huge_page_to_order(&folio->page, new_order);
        return folio_split(folio, new_order, page, NULL);
 }
index 4118f330c55e268f022f8a2930998343870945c2..d79a4bb363de861317ca9ff6f87ce0be8acb0370 100644 (file)
@@ -3593,8 +3593,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
        return 0;
 }
 
-bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
-               bool warns)
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+               enum split_type split_type, bool warns)
 {
        if (folio_test_anon(folio)) {
                /* order-1 is not supported for anonymous THP. */
@@ -3602,48 +3602,41 @@ bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
                                "Cannot split to order-1 folio");
                if (new_order == 1)
                        return false;
-       } else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
-           !mapping_large_folio_support(folio->mapping)) {
-               /*
-                * No split if the file system does not support large folio.
-                * Note that we might still have THPs in such mappings due to
-                * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
-                * does not actually support large folios properly.
-                */
-               VM_WARN_ONCE(warns,
-                       "Cannot split file folio to non-0 order");
-               return false;
-       }
-
-       /* Only swapping a whole PMD-mapped folio is supported */
-       if (folio_test_swapcache(folio)) {
-               VM_WARN_ONCE(warns,
-                       "Cannot split swapcache folio to non-0 order");
-               return false;
-       }
-
-       return true;
-}
-
-/* See comments in non_uniform_split_supported() */
-bool uniform_split_supported(struct folio *folio, unsigned int new_order,
-               bool warns)
-{
-       if (folio_test_anon(folio)) {
-               VM_WARN_ONCE(warns && new_order == 1,
-                               "Cannot split to order-1 folio");
-               if (new_order == 1)
-                       return false;
-       } else  if (new_order) {
+       } else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
                if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
                    !mapping_large_folio_support(folio->mapping)) {
+                       /*
+                        * We can always split a folio down to a single page
+                        * (new_order == 0) uniformly.
+                        *
+                        * For any other scenario
+                        *   a) uniform split targeting a large folio
+                        *      (new_order > 0)
+                        *   b) any non-uniform split
+                        * we must confirm that the file system supports large
+                        * folios.
+                        *
+                        * Note that we might still have THPs in such
+                        * mappings, which is created from khugepaged when
+                        * CONFIG_READ_ONLY_THP_FOR_FS is enabled. But in that
+                        * case, the mapping does not actually support large
+                        * folios properly.
+                        */
                        VM_WARN_ONCE(warns,
                                "Cannot split file folio to non-0 order");
                        return false;
                }
        }
 
-       if (new_order && folio_test_swapcache(folio)) {
+       /*
+        * swapcache folio could only be split to order 0
+        *
+        * non-uniform split creates after-split folios with orders from
+        * folio_order(folio) - 1 to new_order, making it not suitable for any
+        * swapcache folio split. Only uniform split to order-0 can be used
+        * here.
+        */
+       if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
                VM_WARN_ONCE(warns,
                        "Cannot split swapcache folio to non-0 order");
                return false;
@@ -3711,11 +3704,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        if (new_order >= old_order)
                return -EINVAL;
 
-       if (split_type == SPLIT_TYPE_UNIFORM && !uniform_split_supported(folio, new_order, true))
-               return -EINVAL;
-
-       if (split_type == SPLIT_TYPE_NON_UNIFORM &&
-           !non_uniform_split_supported(folio, new_order, true))
+       if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true))
                return -EINVAL;
 
        is_hzp = is_huge_zero_folio(folio);