mm/huge_memory: make min_order_for_split() always return an order
author     Zi Yan <ziy@nvidia.com>
           Wed, 26 Nov 2025 21:06:17 +0000 (16:06 -0500)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 9 Dec 2025 19:25:33 +0000 (11:25 -0800)
min_order_for_split() returns -EBUSY when the folio is truncated and
cannot be split.  Since commit 77008e1b2ef7 ("mm/huge_memory: do not change
split_huge_page*() target order silently"), memory_failure() does not
handle that error and passes -EBUSY to try_to_split_thp_page() directly.
try_to_split_thp_page() then returns -EINVAL, because -EBUSY becomes
0xfffffff0 (new_order is an unsigned int in __folio_split()) and such a
large new_order is rejected as invalid input.  This does not cause an
actual bug.  soft_offline_in_use_page() also uses min_order_for_split(),
but it always passes 0 as the new_order for the split.
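
For illustration only (a standalone userspace sketch, not the kernel code):
-EBUSY is -16 on Linux, and storing it in an unsigned int new_order yields
0xfffffff0, which the split code then rejects as an out-of-range order:

    /* Standalone userspace illustration, not kernel code. */
    #include <errno.h>              /* EBUSY is 16 on Linux, as in the kernel */
    #include <stdio.h>

    int main(void)
    {
            int ret = -EBUSY;               /* old min_order_for_split() error return */
            unsigned int new_order = ret;   /* __folio_split() takes an unsigned int */

            /* prints "new_order = 0xfffffff0", far above any valid folio order */
            printf("new_order = 0x%x\n", new_order);
            return 0;
    }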

Fix it by making min_order_for_split() always return an order.  When the
given folio has been truncated, i.e. folio->mapping == NULL, return 0 and
let the subsequent split function detect the situation and return -EBUSY.
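
The new contract can be mocked in a few lines of standalone C (illustration
only; mock_folio, mock_min_order_for_split() and mock_split() are invented
stand-ins, not kernel code):

    /* Userspace mock of the new contract; struct and helpers are stand-ins. */
    #include <errno.h>
    #include <stdio.h>

    struct mock_folio { void *mapping; };

    /* always returns an order, even for a truncated (mapping == NULL) folio */
    static unsigned int mock_min_order_for_split(const struct mock_folio *f)
    {
            return f->mapping ? 4 /* stand-in for mapping_min_folio_order() */ : 0;
    }

    /* the split-checking path is what rejects a truncated folio */
    static int mock_split(const struct mock_folio *f, unsigned int new_order)
    {
            (void)new_order;
            return f->mapping ? 0 : -EBUSY;
    }

    int main(void)
    {
            struct mock_folio truncated = { .mapping = NULL };
            unsigned int order = mock_min_order_for_split(&truncated);

            /* prints "order=0 split=-16": order stays valid, split reports -EBUSY */
            printf("order=%u split=%d\n", order, mock_split(&truncated, order));
            return 0;
    }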

Add kernel-doc to min_order_for_split() to clarify its use.

Link: https://lkml.kernel.org/r/20251126210618.1971206-4-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8a52e20387b0b38086cd5b974a52edf5d05a5710..21162493a0a0e5e73a419c72145612defb6d4c8a 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -372,7 +372,7 @@ enum split_type {
 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order);
 int folio_split_unmapped(struct folio *folio, unsigned int new_order);
-int min_order_for_split(struct folio *folio);
+unsigned int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
 int folio_check_splittable(struct folio *folio, unsigned int new_order,
                           enum split_type split_type);
@@ -630,10 +630,10 @@ static inline int split_huge_page(struct page *page)
        return -EINVAL;
 }
 
-static inline int min_order_for_split(struct folio *folio)
+static inline unsigned int min_order_for_split(struct folio *folio)
 {
        VM_WARN_ON_ONCE_FOLIO(1, folio);
-       return -EINVAL;
+       return 0;
 }
 
 static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5ce00d53b19e49f06da210a9d1d4c57f23c0797b..1a3273491cc5f76c11abfab6fd2f4e851eb2c52b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4219,16 +4219,29 @@ int folio_split(struct folio *folio, unsigned int new_order,
                             SPLIT_TYPE_NON_UNIFORM);
 }
 
-int min_order_for_split(struct folio *folio)
+/**
+ * min_order_for_split() - get the minimum order @folio can be split to
+ * @folio: folio to split
+ *
+ * min_order_for_split() tells the minimum order @folio can be split to.
+ * If a file-backed folio is truncated, 0 will be returned. Any subsequent
+ * split attempt should get -EBUSY from split checking code.
+ *
+ * Return: @folio's minimum order for split
+ */
+unsigned int min_order_for_split(struct folio *folio)
 {
        if (folio_test_anon(folio))
                return 0;
 
-       if (!folio->mapping) {
-               if (folio_test_pmd_mappable(folio))
-                       count_vm_event(THP_SPLIT_PAGE_FAILED);
-               return -EBUSY;
-       }
+       /*
+        * If the folio got truncated, we don't know the previous mapping and
+        * consequently the old min order. But it doesn't matter, as any split
+        * attempt will immediately fail with -EBUSY as the folio cannot get
+        * split until freed.
+        */
+       if (!folio->mapping)
+               return 0;
 
        return mapping_min_folio_order(folio->mapping);
 }