]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/huge_memory: fix NULL pointer dereference when splitting folio
authorWei Yang <richard.weiyang@gmail.com>
Mon, 1 Dec 2025 22:18:18 +0000 (17:18 -0500)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 6 Dec 2025 21:25:02 +0000 (06:25 +0900)
[ Upstream commit cff47b9e39a6abf03dde5f4f156f841b0c54bba0 ]

Commit c010d47f107f ("mm: thp: split huge page to any lower order pages")
introduced an early check on the folio's order via mapping->flags before
proceeding with the split work.

This check introduced a bug: for shmem folios in the swap cache and
truncated folios, the mapping pointer can be NULL.  Accessing
mapping->flags in this state leads directly to a NULL pointer dereference.

This commit fixes the issue by moving the check for mapping != NULL before
any attempt to access mapping->flags.

Link: https://lkml.kernel.org/r/20251119235302.24773-1-richard.weiyang@gmail.com
Fixes: c010d47f107f ("mm: thp: split huge page to any lower order pages")
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ applied fix to split_huge_page_to_list_to_order() instead of __folio_split() ]
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/huge_memory.c

index d68a22c729fb387ab0c150156a89034c2b72926e..2065374c7e9e6181ccbe77d0037eb416641db27c 100644 (file)
@@ -3404,6 +3404,16 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
        if (new_order >= folio_order(folio))
                return -EINVAL;
 
+       /*
+        * Folios that just got truncated cannot get split. Signal to the
+        * caller that there was a race.
+        *
+        * TODO: this will also currently refuse shmem folios that are in the
+        * swapcache.
+        */
+       if (!is_anon && !folio->mapping)
+               return -EBUSY;
+
        if (is_anon) {
                /* order-1 is not supported for anonymous THP. */
                if (new_order == 1) {
@@ -3466,13 +3476,6 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                gfp_t gfp;
 
                mapping = folio->mapping;
-
-               /* Truncated ? */
-               if (!mapping) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-
                min_order = mapping_min_folio_order(folio->mapping);
                if (new_order < min_order) {
                        VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",