mm/huge_memory: fix NULL pointer dereference when splitting folio
author Wei Yang <richard.weiyang@gmail.com>
Wed, 19 Nov 2025 23:53:02 +0000 (23:53 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 6 Dec 2025 21:27:37 +0000 (06:27 +0900)
commit cff47b9e39a6abf03dde5f4f156f841b0c54bba0 upstream.

Commit c010d47f107f ("mm: thp: split huge page to any lower order pages")
introduced an early check on the folio's order via mapping->flags before
proceeding with the split work.

This check introduced a bug: for shmem folios in the swap cache and
truncated folios, the mapping pointer can be NULL.  Accessing
mapping->flags in this state leads directly to a NULL pointer dereference.

This commit fixes the issue by moving the check for mapping != NULL before
any attempt to access mapping->flags.
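
For illustration, a minimal userspace sketch of the two orderings (the
struct layout, the 0x3 order mask and the split_check_*() helpers are
simplified stand-ins for this write-up, not the real definitions in
mm/huge_memory.c):

    #include <errno.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures involved. */
    struct address_space {
        unsigned long flags;            /* encodes the minimum folio order */
    };

    struct folio {
        struct address_space *mapping;  /* NULL after truncation, or for
                                           shmem folios in the swap cache */
    };

    /* Buggy ordering: reads mapping->flags before checking for NULL. */
    static int split_check_buggy(struct folio *folio, unsigned int new_order)
    {
        /* Crashes when folio->mapping is NULL. */
        unsigned int min_order = folio->mapping->flags & 0x3;

        if (new_order < min_order)
            return -EINVAL;
        return 0;
    }

    /* Fixed ordering: fail with -EBUSY before touching mapping->flags. */
    static int split_check_fixed(struct folio *folio, unsigned int new_order)
    {
        unsigned int min_order;

        if (!folio->mapping)
            return -EBUSY;

        min_order = folio->mapping->flags & 0x3;
        if (new_order < min_order)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        struct folio truncated = { .mapping = NULL };

        /* split_check_buggy(&truncated, 0) would crash on the NULL mapping;
           the fixed ordering reports the race to the caller instead. */
        printf("split_check_fixed: %d\n", split_check_fixed(&truncated, 0));
        return 0;
    }

Built as a normal userspace program, the fixed variant returns -EBUSY for
the truncated folio instead of crashing.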

Link: https://lkml.kernel.org/r/20251119235302.24773-1-richard.weiyang@gmail.com
Fixes: c010d47f107f ("mm: thp: split huge page to any lower order pages")
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c5e1ee5668412330b6035760c9a181625e2fa348..5d63ee72c1f69001e34f8e882c5b54c8acbf1824 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3626,6 +3626,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        if (folio != page_folio(split_at) || folio != page_folio(lock_at))
                return -EINVAL;
 
+       /*
+        * Folios that just got truncated cannot get split. Signal to the
+        * caller that there was a race.
+        *
+        * TODO: this will also currently refuse shmem folios that are in the
+        * swapcache.
+        */
+       if (!is_anon && !folio->mapping)
+               return -EBUSY;
+
        if (new_order >= folio_order(folio))
                return -EINVAL;
 
@@ -3666,18 +3676,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                gfp_t gfp;
 
                mapping = folio->mapping;
-
-               /* Truncated ? */
-               /*
-                * TODO: add support for large shmem folio in swap cache.
-                * When shmem is in swap cache, mapping is NULL and
-                * folio_test_swapcache() is true.
-                */
-               if (!mapping) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-
                min_order = mapping_min_folio_order(folio->mapping);
                if (new_order < min_order) {
                        ret = -EINVAL;