migrate: correct lock ordering for hugetlb file folios
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Fri, 9 Jan 2026 04:13:42 +0000
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Fri, 30 Jan 2026 09:27:41 +0000 (10:27 +0100)
commit b7880cb166ab62c2409046b2347261abf701530e upstream.

Syzbot has found a deadlock (analyzed by Lance Yang):

1) Task (5749): Holds folio_lock, then tries to acquire i_mmap_rwsem
   (read lock).
2) Task (5754): Holds i_mmap_rwsem (write lock), then tries to acquire
   folio_lock.

migrate_pages()
  -> migrate_hugetlbs()
    -> unmap_and_move_huge_page()     <- Takes folio_lock!
      -> remove_migration_ptes()
        -> __rmap_walk_file()
          -> i_mmap_lock_read()       <- Waits for i_mmap_rwsem (read lock)!

hugetlbfs_fallocate()
  -> hugetlbfs_punch_hole()           <- Takes i_mmap_rwsem (write lock)!
    -> hugetlbfs_zero_partial_page()
      -> filemap_lock_hugetlb_folio()
        -> filemap_lock_folio()
          -> __filemap_get_folio()    <- Waits for folio_lock!
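
Stripped of the kernel details, this is a textbook ABBA inversion.  A
minimal userspace sketch (illustration only: the thread names are made
up, a plain mutex stands in for folio_lock and a rwlock for
i_mmap_rwsem; this is not kernel code):

	/* gcc abba.c -o abba -lpthread; may hang, which is the point */
	#include <pthread.h>

	static pthread_mutex_t folio = PTHREAD_MUTEX_INITIALIZER;    /* folio_lock */
	static pthread_rwlock_t i_mmap = PTHREAD_RWLOCK_INITIALIZER; /* i_mmap_rwsem */

	static void *migration(void *p)          /* like task 5749 */
	{
		pthread_mutex_lock(&folio);      /* unmap_and_move_huge_page() */
		pthread_rwlock_rdlock(&i_mmap);  /* __rmap_walk_file() */
		pthread_rwlock_unlock(&i_mmap);
		pthread_mutex_unlock(&folio);
		return NULL;
	}

	static void *punch_hole(void *p)         /* like task 5754 */
	{
		pthread_rwlock_wrlock(&i_mmap);  /* hugetlbfs_punch_hole() */
		pthread_mutex_lock(&folio);      /* filemap_lock_folio() */
		pthread_mutex_unlock(&folio);
		pthread_rwlock_unlock(&i_mmap);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;
		pthread_create(&a, NULL, migration, NULL);
		pthread_create(&b, NULL, punch_hole, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

If migration() wins folio while punch_hole() wins i_mmap, each thread
then blocks forever on the lock the other holds.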

The migration path is the one taking locks in the wrong order according to
the documentation at the top of mm/rmap.c.  So expand the scope of the
existing i_mmap_rwsem write lock to cover the call to
remove_migration_ptes() too.
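
Condensed, that means moving the unlock in unmap_and_move_huge_page()
past the PTE-restore step (a paraphrase of the diff below, not the
verbatim source; the "..." elides the intervening move/copy logic):

	/* before: write lock dropped between the two rmap walks */
	i_mmap_lock_write(mapping);
	try_to_migrate(src, TTU_RMAP_LOCKED);
	i_mmap_unlock_write(mapping);
	...
	remove_migration_ptes(..., false);	/* retakes i_mmap_rwsem (read)
						   while still holding folio_lock */

	/* after: the second walk runs under the lock we already hold */
	i_mmap_lock_write(mapping);
	try_to_migrate(src, TTU_RMAP_LOCKED);
	...
	remove_migration_ptes(..., true);	/* rmap lock already held */
	i_mmap_unlock_write(mapping);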

This is (mostly) how it used to be after commit c0d0381ade79 ("hugetlbfs:
use i_mmap_rwsem for more pmd sharing synchronization").  That locking was
removed by commit 336bf30eb765 ("hugetlbfs: fix anon huge page migration
race") for both file and anon hugetlb pages when it should only have been
removed for anon hugetlb pages.

Link: https://lkml.kernel.org/r/20260109041345.3863089-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Fixes: 336bf30eb765 ("hugetlbfs: fix anon huge page migration race")
Reported-by: syzbot+2d9c96466c978346b55f@syzkaller.appspotmail.com
Link: https://lore.kernel.org/all/68e9715a.050a0220.1186a4.000d.GAE@google.com
Debugged-by: Lance Yang <lance.yang@linux.dev>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Jann Horn <jannh@google.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/migrate.c

index 4ed470885217465945668fd108b846587c28f34a..0e291c0221408630017010e56c390afe0b7f3ecb 100644
@@ -1369,6 +1369,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
        int page_was_mapped = 0;
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
+       enum ttu_flags ttu = 0;
 
        if (folio_ref_count(src) == 1) {
                /* page was freed from under us. So we are done. */
@@ -1410,8 +1411,6 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
                goto put_anon;
 
        if (folio_mapped(src)) {
-               enum ttu_flags ttu = 0;
-
                if (!folio_test_anon(src)) {
                        /*
                         * In shared mappings, try_to_unmap could potentially
@@ -1428,9 +1427,6 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 
                try_to_migrate(src, ttu);
                page_was_mapped = 1;
-
-               if (ttu & TTU_RMAP_LOCKED)
-                       i_mmap_unlock_write(mapping);
        }
 
        if (!folio_mapped(src))
@@ -1438,7 +1434,11 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 
        if (page_was_mapped)
                remove_migration_ptes(src,
-                       rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
+                       rc == MIGRATEPAGE_SUCCESS ? dst : src,
+                               ttu ? true : false);
+
+       if (ttu & TTU_RMAP_LOCKED)
+               i_mmap_unlock_write(mapping);
 
 unlock_put_anon:
        folio_unlock(dst);
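
Note that ttu is only non-zero for file-backed hugetlb folios, so the
behaviour of anon migration is unchanged (a reading of the patched code
above):

  anon hugetlb:  mapping stays NULL, ttu stays 0
                 -> remove_migration_ptes(..., false); no unlock needed
  file hugetlb:  i_mmap_rwsem held for write, ttu == TTU_RMAP_LOCKED
                 -> remove_migration_ptes(..., true); unlocked afterwards

This matches the intent described above: restore the c0d0381ade79
locking for file folios while leaving the 336bf30eb765 change for anon
folios in place.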