]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/migrate: correct nr_failed in migrate_pages_sync()
authorZi Yan <ziy@nvidia.com>
Tue, 17 Oct 2023 16:31:28 +0000 (12:31 -0400)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 22 May 2025 12:12:25 +0000 (14:12 +0200)
commit a259945efe6ada94087ef666e9b38f8e34ea34ba upstream.

nr_failed was missing the large folio splits from migrate_pages_batch()
and can cause a mismatch between migrate_pages() return value and the
number of not migrated pages, i.e., when the return value of
migrate_pages() is 0, there are still pages left in the from page list.
It will happen when a non-PMD THP large folio fails to migrate due to
-ENOMEM and is split successfully but not all the split pages are
migrated, migrate_pages_batch() would return non-zero, but
astats.nr_thp_split = 0.  nr_failed would be 0 and returned to the caller
of migrate_pages(), but the not migrated pages are left in the from page
list without being added back to LRU lists.

Fix it by adding a new nr_split counter for large folio splits and adding
it to nr_failed in migrate_pages_sync() after migrate_pages_batch() is
done.

Link: https://lkml.kernel.org/r/20231017163129.2025214-1-zi.yan@sent.com
Fixes: 2ef7dbb26990 ("migrate_pages: try migrate in batch asynchronously firstly")
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: Huang Ying <ying.huang@intel.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/migrate.c

index 1004b1def1c2010cd0052ff095d523f7fe69ad3c..4ed470885217465945668fd108b846587c28f34a 100644 (file)
@@ -1504,6 +1504,7 @@ struct migrate_pages_stats {
        int nr_thp_succeeded;   /* THP migrated successfully */
        int nr_thp_failed;      /* THP failed to be migrated */
        int nr_thp_split;       /* THP split before migrating */
+       int nr_split;   /* Large folio (include THP) split before migrating */
 };
 
 /*
@@ -1623,6 +1624,7 @@ static int migrate_pages_batch(struct list_head *from,
        int nr_retry_pages = 0;
        int pass = 0;
        bool is_thp = false;
+       bool is_large = false;
        struct folio *folio, *folio2, *dst = NULL, *dst2;
        int rc, rc_saved = 0, nr_pages;
        LIST_HEAD(unmap_folios);
@@ -1638,7 +1640,8 @@ static int migrate_pages_batch(struct list_head *from,
                nr_retry_pages = 0;
 
                list_for_each_entry_safe(folio, folio2, from, lru) {
-                       is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+                       is_large = folio_test_large(folio);
+                       is_thp = is_large && folio_test_pmd_mappable(folio);
                        nr_pages = folio_nr_pages(folio);
 
                        cond_resched();
@@ -1658,6 +1661,7 @@ static int migrate_pages_batch(struct list_head *from,
                                stats->nr_thp_failed++;
                                if (!try_split_folio(folio, split_folios)) {
                                        stats->nr_thp_split++;
+                                       stats->nr_split++;
                                        continue;
                                }
                                stats->nr_failed_pages += nr_pages;
@@ -1686,11 +1690,12 @@ static int migrate_pages_batch(struct list_head *from,
                                nr_failed++;
                                stats->nr_thp_failed += is_thp;
                                /* Large folio NUMA faulting doesn't split to retry. */
-                               if (folio_test_large(folio) && !nosplit) {
+                               if (is_large && !nosplit) {
                                        int ret = try_split_folio(folio, split_folios);
 
                                        if (!ret) {
                                                stats->nr_thp_split += is_thp;
+                                               stats->nr_split += is_large;
                                                break;
                                        } else if (reason == MR_LONGTERM_PIN &&
                                                   ret == -EAGAIN) {
@@ -1836,6 +1841,7 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
        stats->nr_succeeded += astats.nr_succeeded;
        stats->nr_thp_succeeded += astats.nr_thp_succeeded;
        stats->nr_thp_split += astats.nr_thp_split;
+       stats->nr_split += astats.nr_split;
        if (rc < 0) {
                stats->nr_failed_pages += astats.nr_failed_pages;
                stats->nr_thp_failed += astats.nr_thp_failed;
@@ -1843,7 +1849,11 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
                return rc;
        }
        stats->nr_thp_failed += astats.nr_thp_split;
-       nr_failed += astats.nr_thp_split;
+       /*
+        * Do not count rc, as pages will be retried below.
+        * Count nr_split only, since it includes nr_thp_split.
+        */
+       nr_failed += astats.nr_split;
        /*
         * Fall back to migrate all failed folios one by one synchronously. All
         * failed folios except split THPs will be retried, so their failure