Rework backport of mm-vmscan-fix-a-bug-calling-wakeup_kswapd-with-a-wro.patch
author    Sasha Levin <sashal@kernel.org>
          Wed, 13 Mar 2024 18:30:45 +0000 (14:30 -0400)
committer Sasha Levin <sashal@kernel.org>
          Wed, 13 Mar 2024 18:30:45 +0000 (14:30 -0400)
queue-6.6/mm-migrate-convert-numamigrate_isolate_page-to-numam.patch [deleted file]
queue-6.6/mm-migrate-remove-pagetranshuge-check-in-numamigrate.patch [deleted file]
queue-6.6/mm-migrate-remove-thp-mapcount-check-in-numamigrate_.patch [deleted file]
queue-6.6/mm-vmscan-fix-a-bug-calling-wakeup_kswapd-with-a-wro.patch
queue-6.6/series

diff --git a/queue-6.6/mm-migrate-convert-numamigrate_isolate_page-to-numam.patch b/queue-6.6/mm-migrate-convert-numamigrate_isolate_page-to-numam.patch
deleted file mode 100644
index c5664ef..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-From 4f2123962d79d1c209c30c367027ec1bdc474f1f Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 13 Sep 2023 17:51:26 +0800
-Subject: mm: migrate: convert numamigrate_isolate_page() to
- numamigrate_isolate_folio()
-
-From: Kefeng Wang <wangkefeng.wang@huawei.com>
-
-[ Upstream commit 2ac9e99f3b21b2864305fbfba4bae5913274c409 ]
-
-Rename numamigrate_isolate_page() to numamigrate_isolate_folio(), then
-make it take a folio and use the folio API to save compound_head() calls.
-
-Link: https://lkml.kernel.org/r/20230913095131.2426871-4-wangkefeng.wang@huawei.com
-Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
-Reviewed-by: Zi Yan <ziy@nvidia.com>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: "Huang, Ying" <ying.huang@intel.com>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: Mike Kravetz <mike.kravetz@oracle.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Stable-dep-of: 2774f256e7c0 ("mm/vmscan: fix a bug calling wakeup_kswapd() with a wrong zone index")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/migrate.c | 20 ++++++++++----------
- 1 file changed, 10 insertions(+), 10 deletions(-)
-
-diff --git a/mm/migrate.c b/mm/migrate.c
-index c9fabb960996f..e5f2f7243a659 100644
---- a/mm/migrate.c
-+++ b/mm/migrate.c
-@@ -2501,10 +2501,9 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
-       return __folio_alloc_node(gfp, order, nid);
- }
--static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
-+static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
- {
--      int nr_pages = thp_nr_pages(page);
--      int order = compound_order(page);
-+      int nr_pages = folio_nr_pages(folio);
-       /* Avoid migrating to a node that is nearly full */
-       if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
-@@ -2516,22 +2515,23 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
-                       if (managed_zone(pgdat->node_zones + z))
-                               break;
-               }
--              wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
-+              wakeup_kswapd(pgdat->node_zones + z, 0,
-+                            folio_order(folio), ZONE_MOVABLE);
-               return 0;
-       }
--      if (!isolate_lru_page(page))
-+      if (!folio_isolate_lru(folio))
-               return 0;
--      mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
-+      node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
-                           nr_pages);
-       /*
--       * Isolating the page has taken another reference, so the
--       * caller's reference can be safely dropped without the page
-+       * Isolating the folio has taken another reference, so the
-+       * caller's reference can be safely dropped without the folio
-        * disappearing underneath us during migration.
-        */
--      put_page(page);
-+      folio_put(folio);
-       return 1;
- }
-@@ -2565,7 +2565,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-       if (page_is_file_lru(page) && PageDirty(page))
-               goto out;
--      isolated = numamigrate_isolate_page(pgdat, page);
-+      isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
-       if (!isolated)
-               goto out;
--- 
-2.43.0
-
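For context on the conversion deleted above: every page-based helper such as
thp_nr_pages() or compound_order() must first resolve a possibly-tail struct
page to its head page via compound_head(), whereas a struct folio is by
definition a head page, so folio_nr_pages() and folio_order() skip that
lookup. A minimal userspace sketch of the pattern (illustrative only: the
struct page, struct folio, and helpers below are simplified stand-ins, not
kernel source):

    /* Model of why folio helpers save compound_head() calls. */
    #include <stdio.h>

    struct page {
            struct page *head;      /* points to itself for head/base pages */
            unsigned int order;     /* meaningful only on the head page */
    };

    struct folio {                  /* a folio always wraps a head page */
            struct page page;
    };

    static struct page *compound_head(struct page *p)
    {
            return p->head;         /* the page API pays this lookup per call */
    }

    static unsigned int compound_order(struct page *p)
    {
            return compound_head(p)->order;
    }

    static unsigned int folio_order(struct folio *f)
    {
            return f->page.order;   /* head already known: no indirection */
    }

    int main(void)
    {
            struct folio f = { .page = { .order = 2 } };
            struct page tail = { .head = &f.page };

            f.page.head = &f.page;  /* a head page points at itself */

            printf("page API  (tail page): order=%u\n", compound_order(&tail));
            printf("folio API (head page): order=%u\n", folio_order(&f));
            return 0;
    }

Both calls report order 2; the difference is that the page-based path repeats
the head lookup inside every helper, which is exactly what the folio
conversion avoids.
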
diff --git a/queue-6.6/mm-migrate-remove-pagetranshuge-check-in-numamigrate.patch b/queue-6.6/mm-migrate-remove-pagetranshuge-check-in-numamigrate.patch
deleted file mode 100644
index 276dbcc..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-From 33a638a72e8de7db8b0ae986a8c1b06728d757cc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 13 Sep 2023 17:51:24 +0800
-Subject: mm: migrate: remove PageTransHuge check in numamigrate_isolate_page()
-
-From: Kefeng Wang <wangkefeng.wang@huawei.com>
-
-[ Upstream commit a8ac4a767dcd9d87d8229045904d9fe15ea5e0e8 ]
-
-Patch series "mm: migrate: more folio conversion and unification", v3.
-
-Convert more migrate functions to use a folio; this is also preparation
-for large folio migration support when balancing NUMA.
-
-This patch (of 8):
-
-The assert VM_BUG_ON_PAGE(order && !PageTransHuge(page), page) is not very
-useful:
-
-   1) for a tail/base page, order = 0; for a head page, order > 0 and
-      PageTransHuge() is true
-   2) there is a PageCompound() check, and only base pages are handled in
-      do_numa_page(), while do_huge_pmd_numa_page() only handles PMD-mapped
-      THP
-   3) if the page is a tail page, isolate_lru_page() will emit a warning
-      and fail to isolate the page
-   4) if large folio/PTE-mapped THP migration is supported in the future,
-      we could migrate the entire folio on a NUMA fault on a tail page
-
-so just remove the check.
-
-Link: https://lkml.kernel.org/r/20230913095131.2426871-1-wangkefeng.wang@huawei.com
-Link: https://lkml.kernel.org/r/20230913095131.2426871-2-wangkefeng.wang@huawei.com
-Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
-Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: Huang Ying <ying.huang@intel.com>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: Mike Kravetz <mike.kravetz@oracle.com>
-Cc: Zi Yan <ziy@nvidia.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Stable-dep-of: 2774f256e7c0 ("mm/vmscan: fix a bug calling wakeup_kswapd() with a wrong zone index")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/migrate.c | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/mm/migrate.c b/mm/migrate.c
-index b4d972d80b10c..6f8ad6b64c9bc 100644
---- a/mm/migrate.c
-+++ b/mm/migrate.c
-@@ -2506,8 +2506,6 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
-       int nr_pages = thp_nr_pages(page);
-       int order = compound_order(page);
--      VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
--
-       /* Do not migrate THP mapped by multiple processes */
-       if (PageTransHuge(page) && total_mapcount(page) > 1)
-               return 0;
--- 
-2.43.0
-
diff --git a/queue-6.6/mm-migrate-remove-thp-mapcount-check-in-numamigrate_.patch b/queue-6.6/mm-migrate-remove-thp-mapcount-check-in-numamigrate_.patch
deleted file mode 100644
index ba41421..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-From 9429f944b65798873089ce0b50d6f44bddac674b Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 13 Sep 2023 17:51:25 +0800
-Subject: mm: migrate: remove THP mapcount check in numamigrate_isolate_page()
-
-From: Kefeng Wang <wangkefeng.wang@huawei.com>
-
-[ Upstream commit 728be28fae8c838d52c91dce4867133798146357 ]
-
-The check for a THP mapped by multiple processes was introduced by commit
-04fa5d6a6547 ("mm: migrate: check page_count of THP before migrating") and
-refactored by commit 340ef3902cf2 ("mm: numa: cleanup flow of transhuge page
-migration").  It is now out of date: migrate_misplaced_page() uses the
-standard migrate_pages() for both small pages and THPs, and the reference
-count checking happens in folio_migrate_mapping(), so remove the special
-check for THP.
-
-Link: https://lkml.kernel.org/r/20230913095131.2426871-3-wangkefeng.wang@huawei.com
-Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
-Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
-Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
-Cc: David Hildenbrand <david@redhat.com>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: Mike Kravetz <mike.kravetz@oracle.com>
-Cc: Zi Yan <ziy@nvidia.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Stable-dep-of: 2774f256e7c0 ("mm/vmscan: fix a bug calling wakeup_kswapd() with a wrong zone index")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/migrate.c | 4 ----
- 1 file changed, 4 deletions(-)
-
-diff --git a/mm/migrate.c b/mm/migrate.c
-index 6f8ad6b64c9bc..c9fabb960996f 100644
---- a/mm/migrate.c
-+++ b/mm/migrate.c
-@@ -2506,10 +2506,6 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
-       int nr_pages = thp_nr_pages(page);
-       int order = compound_order(page);
--      /* Do not migrate THP mapped by multiple processes */
--      if (PageTransHuge(page) && total_mapcount(page) > 1)
--              return 0;
--
-       /* Avoid migrating to a node that is nearly full */
-       if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
-               int z;
--- 
-2.43.0
-
diff --git a/queue-6.6/mm-vmscan-fix-a-bug-calling-wakeup_kswapd-with-a-wro.patch b/queue-6.6/mm-vmscan-fix-a-bug-calling-wakeup_kswapd-with-a-wro.patch
index 2ba91111adbf6cfb3b63568d4882d8290b662f1c..d8ff5de193d4bfd5612786630e3d386b51cbae38 100644
@@ -1,4 +1,4 @@
-From 8b6bc15674905f185c644d93e48f6b7437c0d184 Mon Sep 17 00:00:00 2001
+From 0423173aad5b8f8495007faf5567b9cb587d9469 Mon Sep 17 00:00:00 2001
 From: Sasha Levin <sashal@kernel.org>
 Date: Fri, 16 Feb 2024 20:15:02 +0900
 Subject: mm/vmscan: fix a bug calling wakeup_kswapd() with a wrong zone index
@@ -75,10 +75,10 @@ Signed-off-by: Sasha Levin <sashal@kernel.org>
  1 file changed, 8 insertions(+)
 
 diff --git a/mm/migrate.c b/mm/migrate.c
-index e5f2f7243a659..d69b4556cc15f 100644
+index b4d972d80b10c..5d7d39b1c0699 100644
 --- a/mm/migrate.c
 +++ b/mm/migrate.c
-@@ -2515,6 +2515,14 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
+@@ -2522,6 +2522,14 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
                        if (managed_zone(pgdat->node_zones + z))
                                break;
                }
@@ -90,9 +90,9 @@ index e5f2f7243a659..d69b4556cc15f 100644
 +              if (z < 0)
 +                      return 0;
 +
-               wakeup_kswapd(pgdat->node_zones + z, 0,
-                             folio_order(folio), ZONE_MOVABLE);
+               wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
                return 0;
+       }
 -- 
 2.43.0
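For context on the reworked fix: the zone scan shown above walks
pgdat->node_zones from the highest index down and can fall out of the loop
with z == -1 when the node has no managed zone, in which case the unpatched
code handed wakeup_kswapd() a pointer one element before the array. The added
"if (z < 0) return 0;" bails out before that happens. A minimal userspace
sketch of the failure mode and the guard (illustrative only: struct zone,
managed_zone(), and pick_kswapd_zone() below are simplified stand-ins, not
kernel source):

    /* Model of the wrong-zone-index bug and its guard. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NR_ZONES 4

    struct zone {
            bool managed;
    };

    static bool managed_zone(const struct zone *z)
    {
            return z->managed;
    }

    static int pick_kswapd_zone(const struct zone zones[], int nr_zones)
    {
            int z;

            /* Scan from the highest zone down, as the kernel loop does. */
            for (z = nr_zones - 1; z >= 0; z--) {
                    if (managed_zone(&zones[z]))
                            break;
            }

            /* The guard added by the fix: without it, z stays -1 here
             * and the caller would index one element before the array. */
            if (z < 0)
                    return -1;

            return z;
    }

    int main(void)
    {
            struct zone zones[MAX_NR_ZONES] = { 0 };  /* no managed zones */
            int z = pick_kswapd_zone(zones, MAX_NR_ZONES);

            if (z < 0)
                    printf("no managed zone: skip the kswapd wakeup\n");
            else
                    printf("would wake kswapd for zone %d\n", z);
            return 0;
    }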
 
diff --git a/queue-6.6/series b/queue-6.6/series
index c7677786d80ecba3c8e067d027e661bf5c2087c2..291ceeab3c51876e81a2ce384f9c675eb865f9b4 100644
@@ -2,9 +2,6 @@ dt-bindings-dma-fsl-edma-add-fsl-edma.h-to-prevent-h.patch
 dmaengine-fsl-edma-utilize-common-dt-binding-header-.patch
 dmaengine-fsl-edma-correct-max_segment_size-setting.patch
 ceph-switch-to-corrected-encoding-of-max_xattr_size-.patch
-mm-migrate-remove-pagetranshuge-check-in-numamigrate.patch
-mm-migrate-remove-thp-mapcount-check-in-numamigrate_.patch
-mm-migrate-convert-numamigrate_isolate_page-to-numam.patch
 mm-vmscan-fix-a-bug-calling-wakeup_kswapd-with-a-wro.patch
 xfrm-pass-udp-encapsulation-in-tx-packet-offload.patch
 net-lan78xx-fix-runtime-pm-count-underflow-on-link-s.patch