git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop some 5.10 patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 13 Dec 2022 14:24:28 +0000 (15:24 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 13 Dec 2022 14:24:28 +0000 (15:24 +0100)
queue-5.10/mm-__isolate_lru_page_prepare-in-isolate_migratepage.patch [deleted file]
queue-5.10/mm-compaction-do-page-isolation-first-in-compaction.patch [deleted file]
queue-5.10/mm-lru-introduce-testclearpagelru.patch [deleted file]
queue-5.10/mm-migrate-fix-thp-s-mapcount-on-isolation.patch [deleted file]
queue-5.10/mm-mlock-remove-__munlock_isolate_lru_page.patch [deleted file]
queue-5.10/mm-mlock-remove-lru_lock-on-testclearpagemlocked.patch [deleted file]
queue-5.10/mm-vmscan-__isolate_lru_page_prepare-cleanup.patch [deleted file]
queue-5.10/net-broadcom-add-ptp_1588_clock_optional-dependency-.patch [deleted file]
queue-5.10/series
queue-5.15/io_uring-fix-a-null-ptr-deref-in-io_tctx_exit_cb.patch

diff --git a/queue-5.10/mm-__isolate_lru_page_prepare-in-isolate_migratepage.patch b/queue-5.10/mm-__isolate_lru_page_prepare-in-isolate_migratepage.patch
deleted file mode 100644 (file)
index 2a42e3a..0000000
+++ /dev/null
@@ -1,318 +0,0 @@
-From 60accdd3d3a54a9d28d0f2d39ec740df38c167fa Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 22 Mar 2022 14:45:41 -0700
-Subject: mm: __isolate_lru_page_prepare() in isolate_migratepages_block()
-
-From: Hugh Dickins <hughd@google.com>
-
-[ Upstream commit 89f6c88a6ab4a11deb14c270f7f1454cda4f73d6 ]
-
-__isolate_lru_page_prepare() conflates two unrelated functions, with the
-flags to one disjoint from the flags to the other; and hides some of the
-important checks outside of isolate_migratepages_block(), where the
-sequence is better to be visible.  It comes from the days of lumpy
-reclaim, before compaction, when the combination made more sense.
-
-Move what's needed by mm/compaction.c isolate_migratepages_block() inline
-there, and what's needed by mm/vmscan.c isolate_lru_pages() inline there.
-
-Shorten "isolate_mode" to "mode", so the sequence of conditions is easier
-to read.  Declare a "mapping" variable, to save one call to page_mapping()
-(but not another: calling again after page is locked is necessary).
-Simplify isolate_lru_pages() with a "move_to" list pointer.
-
-Link: https://lkml.kernel.org/r/879d62a8-91cc-d3c6-fb3b-69768236df68@google.com
-Signed-off-by: Hugh Dickins <hughd@google.com>
-Acked-by: David Rientjes <rientjes@google.com>
-Reviewed-by: Alex Shi <alexs@kernel.org>
-Cc: Alexander Duyck <alexander.duyck@gmail.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Stable-dep-of: 829ae0f81ce0 ("mm: migrate: fix THP's mapcount on isolation")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/swap.h |   1 -
- mm/compaction.c      |  51 +++++++++++++++++++---
- mm/vmscan.c          | 101 ++++++++-----------------------------------
- 3 files changed, 62 insertions(+), 91 deletions(-)
-
-diff --git a/include/linux/swap.h b/include/linux/swap.h
-index 394d5de5d4b4..a502928c29c5 100644
---- a/include/linux/swap.h
-+++ b/include/linux/swap.h
-@@ -358,7 +358,6 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- extern unsigned long zone_reclaimable_pages(struct zone *zone);
- extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                       gfp_t gfp_mask, nodemask_t *mask);
--extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
- extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
-                                                 unsigned long nr_pages,
-                                                 gfp_t gfp_mask,
-diff --git a/mm/compaction.c b/mm/compaction.c
-index ea46aadc7c21..57ce6b001b10 100644
---- a/mm/compaction.c
-+++ b/mm/compaction.c
-@@ -784,7 +784,7 @@ static bool too_many_isolated(pg_data_t *pgdat)
-  * @cc:               Compaction control structure.
-  * @low_pfn:  The first PFN to isolate
-  * @end_pfn:  The one-past-the-last PFN to isolate, within same pageblock
-- * @isolate_mode: Isolation mode to be used.
-+ * @mode:     Isolation mode to be used.
-  *
-  * Isolate all pages that can be migrated from the range specified by
-  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
-@@ -798,7 +798,7 @@ static bool too_many_isolated(pg_data_t *pgdat)
-  */
- static unsigned long
- isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
--                      unsigned long end_pfn, isolate_mode_t isolate_mode)
-+                      unsigned long end_pfn, isolate_mode_t mode)
- {
-       pg_data_t *pgdat = cc->zone->zone_pgdat;
-       unsigned long nr_scanned = 0, nr_isolated = 0;
-@@ -806,6 +806,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-       unsigned long flags = 0;
-       bool locked = false;
-       struct page *page = NULL, *valid_page = NULL;
-+      struct address_space *mapping;
-       unsigned long start_pfn = low_pfn;
-       bool skip_on_failure = false;
-       unsigned long next_skip_pfn = 0;
-@@ -949,7 +950,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-                                       locked = false;
-                               }
--                              if (!isolate_movable_page(page, isolate_mode))
-+                              if (!isolate_movable_page(page, mode))
-                                       goto isolate_success;
-                       }
-@@ -961,15 +962,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-                * so avoid taking lru_lock and isolating it unnecessarily in an
-                * admittedly racy check.
-                */
--              if (!page_mapping(page) &&
--                  page_count(page) > page_mapcount(page))
-+              mapping = page_mapping(page);
-+              if (!mapping && page_count(page) > page_mapcount(page))
-                       goto isolate_fail;
-               /*
-                * Only allow to migrate anonymous pages in GFP_NOFS context
-                * because those do not depend on fs locks.
-                */
--              if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
-+              if (!(cc->gfp_mask & __GFP_FS) && mapping)
-                       goto isolate_fail;
-               /*
-@@ -980,9 +981,45 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-               if (unlikely(!get_page_unless_zero(page)))
-                       goto isolate_fail;
--              if (!__isolate_lru_page_prepare(page, isolate_mode))
-+              /* Only take pages on LRU: a check now makes later tests safe */
-+              if (!PageLRU(page))
-+                      goto isolate_fail_put;
-+
-+              /* Compaction might skip unevictable pages but CMA takes them */
-+              if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page))
-+                      goto isolate_fail_put;
-+
-+              /*
-+               * To minimise LRU disruption, the caller can indicate with
-+               * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
-+               * it will be able to migrate without blocking - clean pages
-+               * for the most part.  PageWriteback would require blocking.
-+               */
-+              if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page))
-                       goto isolate_fail_put;
-+              if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) {
-+                      bool migrate_dirty;
-+
-+                      /*
-+                       * Only pages without mappings or that have a
-+                       * ->migratepage callback are possible to migrate
-+                       * without blocking. However, we can be racing with
-+                       * truncation so it's necessary to lock the page
-+                       * to stabilise the mapping as truncation holds
-+                       * the page lock until after the page is removed
-+                       * from the page cache.
-+                       */
-+                      if (!trylock_page(page))
-+                              goto isolate_fail_put;
-+
-+                      mapping = page_mapping(page);
-+                      migrate_dirty = !mapping || mapping->a_ops->migratepage;
-+                      unlock_page(page);
-+                      if (!migrate_dirty)
-+                              goto isolate_fail_put;
-+              }
-+
-               /* Try isolate the page */
-               if (!TestClearPageLRU(page))
-                       goto isolate_fail_put;
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 00a47845a15b..9cba0f890b33 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -1535,69 +1535,6 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
-       return nr_reclaimed;
- }
--/*
-- * Attempt to remove the specified page from its LRU.  Only take this page
-- * if it is of the appropriate PageActive status.  Pages which are being
-- * freed elsewhere are also ignored.
-- *
-- * page:      page to consider
-- * mode:      one of the LRU isolation modes defined above
-- *
-- * returns true on success, false on failure.
-- */
--bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
--{
--      /* Only take pages on the LRU. */
--      if (!PageLRU(page))
--              return false;
--
--      /* Compaction should not handle unevictable pages but CMA can do so */
--      if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
--              return false;
--
--      /*
--       * To minimise LRU disruption, the caller can indicate that it only
--       * wants to isolate pages it will be able to operate on without
--       * blocking - clean pages for the most part.
--       *
--       * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
--       * that it is possible to migrate without blocking
--       */
--      if (mode & ISOLATE_ASYNC_MIGRATE) {
--              /* All the caller can do on PageWriteback is block */
--              if (PageWriteback(page))
--                      return false;
--
--              if (PageDirty(page)) {
--                      struct address_space *mapping;
--                      bool migrate_dirty;
--
--                      /*
--                       * Only pages without mappings or that have a
--                       * ->migratepage callback are possible to migrate
--                       * without blocking. However, we can be racing with
--                       * truncation so it's necessary to lock the page
--                       * to stabilise the mapping as truncation holds
--                       * the page lock until after the page is removed
--                       * from the page cache.
--                       */
--                      if (!trylock_page(page))
--                              return false;
--
--                      mapping = page_mapping(page);
--                      migrate_dirty = !mapping || mapping->a_ops->migratepage;
--                      unlock_page(page);
--                      if (!migrate_dirty)
--                              return false;
--              }
--      }
--
--      if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
--              return false;
--
--      return true;
--}
--
- /*
-  * Update LRU sizes after isolating pages. The LRU size updates must
-  * be complete before mem_cgroup_update_lru_size due to a sanity check.
-@@ -1647,11 +1584,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-       unsigned long skipped = 0;
-       unsigned long scan, total_scan, nr_pages;
-       LIST_HEAD(pages_skipped);
--      isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
-       total_scan = 0;
-       scan = 0;
-       while (scan < nr_to_scan && !list_empty(src)) {
-+              struct list_head *move_to = src;
-               struct page *page;
-               page = lru_to_page(src);
-@@ -1661,9 +1598,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-               total_scan += nr_pages;
-               if (page_zonenum(page) > sc->reclaim_idx) {
--                      list_move(&page->lru, &pages_skipped);
-                       nr_skipped[page_zonenum(page)] += nr_pages;
--                      continue;
-+                      move_to = &pages_skipped;
-+                      goto move;
-               }
-               /*
-@@ -1671,37 +1608,34 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-                * return with no isolated pages if the LRU mostly contains
-                * ineligible pages.  This causes the VM to not reclaim any
-                * pages, triggering a premature OOM.
--               *
--               * Account all tail pages of THP.  This would not cause
--               * premature OOM since __isolate_lru_page() returns -EBUSY
--               * only when the page is being freed somewhere else.
-+               * Account all tail pages of THP.
-                */
-               scan += nr_pages;
--              if (!__isolate_lru_page_prepare(page, mode)) {
--                      /* It is being freed elsewhere */
--                      list_move(&page->lru, src);
--                      continue;
--              }
-+
-+              if (!PageLRU(page))
-+                      goto move;
-+              if (!sc->may_unmap && page_mapped(page))
-+                      goto move;
-+
-               /*
-                * Be careful not to clear PageLRU until after we're
-                * sure the page is not being freed elsewhere -- the
-                * page release code relies on it.
-                */
--              if (unlikely(!get_page_unless_zero(page))) {
--                      list_move(&page->lru, src);
--                      continue;
--              }
-+              if (unlikely(!get_page_unless_zero(page)))
-+                      goto move;
-               if (!TestClearPageLRU(page)) {
-                       /* Another thread is already isolating this page */
-                       put_page(page);
--                      list_move(&page->lru, src);
--                      continue;
-+                      goto move;
-               }
-               nr_taken += nr_pages;
-               nr_zone_taken[page_zonenum(page)] += nr_pages;
--              list_move(&page->lru, dst);
-+              move_to = dst;
-+move:
-+              list_move(&page->lru, move_to);
-       }
-       /*
-@@ -1725,7 +1659,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-       }
-       *nr_scanned = total_scan;
-       trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
--                                  total_scan, skipped, nr_taken, mode, lru);
-+                                  total_scan, skipped, nr_taken,
-+                                  sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru);
-       update_lru_sizes(lruvec, lru, nr_zone_taken);
-       return nr_taken;
- }
--- 
-2.35.1
-
diff --git a/queue-5.10/mm-compaction-do-page-isolation-first-in-compaction.patch b/queue-5.10/mm-compaction-do-page-isolation-first-in-compaction.patch
deleted file mode 100644 (file)
index 45d5ebf..0000000
+++ /dev/null
@@ -1,266 +0,0 @@
-From ef33d369381db1a4df5a9f474b014cc4e4664606 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 15 Dec 2020 12:34:20 -0800
-Subject: mm/compaction: do page isolation first in compaction
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Alex Shi <alex.shi@linux.alibaba.com>
-
-[ Upstream commit 9df41314390b81a541ca6e84c8340bad0959e4b5 ]
-
-Currently, compaction would get the lru_lock and then do page isolation
-which works fine with pgdat->lru_lock, since any page isoltion would
-compete for the lru_lock.  If we want to change to memcg lru_lock, we have
-to isolate the page before getting lru_lock, thus isoltion would block
-page's memcg change which relay on page isoltion too.  Then we could
-safely use per memcg lru_lock later.
-
-The new page isolation use previous introduced TestClearPageLRU() + pgdat
-lru locking which will be changed to memcg lru lock later.
-
-Hugh Dickins <hughd@google.com> fixed following bugs in this patch's early
-version:
-
-Fix lots of crashes under compaction load: isolate_migratepages_block()
-must clean up appropriately when rejecting a page, setting PageLRU again
-if it had been cleared; and a put_page() after get_page_unless_zero()
-cannot safely be done while holding locked_lruvec - it may turn out to be
-the final put_page(), which will take an lruvec lock when PageLRU.
-
-And move __isolate_lru_page_prepare back after get_page_unless_zero to
-make trylock_page() safe: trylock_page() is not safe to use at this time:
-its setting PG_locked can race with the page being freed or allocated
-("Bad page"), and can also erase flags being set by one of those "sole
-owners" of a freshly allocated page who use non-atomic __SetPageFlag().
-
-Link: https://lkml.kernel.org/r/1604566549-62481-16-git-send-email-alex.shi@linux.alibaba.com
-Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
-Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
-Acked-by: Hugh Dickins <hughd@google.com>
-Acked-by: Johannes Weiner <hannes@cmpxchg.org>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Cc: Matthew Wilcox <willy@infradead.org>
-Cc: Alexander Duyck <alexander.duyck@gmail.com>
-Cc: Andrea Arcangeli <aarcange@redhat.com>
-Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
-Cc: "Chen, Rong A" <rong.a.chen@intel.com>
-Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
-Cc: "Huang, Ying" <ying.huang@intel.com>
-Cc: Jann Horn <jannh@google.com>
-Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
-Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
-Cc: Kirill A. Shutemov <kirill@shutemov.name>
-Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
-Cc: Mel Gorman <mgorman@techsingularity.net>
-Cc: Michal Hocko <mhocko@kernel.org>
-Cc: Michal Hocko <mhocko@suse.com>
-Cc: Mika Penttilä <mika.penttila@nextfour.com>
-Cc: Minchan Kim <minchan@kernel.org>
-Cc: Shakeel Butt <shakeelb@google.com>
-Cc: Tejun Heo <tj@kernel.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
-Cc: Wei Yang <richard.weiyang@gmail.com>
-Cc: Yang Shi <yang.shi@linux.alibaba.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Stable-dep-of: 829ae0f81ce0 ("mm: migrate: fix THP's mapcount on isolation")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/swap.h |  2 +-
- mm/compaction.c      | 42 +++++++++++++++++++++++++++++++++---------
- mm/vmscan.c          | 43 ++++++++++++++++++++++---------------------
- 3 files changed, 56 insertions(+), 31 deletions(-)
-
-diff --git a/include/linux/swap.h b/include/linux/swap.h
-index fbc6805358da..3577d3a6ec37 100644
---- a/include/linux/swap.h
-+++ b/include/linux/swap.h
-@@ -358,7 +358,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- extern unsigned long zone_reclaimable_pages(struct zone *zone);
- extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                       gfp_t gfp_mask, nodemask_t *mask);
--extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
-+extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
- extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
-                                                 unsigned long nr_pages,
-                                                 gfp_t gfp_mask,
-diff --git a/mm/compaction.c b/mm/compaction.c
-index 8dfbe86bd74f..ba3e907f03b7 100644
---- a/mm/compaction.c
-+++ b/mm/compaction.c
-@@ -890,6 +890,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-               if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
-                       if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
-                               low_pfn = end_pfn;
-+                              page = NULL;
-                               goto isolate_abort;
-                       }
-                       valid_page = page;
-@@ -971,6 +972,21 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-               if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
-                       goto isolate_fail;
-+              /*
-+               * Be careful not to clear PageLRU until after we're
-+               * sure the page is not being freed elsewhere -- the
-+               * page release code relies on it.
-+               */
-+              if (unlikely(!get_page_unless_zero(page)))
-+                      goto isolate_fail;
-+
-+              if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
-+                      goto isolate_fail_put;
-+
-+              /* Try isolate the page */
-+              if (!TestClearPageLRU(page))
-+                      goto isolate_fail_put;
-+
-               /* If we already hold the lock, we can skip some rechecking */
-               if (!locked) {
-                       locked = compact_lock_irqsave(&pgdat->lru_lock,
-@@ -983,10 +999,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-                                       goto isolate_abort;
-                       }
--                      /* Recheck PageLRU and PageCompound under lock */
--                      if (!PageLRU(page))
--                              goto isolate_fail;
--
-                       /*
-                        * Page become compound since the non-locked check,
-                        * and it's on LRU. It can only be a THP so the order
-@@ -994,16 +1006,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-                        */
-                       if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
-                               low_pfn += compound_nr(page) - 1;
--                              goto isolate_fail;
-+                              SetPageLRU(page);
-+                              goto isolate_fail_put;
-                       }
-               }
-               lruvec = mem_cgroup_page_lruvec(page, pgdat);
--              /* Try isolate the page */
--              if (__isolate_lru_page(page, isolate_mode) != 0)
--                      goto isolate_fail;
--
-               /* The whole page is taken off the LRU; skip the tail pages. */
-               if (PageCompound(page))
-                       low_pfn += compound_nr(page) - 1;
-@@ -1032,6 +1041,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-               }
-               continue;
-+
-+isolate_fail_put:
-+              /* Avoid potential deadlock in freeing page under lru_lock */
-+              if (locked) {
-+                      spin_unlock_irqrestore(&pgdat->lru_lock, flags);
-+                      locked = false;
-+              }
-+              put_page(page);
-+
- isolate_fail:
-               if (!skip_on_failure)
-                       continue;
-@@ -1068,9 +1086,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-       if (unlikely(low_pfn > end_pfn))
-               low_pfn = end_pfn;
-+      page = NULL;
-+
- isolate_abort:
-       if (locked)
-               spin_unlock_irqrestore(&pgdat->lru_lock, flags);
-+      if (page) {
-+              SetPageLRU(page);
-+              put_page(page);
-+      }
-       /*
-        * Updated the cached scanner pfn once the pageblock has been scanned
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 8d62eedfc794..5ada402c8d95 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -1545,7 +1545,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
-  *
-  * returns 0 on success, -ve errno on failure.
-  */
--int __isolate_lru_page(struct page *page, isolate_mode_t mode)
-+int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
- {
-       int ret = -EBUSY;
-@@ -1597,22 +1597,9 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
-       if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-               return ret;
--      if (likely(get_page_unless_zero(page))) {
--              /*
--               * Be careful not to clear PageLRU until after we're
--               * sure the page is not being freed elsewhere -- the
--               * page release code relies on it.
--               */
--              if (TestClearPageLRU(page))
--                      ret = 0;
--              else
--                      put_page(page);
--      }
--
--      return ret;
-+      return 0;
- }
--
- /*
-  * Update LRU sizes after isolating pages. The LRU size updates must
-  * be complete before mem_cgroup_update_lru_size due to a sanity check.
-@@ -1692,20 +1679,34 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-                * only when the page is being freed somewhere else.
-                */
-               scan += nr_pages;
--              switch (__isolate_lru_page(page, mode)) {
-+              switch (__isolate_lru_page_prepare(page, mode)) {
-               case 0:
-+                      /*
-+                       * Be careful not to clear PageLRU until after we're
-+                       * sure the page is not being freed elsewhere -- the
-+                       * page release code relies on it.
-+                       */
-+                      if (unlikely(!get_page_unless_zero(page)))
-+                              goto busy;
-+
-+                      if (!TestClearPageLRU(page)) {
-+                              /*
-+                               * This page may in other isolation path,
-+                               * but we still hold lru_lock.
-+                               */
-+                              put_page(page);
-+                              goto busy;
-+                      }
-+
-                       nr_taken += nr_pages;
-                       nr_zone_taken[page_zonenum(page)] += nr_pages;
-                       list_move(&page->lru, dst);
-                       break;
--              case -EBUSY:
-+              default:
-+busy:
-                       /* else it is being freed elsewhere */
-                       list_move(&page->lru, src);
--                      continue;
--
--              default:
--                      BUG();
-               }
-       }
--- 
-2.35.1
-
diff --git a/queue-5.10/mm-lru-introduce-testclearpagelru.patch b/queue-5.10/mm-lru-introduce-testclearpagelru.patch
deleted file mode 100644 (file)
index abe6301..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-From d84c0415a11eafaa01336ef3fa61f707986b5656 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 15 Dec 2020 12:34:16 -0800
-Subject: mm/lru: introduce TestClearPageLRU()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Alex Shi <alex.shi@linux.alibaba.com>
-
-[ Upstream commit d25b5bd8a8f420b15517c19c4626c0c009f72a63 ]
-
-Currently lru_lock still guards both lru list and page's lru bit, that's
-ok.  but if we want to use specific lruvec lock on the page, we need to
-pin down the page's lruvec/memcg during locking.  Just taking lruvec lock
-first may be undermined by the page's memcg charge/migration.  To fix this
-problem, we will clear the lru bit out of locking and use it as pin down
-action to block the page isolation in memcg changing.
-
-So now a standard steps of page isolation is following:
-       1, get_page();         #pin the page avoid to be free
-       2, TestClearPageLRU(); #block other isolation like memcg change
-       3, spin_lock on lru_lock; #serialize lru list access
-       4, delete page from lru list;
-
-This patch start with the first part: TestClearPageLRU, which combines
-PageLRU check and ClearPageLRU into a macro func TestClearPageLRU.  This
-function will be used as page isolation precondition to prevent other
-isolations some where else.  Then there are may !PageLRU page on lru list,
-need to remove BUG() checking accordingly.
-
-There 2 rules for lru bit now:
-1, the lru bit still indicate if a page on lru list, just in some
-   temporary moment(isolating), the page may have no lru bit when
-   it's on lru list.  but the page still must be on lru list when the
-   lru bit set.
-2, have to remove lru bit before delete it from lru list.
-
-As Andrew Morton mentioned this change would dirty cacheline for a page
-which isn't on the LRU.  But the loss would be acceptable in Rong Chen
-<rong.a.chen@intel.com> report:
-https://lore.kernel.org/lkml/20200304090301.GB5972@shao2-debian/
-
-Link: https://lkml.kernel.org/r/1604566549-62481-15-git-send-email-alex.shi@linux.alibaba.com
-Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
-Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
-Acked-by: Hugh Dickins <hughd@google.com>
-Acked-by: Johannes Weiner <hannes@cmpxchg.org>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Cc: Michal Hocko <mhocko@kernel.org>
-Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
-Cc: Alexander Duyck <alexander.duyck@gmail.com>
-Cc: Andrea Arcangeli <aarcange@redhat.com>
-Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
-Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
-Cc: "Huang, Ying" <ying.huang@intel.com>
-Cc: Jann Horn <jannh@google.com>
-Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
-Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
-Cc: Kirill A. Shutemov <kirill@shutemov.name>
-Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
-Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: Mel Gorman <mgorman@techsingularity.net>
-Cc: Michal Hocko <mhocko@suse.com>
-Cc: Mika Penttilä <mika.penttila@nextfour.com>
-Cc: Minchan Kim <minchan@kernel.org>
-Cc: Shakeel Butt <shakeelb@google.com>
-Cc: Tejun Heo <tj@kernel.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Wei Yang <richard.weiyang@gmail.com>
-Cc: Yang Shi <yang.shi@linux.alibaba.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Stable-dep-of: 829ae0f81ce0 ("mm: migrate: fix THP's mapcount on isolation")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/page-flags.h |  1 +
- mm/mlock.c                 |  3 +--
- mm/vmscan.c                | 39 +++++++++++++++++++-------------------
- 3 files changed, 21 insertions(+), 22 deletions(-)
-
-diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
-index 4f6ba9379112..14a0cac9e099 100644
---- a/include/linux/page-flags.h
-+++ b/include/linux/page-flags.h
-@@ -335,6 +335,7 @@ PAGEFLAG(Referenced, referenced, PF_HEAD)
- PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
-       __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
- PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
-+      TESTCLEARFLAG(LRU, lru, PF_HEAD)
- PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
-       TESTCLEARFLAG(Active, active, PF_HEAD)
- PAGEFLAG(Workingset, workingset, PF_HEAD)
-diff --git a/mm/mlock.c b/mm/mlock.c
-index d487aa864e86..7b0e6334be6f 100644
---- a/mm/mlock.c
-+++ b/mm/mlock.c
-@@ -276,10 +276,9 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
-                        * We already have pin from follow_page_mask()
-                        * so we can spare the get_page() here.
-                        */
--                      if (PageLRU(page)) {
-+                      if (TestClearPageLRU(page)) {
-                               struct lruvec *lruvec;
--                              ClearPageLRU(page);
-                               lruvec = mem_cgroup_page_lruvec(page,
-                                                       page_pgdat(page));
-                               del_page_from_lru_list(page, lruvec,
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 51ccd80e70b6..8d62eedfc794 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -1547,7 +1547,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
-  */
- int __isolate_lru_page(struct page *page, isolate_mode_t mode)
- {
--      int ret = -EINVAL;
-+      int ret = -EBUSY;
-       /* Only take pages on the LRU. */
-       if (!PageLRU(page))
-@@ -1557,8 +1557,6 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
-       if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-               return ret;
--      ret = -EBUSY;
--
-       /*
-        * To minimise LRU disruption, the caller can indicate that it only
-        * wants to isolate pages it will be able to operate on without
-@@ -1605,8 +1603,10 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
-                * sure the page is not being freed elsewhere -- the
-                * page release code relies on it.
-                */
--              ClearPageLRU(page);
--              ret = 0;
-+              if (TestClearPageLRU(page))
-+                      ret = 0;
-+              else
-+                      put_page(page);
-       }
-       return ret;
-@@ -1672,8 +1672,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-               page = lru_to_page(src);
-               prefetchw_prev_lru_page(page, src, flags);
--              VM_BUG_ON_PAGE(!PageLRU(page), page);
--
-               nr_pages = compound_nr(page);
-               total_scan += nr_pages;
-@@ -1770,21 +1768,18 @@ int isolate_lru_page(struct page *page)
-       VM_BUG_ON_PAGE(!page_count(page), page);
-       WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
--      if (PageLRU(page)) {
-+      if (TestClearPageLRU(page)) {
-               pg_data_t *pgdat = page_pgdat(page);
-               struct lruvec *lruvec;
--              spin_lock_irq(&pgdat->lru_lock);
-+              get_page(page);
-               lruvec = mem_cgroup_page_lruvec(page, pgdat);
--              if (PageLRU(page)) {
--                      int lru = page_lru(page);
--                      get_page(page);
--                      ClearPageLRU(page);
--                      del_page_from_lru_list(page, lruvec, lru);
--                      ret = 0;
--              }
-+              spin_lock_irq(&pgdat->lru_lock);
-+              del_page_from_lru_list(page, lruvec, page_lru(page));
-               spin_unlock_irq(&pgdat->lru_lock);
-+              ret = 0;
-       }
-+
-       return ret;
- }
-@@ -4291,6 +4286,10 @@ void check_move_unevictable_pages(struct pagevec *pvec)
-               nr_pages = thp_nr_pages(page);
-               pgscanned += nr_pages;
-+              /* block memcg migration during page moving between lru */
-+              if (!TestClearPageLRU(page))
-+                      continue;
-+
-               if (pagepgdat != pgdat) {
-                       if (pgdat)
-                               spin_unlock_irq(&pgdat->lru_lock);
-@@ -4299,10 +4298,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
-               }
-               lruvec = mem_cgroup_page_lruvec(page, pgdat);
--              if (!PageLRU(page) || !PageUnevictable(page))
--                      continue;
--
--              if (page_evictable(page)) {
-+              if (page_evictable(page) && PageUnevictable(page)) {
-                       enum lru_list lru = page_lru_base_type(page);
-                       VM_BUG_ON_PAGE(PageActive(page), page);
-@@ -4311,12 +4307,15 @@ void check_move_unevictable_pages(struct pagevec *pvec)
-                       add_page_to_lru_list(page, lruvec, lru);
-                       pgrescued += nr_pages;
-               }
-+              SetPageLRU(page);
-       }
-       if (pgdat) {
-               __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
-               __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-               spin_unlock_irq(&pgdat->lru_lock);
-+      } else if (pgscanned) {
-+              count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-       }
- }
- EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
--- 
-2.35.1
-
diff --git a/queue-5.10/mm-migrate-fix-thp-s-mapcount-on-isolation.patch b/queue-5.10/mm-migrate-fix-thp-s-mapcount-on-isolation.patch
deleted file mode 100644 (file)
index 21b8107..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-From 91f25a9aa0bb126c81ed361cef0f8608ac4c3f15 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 24 Nov 2022 17:55:23 +0800
-Subject: mm: migrate: fix THP's mapcount on isolation
-
-From: Gavin Shan <gshan@redhat.com>
-
-[ Upstream commit 829ae0f81ce093d674ff2256f66a714753e9ce32 ]
-
-The issue is reported when removing memory through virtio_mem device.  The
-transparent huge page, experienced copy-on-write fault, is wrongly
-regarded as pinned.  The transparent huge page is escaped from being
-isolated in isolate_migratepages_block().  The transparent huge page can't
-be migrated and the corresponding memory block can't be put into offline
-state.
-
-Fix it by replacing page_mapcount() with total_mapcount().  With this, the
-transparent huge page can be isolated and migrated, and the memory block
-can be put into offline state.  Besides, The page's refcount is increased
-a bit earlier to avoid the page is released when the check is executed.
-
-Link: https://lkml.kernel.org/r/20221124095523.31061-1-gshan@redhat.com
-Fixes: 1da2f328fa64 ("mm,thp,compaction,cma: allow THP migration for CMA allocations")
-Signed-off-by: Gavin Shan <gshan@redhat.com>
-Reported-by: Zhenyu Zhang <zhenyzha@redhat.com>
-Tested-by: Zhenyu Zhang <zhenyzha@redhat.com>
-Suggested-by: David Hildenbrand <david@redhat.com>
-Acked-by: David Hildenbrand <david@redhat.com>
-Cc: Alistair Popple <apopple@nvidia.com>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
-Cc: Matthew Wilcox <willy@infradead.org>
-Cc: William Kucharski <william.kucharski@oracle.com>
-Cc: Zi Yan <ziy@nvidia.com>
-Cc: <stable@vger.kernel.org>   [5.7+]
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/compaction.c | 22 +++++++++++-----------
- 1 file changed, 11 insertions(+), 11 deletions(-)
-
-diff --git a/mm/compaction.c b/mm/compaction.c
-index 57ce6b001b10..54d1041560c7 100644
---- a/mm/compaction.c
-+++ b/mm/compaction.c
-@@ -957,29 +957,29 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-                       goto isolate_fail;
-               }
-+              /*
-+               * Be careful not to clear PageLRU until after we're
-+               * sure the page is not being freed elsewhere -- the
-+               * page release code relies on it.
-+               */
-+              if (unlikely(!get_page_unless_zero(page)))
-+                      goto isolate_fail;
-+
-               /*
-                * Migration will fail if an anonymous page is pinned in memory,
-                * so avoid taking lru_lock and isolating it unnecessarily in an
-                * admittedly racy check.
-                */
-               mapping = page_mapping(page);
--              if (!mapping && page_count(page) > page_mapcount(page))
--                      goto isolate_fail;
-+              if (!mapping && (page_count(page) - 1) > total_mapcount(page))
-+                      goto isolate_fail_put;
-               /*
-                * Only allow to migrate anonymous pages in GFP_NOFS context
-                * because those do not depend on fs locks.
-                */
-               if (!(cc->gfp_mask & __GFP_FS) && mapping)
--                      goto isolate_fail;
--
--              /*
--               * Be careful not to clear PageLRU until after we're
--               * sure the page is not being freed elsewhere -- the
--               * page release code relies on it.
--               */
--              if (unlikely(!get_page_unless_zero(page)))
--                      goto isolate_fail;
-+                      goto isolate_fail_put;
-               /* Only take pages on LRU: a check now makes later tests safe */
-               if (!PageLRU(page))
--- 
-2.35.1
-
diff --git a/queue-5.10/mm-mlock-remove-__munlock_isolate_lru_page.patch b/queue-5.10/mm-mlock-remove-__munlock_isolate_lru_page.patch
deleted file mode 100644 (file)
index b5a8094..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-From 6649227e330b37c9583146cd7446b41771b3a7f1 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 15 Dec 2020 12:34:11 -0800
-Subject: mm/mlock: remove __munlock_isolate_lru_page()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Alex Shi <alex.shi@linux.alibaba.com>
-
-[ Upstream commit 13805a88a9bd3fb37f33dd8972d904de62796f3d ]
-
-__munlock_isolate_lru_page() only has one caller, remove it to clean up
-and simplify code.
-
-Link: https://lkml.kernel.org/r/1604566549-62481-14-git-send-email-alex.shi@linux.alibaba.com
-Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
-Acked-by: Hugh Dickins <hughd@google.com>
-Acked-by: Johannes Weiner <hannes@cmpxchg.org>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
-Cc: Alexander Duyck <alexander.duyck@gmail.com>
-Cc: Andrea Arcangeli <aarcange@redhat.com>
-Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
-Cc: "Chen, Rong A" <rong.a.chen@intel.com>
-Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
-Cc: "Huang, Ying" <ying.huang@intel.com>
-Cc: Jann Horn <jannh@google.com>
-Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
-Cc: Kirill A. Shutemov <kirill@shutemov.name>
-Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
-Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: Mel Gorman <mgorman@techsingularity.net>
-Cc: Michal Hocko <mhocko@kernel.org>
-Cc: Michal Hocko <mhocko@suse.com>
-Cc: Mika Penttilä <mika.penttila@nextfour.com>
-Cc: Minchan Kim <minchan@kernel.org>
-Cc: Shakeel Butt <shakeelb@google.com>
-Cc: Tejun Heo <tj@kernel.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
-Cc: Wei Yang <richard.weiyang@gmail.com>
-Cc: Yang Shi <yang.shi@linux.alibaba.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Stable-dep-of: 829ae0f81ce0 ("mm: migrate: fix THP's mapcount on isolation")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/mlock.c | 31 +++++++++----------------------
- 1 file changed, 9 insertions(+), 22 deletions(-)
-
-diff --git a/mm/mlock.c b/mm/mlock.c
-index 796c726a0407..d487aa864e86 100644
---- a/mm/mlock.c
-+++ b/mm/mlock.c
-@@ -105,26 +105,6 @@ void mlock_vma_page(struct page *page)
-       }
- }
--/*
-- * Isolate a page from LRU with optional get_page() pin.
-- * Assumes lru_lock already held and page already pinned.
-- */
--static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
--{
--      if (PageLRU(page)) {
--              struct lruvec *lruvec;
--
--              lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
--              if (getpage)
--                      get_page(page);
--              ClearPageLRU(page);
--              del_page_from_lru_list(page, lruvec, page_lru(page));
--              return true;
--      }
--
--      return false;
--}
--
- /*
-  * Finish munlock after successful page isolation
-  *
-@@ -296,9 +276,16 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
-                        * We already have pin from follow_page_mask()
-                        * so we can spare the get_page() here.
-                        */
--                      if (__munlock_isolate_lru_page(page, false))
-+                      if (PageLRU(page)) {
-+                              struct lruvec *lruvec;
-+
-+                              ClearPageLRU(page);
-+                              lruvec = mem_cgroup_page_lruvec(page,
-+                                                      page_pgdat(page));
-+                              del_page_from_lru_list(page, lruvec,
-+                                                      page_lru(page));
-                               continue;
--                      else
-+                      } else
-                               __munlock_isolation_failed(page);
-               } else {
-                       delta_munlocked++;
--- 
-2.35.1
-
diff --git a/queue-5.10/mm-mlock-remove-lru_lock-on-testclearpagemlocked.patch b/queue-5.10/mm-mlock-remove-lru_lock-on-testclearpagemlocked.patch
deleted file mode 100644 (file)
index 2efbb62..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-From b824ddafd0a14e7a943171ce5903b83057e0c587 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 15 Dec 2020 12:34:07 -0800
-Subject: mm/mlock: remove lru_lock on TestClearPageMlocked
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Alex Shi <alex.shi@linux.alibaba.com>
-
-[ Upstream commit 3db19aa39bac33f2e850fa1ddd67be29b192e51f ]
-
-In the func munlock_vma_page, comments mentained lru_lock needed for
-serialization with split_huge_pages.  But the page must be PageLocked as
-well as pages in split_huge_page series funcs.  Thus the PageLocked is
-enough to serialize both funcs.
-
-Further more, Hugh Dickins pointed: before splitting in
-split_huge_page_to_list, the page was unmap_page() to remove pmd/ptes
-which protect the page from munlock.  Thus, no needs to guard
-__split_huge_page_tail for mlock clean, just keep the lru_lock there for
-isolation purpose.
-
-LKP found a preempt issue on __mod_zone_page_state which need change to
-mod_zone_page_state.  Thanks!
-
-Link: https://lkml.kernel.org/r/1604566549-62481-13-git-send-email-alex.shi@linux.alibaba.com
-Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
-Acked-by: Hugh Dickins <hughd@google.com>
-Acked-by: Johannes Weiner <hannes@cmpxchg.org>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
-Cc: Alexander Duyck <alexander.duyck@gmail.com>
-Cc: Andrea Arcangeli <aarcange@redhat.com>
-Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
-Cc: "Chen, Rong A" <rong.a.chen@intel.com>
-Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
-Cc: "Huang, Ying" <ying.huang@intel.com>
-Cc: Jann Horn <jannh@google.com>
-Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
-Cc: Kirill A. Shutemov <kirill@shutemov.name>
-Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
-Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
-Cc: Mel Gorman <mgorman@techsingularity.net>
-Cc: Michal Hocko <mhocko@kernel.org>
-Cc: Michal Hocko <mhocko@suse.com>
-Cc: Mika Penttilä <mika.penttila@nextfour.com>
-Cc: Minchan Kim <minchan@kernel.org>
-Cc: Shakeel Butt <shakeelb@google.com>
-Cc: Tejun Heo <tj@kernel.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
-Cc: Wei Yang <richard.weiyang@gmail.com>
-Cc: Yang Shi <yang.shi@linux.alibaba.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Stable-dep-of: 829ae0f81ce0 ("mm: migrate: fix THP's mapcount on isolation")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- mm/mlock.c | 26 +++++---------------------
- 1 file changed, 5 insertions(+), 21 deletions(-)
-
-diff --git a/mm/mlock.c b/mm/mlock.c
-index 884b1216da6a..796c726a0407 100644
---- a/mm/mlock.c
-+++ b/mm/mlock.c
-@@ -187,40 +187,24 @@ static void __munlock_isolation_failed(struct page *page)
- unsigned int munlock_vma_page(struct page *page)
- {
-       int nr_pages;
--      pg_data_t *pgdat = page_pgdat(page);
-       /* For try_to_munlock() and to serialize with page migration */
-       BUG_ON(!PageLocked(page));
--
-       VM_BUG_ON_PAGE(PageTail(page), page);
--      /*
--       * Serialize with any parallel __split_huge_page_refcount() which
--       * might otherwise copy PageMlocked to part of the tail pages before
--       * we clear it in the head page. It also stabilizes thp_nr_pages().
--       */
--      spin_lock_irq(&pgdat->lru_lock);
--
-       if (!TestClearPageMlocked(page)) {
-               /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
--              nr_pages = 1;
--              goto unlock_out;
-+              return 0;
-       }
-       nr_pages = thp_nr_pages(page);
--      __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-+      mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
--      if (__munlock_isolate_lru_page(page, true)) {
--              spin_unlock_irq(&pgdat->lru_lock);
-+      if (!isolate_lru_page(page))
-               __munlock_isolated_page(page);
--              goto out;
--      }
--      __munlock_isolation_failed(page);
--
--unlock_out:
--      spin_unlock_irq(&pgdat->lru_lock);
-+      else
-+              __munlock_isolation_failed(page);
--out:
-       return nr_pages - 1;
- }
--- 
-2.35.1
-
diff --git a/queue-5.10/mm-vmscan-__isolate_lru_page_prepare-cleanup.patch b/queue-5.10/mm-vmscan-__isolate_lru_page_prepare-cleanup.patch
deleted file mode 100644 (file)
index 5ab6584..0000000
+++ /dev/null
@@ -1,183 +0,0 @@
-From af78db2daeeeec6283747a8d591daf6df57e1961 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 24 Feb 2021 12:08:01 -0800
-Subject: mm/vmscan: __isolate_lru_page_prepare() cleanup
-
-From: Alex Shi <alex.shi@linux.alibaba.com>
-
-[ Upstream commit c2135f7c570bc274035834848d9bf46ea89ba763 ]
-
-The function just returns 2 results, so using a 'switch' to deal with its
-result is unnecessary.  Also simplify it to a bool func as Vlastimil
-suggested.
-
-Also remove 'goto' by reusing list_move(), and take Matthew Wilcox's
-suggestion to update comments in function.
-
-Link: https://lkml.kernel.org/r/728874d7-2d93-4049-68c1-dcc3b2d52ccd@linux.alibaba.com
-Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
-Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
-Acked-by: Vlastimil Babka <vbabka@suse.cz>
-Cc: Matthew Wilcox <willy@infradead.org>
-Cc: Hugh Dickins <hughd@google.com>
-Cc: Yu Zhao <yuzhao@google.com>
-Cc: Michal Hocko <mhocko@suse.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-Stable-dep-of: 829ae0f81ce0 ("mm: migrate: fix THP's mapcount on isolation")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/swap.h |  2 +-
- mm/compaction.c      |  2 +-
- mm/vmscan.c          | 68 ++++++++++++++++++++------------------------
- 3 files changed, 33 insertions(+), 39 deletions(-)
-
-diff --git a/include/linux/swap.h b/include/linux/swap.h
-index 3577d3a6ec37..394d5de5d4b4 100644
---- a/include/linux/swap.h
-+++ b/include/linux/swap.h
-@@ -358,7 +358,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- extern unsigned long zone_reclaimable_pages(struct zone *zone);
- extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                       gfp_t gfp_mask, nodemask_t *mask);
--extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
-+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
- extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
-                                                 unsigned long nr_pages,
-                                                 gfp_t gfp_mask,
-diff --git a/mm/compaction.c b/mm/compaction.c
-index ba3e907f03b7..ea46aadc7c21 100644
---- a/mm/compaction.c
-+++ b/mm/compaction.c
-@@ -980,7 +980,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
-               if (unlikely(!get_page_unless_zero(page)))
-                       goto isolate_fail;
--              if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
-+              if (!__isolate_lru_page_prepare(page, isolate_mode))
-                       goto isolate_fail_put;
-               /* Try isolate the page */
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 5ada402c8d95..00a47845a15b 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -1543,19 +1543,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
-  * page:      page to consider
-  * mode:      one of the LRU isolation modes defined above
-  *
-- * returns 0 on success, -ve errno on failure.
-+ * returns true on success, false on failure.
-  */
--int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
-+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
- {
--      int ret = -EBUSY;
--
-       /* Only take pages on the LRU. */
-       if (!PageLRU(page))
--              return ret;
-+              return false;
-       /* Compaction should not handle unevictable pages but CMA can do so */
-       if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
--              return ret;
-+              return false;
-       /*
-        * To minimise LRU disruption, the caller can indicate that it only
-@@ -1568,7 +1566,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
-       if (mode & ISOLATE_ASYNC_MIGRATE) {
-               /* All the caller can do on PageWriteback is block */
-               if (PageWriteback(page))
--                      return ret;
-+                      return false;
-               if (PageDirty(page)) {
-                       struct address_space *mapping;
-@@ -1584,20 +1582,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
-                        * from the page cache.
-                        */
-                       if (!trylock_page(page))
--                              return ret;
-+                              return false;
-                       mapping = page_mapping(page);
-                       migrate_dirty = !mapping || mapping->a_ops->migratepage;
-                       unlock_page(page);
-                       if (!migrate_dirty)
--                              return ret;
-+                              return false;
-               }
-       }
-       if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
--              return ret;
-+              return false;
--      return 0;
-+      return true;
- }
- /*
-@@ -1679,35 +1677,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
-                * only when the page is being freed somewhere else.
-                */
-               scan += nr_pages;
--              switch (__isolate_lru_page_prepare(page, mode)) {
--              case 0:
--                      /*
--                       * Be careful not to clear PageLRU until after we're
--                       * sure the page is not being freed elsewhere -- the
--                       * page release code relies on it.
--                       */
--                      if (unlikely(!get_page_unless_zero(page)))
--                              goto busy;
--
--                      if (!TestClearPageLRU(page)) {
--                              /*
--                               * This page may in other isolation path,
--                               * but we still hold lru_lock.
--                               */
--                              put_page(page);
--                              goto busy;
--                      }
--
--                      nr_taken += nr_pages;
--                      nr_zone_taken[page_zonenum(page)] += nr_pages;
--                      list_move(&page->lru, dst);
--                      break;
-+              if (!__isolate_lru_page_prepare(page, mode)) {
-+                      /* It is being freed elsewhere */
-+                      list_move(&page->lru, src);
-+                      continue;
-+              }
-+              /*
-+               * Be careful not to clear PageLRU until after we're
-+               * sure the page is not being freed elsewhere -- the
-+               * page release code relies on it.
-+               */
-+              if (unlikely(!get_page_unless_zero(page))) {
-+                      list_move(&page->lru, src);
-+                      continue;
-+              }
--              default:
--busy:
--                      /* else it is being freed elsewhere */
-+              if (!TestClearPageLRU(page)) {
-+                      /* Another thread is already isolating this page */
-+                      put_page(page);
-                       list_move(&page->lru, src);
-+                      continue;
-               }
-+
-+              nr_taken += nr_pages;
-+              nr_zone_taken[page_zonenum(page)] += nr_pages;
-+              list_move(&page->lru, dst);
-       }
-       /*
--- 
-2.35.1
-
diff --git a/queue-5.10/net-broadcom-add-ptp_1588_clock_optional-dependency-.patch b/queue-5.10/net-broadcom-add-ptp_1588_clock_optional-dependency-.patch
deleted file mode 100644 (file)
index 85dd491..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-From 876f244f4ef1d9bd6121dee4e3066f26e179c573 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 25 Nov 2022 19:50:03 +0800
-Subject: net: broadcom: Add PTP_1588_CLOCK_OPTIONAL dependency for BCMGENET
- under ARCH_BCM2835
-
-From: YueHaibing <yuehaibing@huawei.com>
-
-[ Upstream commit 421f8663b3a775c32f724f793264097c60028f2e ]
-
-commit 8d820bc9d12b ("net: broadcom: Fix BCMGENET Kconfig") fixes the build
-that contain 99addbe31f55 ("net: broadcom: Select BROADCOM_PHY for BCMGENET")
-and enable BCMGENET=y but PTP_1588_CLOCK_OPTIONAL=m, which otherwise
-leads to a link failure. However this may trigger a runtime failure.
-
-Fix the original issue by propagating the PTP_1588_CLOCK_OPTIONAL dependency
-of BROADCOM_PHY down to BCMGENET.
-
-Fixes: 8d820bc9d12b ("net: broadcom: Fix BCMGENET Kconfig")
-Fixes: 99addbe31f55 ("net: broadcom: Select BROADCOM_PHY for BCMGENET")
-Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
-Suggested-by: Arnd Bergmann <arnd@arndb.de>
-Signed-off-by: YueHaibing <yuehaibing@huawei.com>
-Acked-by: Arnd Bergmann <arnd@arndb.de>
-Link: https://lore.kernel.org/r/20221125115003.30308-1-yuehaibing@huawei.com
-Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- drivers/net/ethernet/broadcom/Kconfig | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
-index 7b79528d6eed..06aaeaadf2e9 100644
---- a/drivers/net/ethernet/broadcom/Kconfig
-+++ b/drivers/net/ethernet/broadcom/Kconfig
-@@ -63,6 +63,7 @@ config BCM63XX_ENET
- config BCMGENET
-       tristate "Broadcom GENET internal MAC support"
-       depends on HAS_IOMEM
-+      depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
-       select MII
-       select PHYLIB
-       select FIXED_PHY
--- 
-2.35.1
-
index 7e239fafac8006a5b48ab0f122c360a13eb50aea..ec377f3569284327d92cb3e798dea7af570989a1 100644 (file)
@@ -1,10 +1,3 @@
-mm-mlock-remove-lru_lock-on-testclearpagemlocked.patch
-mm-mlock-remove-__munlock_isolate_lru_page.patch
-mm-lru-introduce-testclearpagelru.patch
-mm-compaction-do-page-isolation-first-in-compaction.patch
-mm-vmscan-__isolate_lru_page_prepare-cleanup.patch
-mm-__isolate_lru_page_prepare-in-isolate_migratepage.patch
-mm-migrate-fix-thp-s-mapcount-on-isolation.patch
 arm64-dts-rockchip-keep-i2s1-disabled-for-gpio-funct.patch
 arm-dts-rockchip-fix-node-name-for-hym8563-rtc.patch
 arm-dts-rockchip-fix-ir-receiver-node-names.patch
@@ -69,7 +62,6 @@ drm-bridge-ti-sn65dsi86-fix-output-polarity-setting-.patch
 gpio-amd8111-fix-pci-device-reference-count-leak.patch
 e1000e-fix-tx-dispatch-condition.patch
 igb-allocate-msi-x-vector-when-testing.patch
-net-broadcom-add-ptp_1588_clock_optional-dependency-.patch
 drm-bridge-dw_hdmi-fix-preference-of-rgb-modes-over-.patch
 af_unix-get-user_ns-from-in_skb-in-unix_diag_get_exa.patch
 vmxnet3-correctly-report-encapsulated-lro-packet.patch
index 62d0fc3b6cc3221116c3d9f498c76a285a44ef60..36fd1f7c7aa08ce596757a94bc6ac9dc773f981f 100644 (file)
@@ -73,14 +73,12 @@ Link: https://lore.kernel.org/r/20221206093833.3812138-1-harshit.m.mogalapalli@o
 Signed-off-by: Jens Axboe <axboe@kernel.dk>
 Signed-off-by: Sasha Levin <sashal@kernel.org>
 ---
- io_uring/io_uring.c | 4 +++-
+ io_uring/io_uring.c |    4 +++-
  1 file changed, 3 insertions(+), 1 deletion(-)
 
-diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
-index 1279b5c5c959..eebbe8a6da0c 100644
 --- a/io_uring/io_uring.c
 +++ b/io_uring/io_uring.c
-@@ -9467,8 +9467,10 @@ static void io_tctx_exit_cb(struct callback_head *cb)
+@@ -9467,8 +9467,10 @@ static void io_tctx_exit_cb(struct callb
        /*
         * When @in_idle, we're in cancellation and it's racy to remove the
         * node. It'll be removed by the end of cancellation, just ignore it.
@@ -92,6 +90,3 @@ index 1279b5c5c959..eebbe8a6da0c 100644
                io_uring_del_tctx_node((unsigned long)work->ctx);
        complete(&work->completion);
  }
--- 
-2.35.1
-