git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
mm: vmscan: refactor move_folios_to_lru()
author: Muchun Song <songmuchun@bytedance.com>
Thu, 5 Mar 2026 11:52:23 +0000 (19:52 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Sat, 18 Apr 2026 07:10:44 +0000 (00:10 -0700)
In a subsequent patch, we'll reparent the LRU folios.  The folios that are
moved to the appropriate LRU list can undergo reparenting during the
move_folios_to_lru() process.  Hence, it's incorrect for the caller to
hold a lruvec lock.  Instead, we should utilize the more general interface
of folio_lruvec_relock_irq() to obtain the correct lruvec lock.

This patch involves only code refactoring and doesn't introduce any
functional changes.

Link: https://lore.kernel.org/6f1dac88b61e2e3cb7a3e90bacdf06b654acfc15.1772711148.git.zhengqi.arch@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Allen Pais <apais@linux.microsoft.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chen Ridong <chenridong@huawei.com>
Cc: David Hildenbrand <david@kernel.org>
Cc: Hamza Mahfooz <hamzamahfooz@linux.microsoft.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Imran Khan <imran.f.khan@oracle.com>
Cc: Kamalesh Babulal <kamalesh.babulal@oracle.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Yosry Ahmed <yosry@kernel.org>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index d88d00f0c2cd7b371077da3fa01b14d31910978e..031fbd35ae100d928f692ad67168e96bea2b662c 100644 (file)
@@ -1885,24 +1885,27 @@ static bool too_many_isolated(struct pglist_data *pgdat, int file,
 /*
  * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
  *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate lruvec.
+ *
+ * Note: The caller must not hold any lruvec lock.
  */
-static unsigned int move_folios_to_lru(struct lruvec *lruvec,
-               struct list_head *list)
+static unsigned int move_folios_to_lru(struct list_head *list)
 {
        int nr_pages, nr_moved = 0;
+       struct lruvec *lruvec = NULL;
        struct folio_batch free_folios;
 
        folio_batch_init(&free_folios);
        while (!list_empty(list)) {
                struct folio *folio = lru_to_folio(list);
 
+               lruvec = folio_lruvec_relock_irq(folio, lruvec);
                VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
                list_del(&folio->lru);
                if (unlikely(!folio_evictable(folio))) {
-                       spin_unlock_irq(&lruvec->lru_lock);
+                       lruvec_unlock_irq(lruvec);
                        folio_putback_lru(folio);
-                       spin_lock_irq(&lruvec->lru_lock);
+                       lruvec = NULL;
                        continue;
                }
 
@@ -1924,19 +1927,15 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 
                        folio_unqueue_deferred_split(folio);
                        if (folio_batch_add(&free_folios, folio) == 0) {
-                               spin_unlock_irq(&lruvec->lru_lock);
+                               lruvec_unlock_irq(lruvec);
                                mem_cgroup_uncharge_folios(&free_folios);
                                free_unref_folios(&free_folios);
-                               spin_lock_irq(&lruvec->lru_lock);
+                               lruvec = NULL;
                        }
 
                        continue;
                }
 
-               /*
-                * All pages were isolated from the same lruvec (and isolation
-                * inhibits memcg migration).
-                */
                VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
                lruvec_add_folio(lruvec, folio);
                nr_pages = folio_nr_pages(folio);
@@ -1945,11 +1944,12 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
                        workingset_age_nonresident(lruvec, nr_pages);
        }
 
+       if (lruvec)
+               lruvec_unlock_irq(lruvec);
+
        if (free_folios.nr) {
-               spin_unlock_irq(&lruvec->lru_lock);
                mem_cgroup_uncharge_folios(&free_folios);
                free_unref_folios(&free_folios);
-               spin_lock_irq(&lruvec->lru_lock);
        }
 
        return nr_moved;
@@ -2016,8 +2016,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
        nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false,
                                         lruvec_memcg(lruvec));
 
-       spin_lock_irq(&lruvec->lru_lock);
-       move_folios_to_lru(lruvec, &folio_list);
+       move_folios_to_lru(&folio_list);
 
        mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
                                        stat.nr_demoted);
@@ -2026,6 +2025,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
        mod_lruvec_state(lruvec, item, nr_reclaimed);
        mod_lruvec_state(lruvec, PGSTEAL_ANON + file, nr_reclaimed);
 
+       spin_lock_irq(&lruvec->lru_lock);
        lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
                                        nr_scanned - nr_reclaimed);
 
@@ -2162,16 +2162,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
        /*
         * Move folios back to the lru list.
         */
-       spin_lock_irq(&lruvec->lru_lock);
-
-       nr_activate = move_folios_to_lru(lruvec, &l_active);
-       nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
+       nr_activate = move_folios_to_lru(&l_active);
+       nr_deactivate = move_folios_to_lru(&l_inactive);
 
        count_vm_events(PGDEACTIVATE, nr_deactivate);
        count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
        mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 
+       spin_lock_irq(&lruvec->lru_lock);
        lru_note_cost_unlock_irq(lruvec, file, 0, nr_rotated);
        trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
                        nr_deactivate, nr_rotated, sc->priority, file);
@@ -4749,14 +4747,14 @@ retry:
                        set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active));
        }
 
-       spin_lock_irq(&lruvec->lru_lock);
-
-       move_folios_to_lru(lruvec, &list);
+       move_folios_to_lru(&list);
 
        walk = current->reclaim_state->mm_walk;
        if (walk && walk->batched) {
                walk->lruvec = lruvec;
+               spin_lock_irq(&lruvec->lru_lock);
                reset_batch_size(walk);
+               spin_unlock_irq(&lruvec->lru_lock);
        }
 
        mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
@@ -4766,8 +4764,6 @@ retry:
        mod_lruvec_state(lruvec, item, reclaimed);
        mod_lruvec_state(lruvec, PGSTEAL_ANON + type, reclaimed);
 
-       spin_unlock_irq(&lruvec->lru_lock);
-
        list_splice_init(&clean, &list);
 
        if (!list_empty(&list)) {