mm: rename unlock_page_lruvec_irq and its variants
author	Muchun Song <songmuchun@bytedance.com>
	Thu, 5 Mar 2026 11:52:21 +0000 (19:52 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Sat, 18 Apr 2026 07:10:44 +0000 (00:10 -0700)
It is inappropriate to use folio_lruvec_lock() variants in conjunction
with unlock_page_lruvec() variants, as this involves the inconsistent
operation of locking a folio while unlocking a page.  To rectify this, the
functions unlock_page_lruvec{_irq, _irqrestore} are renamed to
lruvec_unlock{_irq, _irqrestore}.
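
For illustration, a minimal sketch of the pairing this rename makes
consistent, using only helpers visible in the diff below (the sketch is
not part of this change, and assumes a struct folio *folio in scope):

	struct lruvec *lruvec;
	unsigned long flags;

	/* The lock side already speaks in terms of folios and lruvecs. */
	lruvec = folio_lruvec_lock_irqsave(folio, &flags);
	/* ... manipulate the folio's LRU state under lru_lock ... */
	/* After the rename, the unlock side matches the lock side. */
	lruvec_unlock_irqrestore(lruvec, flags);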

Link: https://lore.kernel.org/4e5e05271a250df4d1812e1832be65636a78c957.1772711148.git.zhengqi.arch@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Chen Ridong <chenridong@huawei.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Allen Pais <apais@linux.microsoft.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Hamza Mahfooz <hamzamahfooz@linux.microsoft.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Imran Khan <imran.f.khan@oracle.com>
Cc: Kamalesh Babulal <kamalesh.babulal@oracle.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Yosry Ahmed <yosry@kernel.org>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/compaction.c
mm/huge_memory.c
mm/mlock.c
mm/swap.c
mm/vmscan.c

index 5173a9f1672128a2ee40f63b56cb01da3293d5fc..6e88288e90d8245f17859b374a5ecd0af909790d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1479,17 +1479,17 @@ static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
        return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
 }
 
-static inline void unlock_page_lruvec(struct lruvec *lruvec)
+static inline void lruvec_unlock(struct lruvec *lruvec)
 {
        spin_unlock(&lruvec->lru_lock);
 }
 
-static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
+static inline void lruvec_unlock_irq(struct lruvec *lruvec)
 {
        spin_unlock_irq(&lruvec->lru_lock);
 }
 
-static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+static inline void lruvec_unlock_irqrestore(struct lruvec *lruvec,
                unsigned long flags)
 {
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
@@ -1511,7 +1511,7 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
                if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;
 
-               unlock_page_lruvec_irq(locked_lruvec);
+               lruvec_unlock_irq(locked_lruvec);
        }
 
        return folio_lruvec_lock_irq(folio);
@@ -1525,7 +1525,7 @@ static inline void folio_lruvec_relock_irqsave(struct folio *folio,
                if (folio_matches_lruvec(folio, *lruvecp))
                        return;
 
-               unlock_page_lruvec_irqrestore(*lruvecp, *flags);
+               lruvec_unlock_irqrestore(*lruvecp, *flags);
        }
 
        *lruvecp = folio_lruvec_lock_irqsave(folio, flags);
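
The relock helpers above exist so batched LRU walks can hold one lruvec
lock across many folios, cycling it only when a folio maps to a
different lruvec; the batch loops in mm/mlock.c and mm/swap.c below
follow this shape. A hypothetical sketch of such a caller, assuming the
post-rename names (example_batch_walk() is not a kernel function):

	static void example_batch_walk(struct folio_batch *fbatch)
	{
		struct lruvec *lruvec = NULL;
		unsigned long flags;
		unsigned int i;

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			/* Cycles the lock only when the lruvec changes. */
			folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
			/* ... update folio's LRU state under lru_lock ... */
		}
		if (lruvec)
			lruvec_unlock_irqrestore(lruvec, flags);
	}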
index 1e8f8eca318c6844c27682677a0a9ea552316828..c3e338aaa0ffb36bfd7c7148f67e15e0c499d97f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -913,7 +913,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                 */
                if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
                        if (locked) {
-                               unlock_page_lruvec_irqrestore(locked, flags);
+                               lruvec_unlock_irqrestore(locked, flags);
                                locked = NULL;
                        }
 
@@ -964,7 +964,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        }
                        /* for alloc_contig case */
                        if (locked) {
-                               unlock_page_lruvec_irqrestore(locked, flags);
+                               lruvec_unlock_irqrestore(locked, flags);
                                locked = NULL;
                        }
 
@@ -1053,7 +1053,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                        if (unlikely(page_has_movable_ops(page)) &&
                            !PageMovableOpsIsolated(page)) {
                                if (locked) {
-                                       unlock_page_lruvec_irqrestore(locked, flags);
+                                       lruvec_unlock_irqrestore(locked, flags);
                                        locked = NULL;
                                }
 
@@ -1158,7 +1158,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                /* If we already hold the lock, we can skip some rechecking */
                if (lruvec != locked) {
                        if (locked)
-                               unlock_page_lruvec_irqrestore(locked, flags);
+                               lruvec_unlock_irqrestore(locked, flags);
 
                        compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
                        locked = lruvec;
@@ -1226,7 +1226,7 @@ isolate_success_no_list:
 isolate_fail_put:
                /* Avoid potential deadlock in freeing page under lru_lock */
                if (locked) {
-                       unlock_page_lruvec_irqrestore(locked, flags);
+                       lruvec_unlock_irqrestore(locked, flags);
                        locked = NULL;
                }
                folio_put(folio);
@@ -1242,7 +1242,7 @@ isolate_fail:
                 */
                if (nr_isolated) {
                        if (locked) {
-                               unlock_page_lruvec_irqrestore(locked, flags);
+                               lruvec_unlock_irqrestore(locked, flags);
                                locked = NULL;
                        }
                        putback_movable_pages(&cc->migratepages);
@@ -1274,7 +1274,7 @@ isolate_fail:
 
 isolate_abort:
        if (locked)
-               unlock_page_lruvec_irqrestore(locked, flags);
+               lruvec_unlock_irqrestore(locked, flags);
        if (folio) {
                folio_set_lru(folio);
                folio_put(folio);
index 42c983821c03116464ce2872f7c67ef13b47730b..958b580c661964a74ab1eb596a045a24fec07e76 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3994,7 +3994,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
                folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
 
                if (do_lru)
-                       unlock_page_lruvec(lruvec);
+                       lruvec_unlock(lruvec);
 
                if (ci)
                        swap_cluster_unlock(ci);
index fdbd1434a35fa616e91c80926125cace119890c6..8c227fefa2df0e6ab31350b7852d39d66696e278 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -205,7 +205,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
        }
 
        if (lruvec)
-               unlock_page_lruvec_irq(lruvec);
+               lruvec_unlock_irq(lruvec);
        folios_put(fbatch);
 }
 
index 78b4aa811fc61babc96fa3ce18c96cccd824396b..23df893e2ed71ab7a907a2234188348d33e0b4ef 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -91,7 +91,7 @@ static void page_cache_release(struct folio *folio)
 
        __page_cache_release(folio, &lruvec, &flags);
        if (lruvec)
-               unlock_page_lruvec_irqrestore(lruvec, flags);
+               lruvec_unlock_irqrestore(lruvec, flags);
 }
 
 void __folio_put(struct folio *folio)
@@ -175,7 +175,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
        }
 
        if (lruvec)
-               unlock_page_lruvec_irqrestore(lruvec, flags);
+               lruvec_unlock_irqrestore(lruvec, flags);
        folios_put(fbatch);
 }
 
@@ -349,7 +349,7 @@ void folio_activate(struct folio *folio)
 
        lruvec = folio_lruvec_lock_irq(folio);
        lru_activate(lruvec, folio);
-       unlock_page_lruvec_irq(lruvec);
+       lruvec_unlock_irq(lruvec);
        folio_set_lru(folio);
 }
 #endif
@@ -963,7 +963,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 
                if (folio_is_zone_device(folio)) {
                        if (lruvec) {
-                               unlock_page_lruvec_irqrestore(lruvec, flags);
+                               lruvec_unlock_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        if (folio_ref_sub_and_test(folio, nr_refs))
@@ -977,7 +977,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
                /* hugetlb has its own memcg */
                if (folio_test_hugetlb(folio)) {
                        if (lruvec) {
-                               unlock_page_lruvec_irqrestore(lruvec, flags);
+                               lruvec_unlock_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        free_huge_folio(folio);
@@ -991,7 +991,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
                j++;
        }
        if (lruvec)
-               unlock_page_lruvec_irqrestore(lruvec, flags);
+               lruvec_unlock_irqrestore(lruvec, flags);
        if (!j) {
                folio_batch_reinit(folios);
                return;
index 4bf091b1c8afe01f381640b47b8a15510baf3e62..88bb3337e5ebf7e3b3f0288be854178bd6526cdf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1831,7 +1831,7 @@ bool folio_isolate_lru(struct folio *folio)
                folio_get(folio);
                lruvec = folio_lruvec_lock_irq(folio);
                lruvec_del_folio(lruvec, folio);
-               unlock_page_lruvec_irq(lruvec);
+               lruvec_unlock_irq(lruvec);
                ret = true;
        }
 
@@ -7898,7 +7898,7 @@ void check_move_unevictable_folios(struct folio_batch *fbatch)
        if (lruvec) {
                __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
                __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-               unlock_page_lruvec_irq(lruvec);
+               lruvec_unlock_irq(lruvec);
        } else if (pgscanned) {
                count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
        }