memcg: remove __mod_lruvec_state
author:    Shakeel Butt <shakeel.butt@linux.dev>
           Mon, 10 Nov 2025 23:20:07 +0000 (15:20 -0800)
committer: Andrew Morton <akpm@linux-foundation.org>
           Mon, 24 Nov 2025 23:08:54 +0000 (15:08 -0800)
__mod_lruvec_state() is already safe against irqs, so there is no need to
have a separate interface (i.e.  mod_lruvec_state) which wraps calls to it
with irq disabling and reenabling.  Let's rename __mod_lruvec_state() to
mod_lruvec_state().

Link: https://lkml.kernel.org/r/20251110232008.1352063-4-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
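
A minimal sketch of the caller-visible change (names taken from the
vmstat.h hunk below; the wrapper shown is the one this patch deletes):

	/* Before: the short name was an irq-disabling wrapper. */
	static inline void mod_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
	{
		unsigned long flags;

		local_irq_save(flags);
		__mod_lruvec_state(lruvec, idx, val);	/* already irq-safe */
		local_irq_restore(flags);
	}

	/* After: the raw helper takes over the short name, so every
	 * caller writes simply: */
	mod_lruvec_state(lruvec, idx, val);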
include/linux/mm_inline.h
include/linux/vmstat.h
mm/memcontrol.c
mm/migrate.c
mm/vmscan.c

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index ca7a18351797e3adaa9905363a081a6bb8a76d07..b58f34c4fe92ebb854932b01be43b593d60097b8 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -44,7 +44,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
        lockdep_assert_held(&lruvec->lru_lock);
        WARN_ON_ONCE(nr_pages != (int)nr_pages);
 
-       __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
+       mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
 }
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 11a37aaa4dd9c58dced6ec89146ee2e50255df57..4eb7753e6e5c45a347d81da91cc8669b1d3b0bd6 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -520,19 +520,9 @@ static inline const char *vm_event_name(enum vm_event_item item)
 
 #ifdef CONFIG_MEMCG
 
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val);
 
-static inline void mod_lruvec_state(struct lruvec *lruvec,
-                                   enum node_stat_item idx, int val)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __mod_lruvec_state(lruvec, idx, val);
-       local_irq_restore(flags);
-}
-
 void __lruvec_stat_mod_folio(struct folio *folio,
                             enum node_stat_item idx, int val);
 
@@ -554,12 +544,6 @@ static inline void mod_lruvec_page_state(struct page *page,
 
 #else
 
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
-                                     enum node_stat_item idx, int val)
-{
-       mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
 static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
 {
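
For !CONFIG_MEMCG builds, the surviving stub presumably keeps the body that
the deleted __ variant had (the hunk truncates before the function body, so
this is a sketch under that assumption):

	static inline void mod_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
	{
		/* No memcg accounting: only the per-node counter exists,
		 * and mod_node_page_state() is itself irq-safe. */
		mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
	}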
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ae154f51931eb8f27ef590dbd3dfe05d0d45908f..9a659f16af778e78904edcffe9d68f6eb46f81ec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -757,7 +757,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
 }
 
 /**
- * __mod_lruvec_state - update lruvec memory statistics
+ * mod_lruvec_state - update lruvec memory statistics
  * @lruvec: the lruvec
  * @idx: the stat item
  * @val: delta to add to the counter, can be negative
@@ -766,7 +766,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
  * function updates all three counters that are affected by a
  * change of state at this level: per-node, per-cgroup, per-lruvec.
  */
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val)
 {
        /* Update node */
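
Pieced together from this hunk, the renamed function's shape is roughly the
following (the body past the "Update node" comment lies outside the diff
context, and the mem_cgroup_disabled() guard is an assumption):

	void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
	{
		/* Update node */
		mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

		/* Update memcg and per-lruvec stats (assumed guard; the
		 * diff shows only the signature) */
		if (!mem_cgroup_disabled())
			mod_memcg_lruvec_state(lruvec, idx, val);
	}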
@@ -794,7 +794,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
        }
 
        lruvec = mem_cgroup_lruvec(memcg, pgdat);
-       __mod_lruvec_state(lruvec, idx, val);
+       mod_lruvec_state(lruvec, idx, val);
        rcu_read_unlock();
 }
 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
@@ -818,7 +818,7 @@ void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
                mod_node_page_state(pgdat, idx, val);
        } else {
                lruvec = mem_cgroup_lruvec(memcg, pgdat);
-               __mod_lruvec_state(lruvec, idx, val);
+               mod_lruvec_state(lruvec, idx, val);
        }
        rcu_read_unlock();
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index b2ad78bf85d5b1e70c9d888e6dd8c5559c754873..5169f9717f60693689c391a9ec57dc5304551d95 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -675,27 +675,27 @@ static int __folio_migrate_mapping(struct address_space *mapping,
                old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
                new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-               __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
-               __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
+               mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+               mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
                if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
-                       __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
-                       __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
+                       mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+                       mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 
                        if (folio_test_pmd_mappable(folio)) {
-                               __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
-                               __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
+                               mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
+                               mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
                        }
                }
 #ifdef CONFIG_SWAP
                if (folio_test_swapcache(folio)) {
-                       __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
-                       __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
+                       mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
+                       mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
                }
 #endif
                if (dirty && mapping_can_writeback(mapping)) {
-                       __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+                       mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
                        __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
-                       __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+                       mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
                        __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
                }
        }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 51ffd32e6e019b43ffd60cc9424b0b4e89fb3e8f..720772baf2a7c2d9b551849de7e22b2f0c2962bc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2018,7 +2018,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
        spin_lock_irq(&lruvec->lru_lock);
        move_folios_to_lru(lruvec, &folio_list);
 
-       __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+       mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
                                        stat.nr_demoted);
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
        item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
@@ -4744,7 +4744,7 @@ retry:
                reset_batch_size(walk);
        }
 
-       __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+       mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
                                        stat.nr_demoted);
 
        item = PGSTEAL_KSWAPD + reclaimer_offset(sc);