memcg: remove __lruvec_stat_mod_folio
author    Shakeel Butt <shakeel.butt@linux.dev>
          Mon, 10 Nov 2025 23:20:08 +0000 (15:20 -0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 24 Nov 2025 23:08:54 +0000 (15:08 -0800)
__lruvec_stat_mod_folio() is already safe against irqs, so there is no
need to have a separate interface (i.e.  lruvec_stat_mod_folio) which
wraps calls to it with irq disabling and reenabling.  Let's rename
__lruvec_stat_mod_folio() to lruvec_stat_mod_folio().
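To illustrate, the wrapper removed below was only an irq dance around an
already irq-safe callee, so call sites now reduce to a direct call.  A
minimal sketch of the pattern (mirroring the hunks below, not new code):

    /* Removed: the irq save/restore duplicated the callee's own protection. */
    static inline void lruvec_stat_mod_folio(struct folio *folio,
                                             enum node_stat_item idx, int val)
    {
            unsigned long flags;

            local_irq_save(flags);
            __lruvec_stat_mod_folio(folio, idx, val);
            local_irq_restore(flags);
    }

    /* After the rename, callers are safe from any context, e.g.: */
    lruvec_stat_mod_folio(folio, NR_FILE_PAGES, folio_nr_pages(folio));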

Link: https://lkml.kernel.org/r/20251110232008.1352063-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/vmstat.h
mm/filemap.c
mm/huge_memory.c
mm/khugepaged.c
mm/memcontrol.c
mm/page-writeback.c
mm/rmap.c
mm/shmem.c

index 4eb7753e6e5c45a347d81da91cc8669b1d3b0bd6..3398a345bda89e31a087a82ad2d6eb7c9f40cb5f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -523,19 +523,9 @@ static inline const char *vm_event_name(enum vm_event_item item)
 void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val);
 
-void __lruvec_stat_mod_folio(struct folio *folio,
+void lruvec_stat_mod_folio(struct folio *folio,
                             enum node_stat_item idx, int val);
 
-static inline void lruvec_stat_mod_folio(struct folio *folio,
-                                        enum node_stat_item idx, int val)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __lruvec_stat_mod_folio(folio, idx, val);
-       local_irq_restore(flags);
-}
-
 static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
 {
@@ -550,12 +540,6 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
        mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 }
 
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
-                                        enum node_stat_item idx, int val)
-{
-       mod_node_page_state(folio_pgdat(folio), idx, val);
-}
-
 static inline void lruvec_stat_mod_folio(struct folio *folio,
                                         enum node_stat_item idx, int val)
 {
@@ -570,18 +554,6 @@ static inline void mod_lruvec_page_state(struct page *page,
 
 #endif /* CONFIG_MEMCG */
 
-static inline void __lruvec_stat_add_folio(struct folio *folio,
-                                          enum node_stat_item idx)
-{
-       __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
-}
-
-static inline void __lruvec_stat_sub_folio(struct folio *folio,
-                                          enum node_stat_item idx)
-{
-       __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
-}
-
 static inline void lruvec_stat_add_folio(struct folio *folio,
                                         enum node_stat_item idx)
 {
index 07634b7d9934ce2b9fa1d1a8589801d0918cb546..7d15a9c216efe3e8d609ee59a15942197fdaf158 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -182,13 +182,13 @@ static void filemap_unaccount_folio(struct address_space *mapping,
 
        nr = folio_nr_pages(folio);
 
-       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+       lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        if (folio_test_swapbacked(folio)) {
-               __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+               lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
                if (folio_test_pmd_mappable(folio))
-                       __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
+                       lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
        } else if (folio_test_pmd_mappable(folio)) {
-               __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
+               lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
                filemap_nr_thps_dec(mapping);
        }
        if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
@@ -844,13 +844,13 @@ void replace_page_cache_folio(struct folio *old, struct folio *new)
        old->mapping = NULL;
        /* hugetlb pages do not participate in page cache accounting. */
        if (!folio_test_hugetlb(old))
-               __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
+               lruvec_stat_sub_folio(old, NR_FILE_PAGES);
        if (!folio_test_hugetlb(new))
-               __lruvec_stat_add_folio(new, NR_FILE_PAGES);
+               lruvec_stat_add_folio(new, NR_FILE_PAGES);
        if (folio_test_swapbacked(old))
-               __lruvec_stat_sub_folio(old, NR_SHMEM);
+               lruvec_stat_sub_folio(old, NR_SHMEM);
        if (folio_test_swapbacked(new))
-               __lruvec_stat_add_folio(new, NR_SHMEM);
+               lruvec_stat_add_folio(new, NR_SHMEM);
        xas_unlock_irq(&xas);
        if (free_folio)
                free_folio(old);
@@ -933,9 +933,9 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 
                /* hugetlb pages do not participate in page cache accounting */
                if (!huge) {
-                       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+                       lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                        if (folio_test_pmd_mappable(folio))
-                               __lruvec_stat_mod_folio(folio,
+                               lruvec_stat_mod_folio(folio,
                                                NR_FILE_THPS, nr);
                }
 
index 53a8d380eab2dafcd7fbb94d46ac2c790dc5af31..7af3e037d8914665460aba5bc8b4c098debd54bd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3783,10 +3783,10 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
                        if (folio_test_pmd_mappable(folio) &&
                            new_order < HPAGE_PMD_ORDER) {
                                if (folio_test_swapbacked(folio)) {
-                                       __lruvec_stat_mod_folio(folio,
+                                       lruvec_stat_mod_folio(folio,
                                                        NR_SHMEM_THPS, -nr);
                                } else {
-                                       __lruvec_stat_mod_folio(folio,
+                                       lruvec_stat_mod_folio(folio,
                                                        NR_FILE_THPS, -nr);
                                        filemap_nr_thps_dec(mapping);
                                }
index 40f9d5939aa5b496278bc3b8255977b2a6415cc6..89c33ef7aac3edf8add88ff62c325bc4ffddb74b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2195,14 +2195,14 @@ immap_locked:
        }
 
        if (is_shmem)
-               __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
+               lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
        else
-               __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
+               lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
 
        if (nr_none) {
-               __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
+               lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
                /* nr_none is always 0 for non-shmem. */
-               __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
+               lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
        }
 
        /*
index 9a659f16af778e78904edcffe9d68f6eb46f81ec..9b07db2cb23284a37aad5216312d80937073a700 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -777,7 +777,7 @@ void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                mod_memcg_lruvec_state(lruvec, idx, val);
 }
 
-void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
+void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
                             int val)
 {
        struct mem_cgroup *memcg;
@@ -797,7 +797,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
        mod_lruvec_state(lruvec, idx, val);
        rcu_read_unlock();
 }
-EXPORT_SYMBOL(__lruvec_stat_mod_folio);
+EXPORT_SYMBOL(lruvec_stat_mod_folio);
 
 void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 {
index 757bc4d3b5b5515c5b69fdeaaecb8307d8cc451b..d6b339cc876d89054831be178e5212be0a71c67f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2658,7 +2658,7 @@ static void folio_account_dirtied(struct folio *folio,
                inode_attach_wb(inode, folio);
                wb = inode_to_wb(inode);
 
-               __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+               lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
                __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
                __node_stat_mod_folio(folio, NR_DIRTIED, nr);
                wb_stat_mod(wb, WB_RECLAIMABLE, nr);
index d871f2eb821c746af846322bd8c76b22a6d61d12..f955f02d570ed8bcf770804a829cebc82cfdd2f3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1212,12 +1212,12 @@ static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
 
        if (nr) {
                idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
-               __lruvec_stat_mod_folio(folio, idx, nr);
+               lruvec_stat_mod_folio(folio, idx, nr);
        }
        if (nr_pmdmapped) {
                if (folio_test_anon(folio)) {
                        idx = NR_ANON_THPS;
-                       __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+                       lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
                } else {
                        /* NR_*_PMDMAPPED are not maintained per-memcg */
                        idx = folio_test_swapbacked(folio) ?
index fc835b3e4914a76f181342ab87cc37c0171d9661..ad18172ff831b3131f509d06df65969b49f4ca80 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -871,9 +871,9 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 static void shmem_update_stats(struct folio *folio, int nr_pages)
 {
        if (folio_test_pmd_mappable(folio))
-               __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
-       __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
-       __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+               lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+       lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+       lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
 }
 
 /*