git.ipfire.org Git - thirdparty/linux.git/commitdiff
mm: memcg: rename memcg_check_events()
author: Roman Gushchin <roman.gushchin@linux.dev>
Tue, 25 Jun 2024 00:58:59 +0000 (17:58 -0700)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 5 Jul 2024 01:05:53 +0000 (18:05 -0700)
Rename memcg_check_events() into memcg1_check_events() for consistency
with other cgroup v1-specific functions.

Link: https://lkml.kernel.org/r/20240625005906.106920-8-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol-v1.c
mm/memcontrol-v1.h
mm/memcontrol.c

index 4b2290ceace693ffd77225181d5da6810efa8c9d..d7b5c4c147324310a492f8c2218eee4f3d723291 100644 (file)
@@ -835,9 +835,9 @@ static int mem_cgroup_move_account(struct folio *folio,
 
        local_irq_disable();
        mem_cgroup_charge_statistics(to, nr_pages);
-       memcg_check_events(to, nid);
+       memcg1_check_events(to, nid);
        mem_cgroup_charge_statistics(from, -nr_pages);
-       memcg_check_events(from, nid);
+       memcg1_check_events(from, nid);
        local_irq_enable();
 out:
        return ret;
@@ -1424,7 +1424,7 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  * Check events in order.
  *
  */
-void memcg_check_events(struct mem_cgroup *memcg, int nid)
+void memcg1_check_events(struct mem_cgroup *memcg, int nid)
 {
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                return;
index 524a2c76ffc97c738b643bd12772b01bb35835b9..ef1b7037cbdcc7dfcbed6a613d825c633686acf8 100644 (file)
@@ -12,7 +12,7 @@ static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
 }
 
 void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
-void memcg_check_events(struct mem_cgroup *memcg, int nid);
+void memcg1_check_events(struct mem_cgroup *memcg, int nid);
 void memcg_oom_recover(struct mem_cgroup *memcg);
 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
                     unsigned int nr_pages);
index 5a5bd0767fb0fc91f2c624b06267e1b5722e8247..71794161271ce0dbe00ba1de746752debf451bf6 100644 (file)
@@ -2630,7 +2630,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 
        local_irq_disable();
        mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
-       memcg_check_events(memcg, folio_nid(folio));
+       memcg1_check_events(memcg, folio_nid(folio));
        local_irq_enable();
 }
 
@@ -5662,7 +5662,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
        local_irq_save(flags);
        __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
        __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
-       memcg_check_events(ug->memcg, ug->nid);
+       memcg1_check_events(ug->memcg, ug->nid);
        local_irq_restore(flags);
 
        /* drop reference from uncharge_folio */
@@ -5801,7 +5801,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 
        local_irq_save(flags);
        mem_cgroup_charge_statistics(memcg, nr_pages);
-       memcg_check_events(memcg, folio_nid(new));
+       memcg1_check_events(memcg, folio_nid(new));
        local_irq_restore(flags);
 }
 
@@ -6070,7 +6070,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
        memcg_stats_lock();
        mem_cgroup_charge_statistics(memcg, -nr_entries);
        memcg_stats_unlock();
-       memcg_check_events(memcg, folio_nid(folio));
+       memcg1_check_events(memcg, folio_nid(folio));
 
        css_put(&memcg->css);
 }