void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
-void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-
-static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_kmem_state(p, idx, val);
- local_irq_restore(flags);
-}
+void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}
-static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
-{
- struct page *page = virt_to_head_page(p);
-
- mod_node_page_state(page_pgdat(page), idx, val);
-}
-
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}
#endif
} __aligned(8);
-static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
+static inline void inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_kmem_state(p, idx, 1);
+ mod_lruvec_kmem_state(p, idx, 1);
}
-static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
+static inline void dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_kmem_state(p, idx, -1);
+ mod_lruvec_kmem_state(p, idx, -1);
}
static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);
-void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
+void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
pg_data_t *pgdat = page_pgdat(virt_to_page(p));
struct mem_cgroup *memcg;
if (WARN_ON_ONCE(node->count != node->nr_values))
goto out_invalid;
xa_delete_node(node, workingset_update_node);
- __inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);
+ inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);
out_invalid:
xa_unlock_irq(&mapping->i_pages);