void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
		      int val);
-void __lruvec_stat_mod_folio(struct folio *folio,
+void lruvec_stat_mod_folio(struct folio *folio,
			     enum node_stat_item idx, int val);
-static inline void lruvec_stat_mod_folio(struct folio *folio,
-					 enum node_stat_item idx, int val)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__lruvec_stat_mod_folio(folio, idx, val);
-	local_irq_restore(flags);
-}
-
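With the hunk above, the CONFIG_MEMCG declarations are left with a single entry point: the __-prefixed prototype is renamed and the inline wrapper that disabled interrupts around it is removed, so callers no longer need a separate irq-safe spelling. A minimal sketch of the resulting declarations, assuming nothing else in this block changes:

/* Sketch (assumed result, not part of the patch): CONFIG_MEMCG declarations */
void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
		      int val);
void lruvec_stat_mod_folio(struct folio *folio,
			   enum node_stat_item idx, int val);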
static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
-					   enum node_stat_item idx, int val)
-{
-	mod_node_page_state(folio_pgdat(folio), idx, val);
-}
-
static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
#endif /* CONFIG_MEMCG */
-static inline void __lruvec_stat_add_folio(struct folio *folio,
-					   enum node_stat_item idx)
-{
-	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
-}
-
-static inline void __lruvec_stat_sub_folio(struct folio *folio,
-					   enum node_stat_item idx)
-{
-	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
-}
-
static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
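The lruvec_stat_add_folio() context above is cut off at its opening brace; for reference, a sketch of how the two surviving helpers presumably read once the __-prefixed pair is gone, mirroring the removed bodies but built on lruvec_stat_mod_folio(). The hunks that follow appear to be caller conversions in the page cache code.

/* Sketch (assumed; mirrors the removed __-prefixed helpers above) */
static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}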
	nr = folio_nr_pages(folio);
-	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+	lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	if (folio_test_swapbacked(folio)) {
-		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+		lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
		if (folio_test_pmd_mappable(folio))
-			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
+			lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
	} else if (folio_test_pmd_mappable(folio)) {
-		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
+		lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
		filemap_nr_thps_dec(mapping);
	}
	if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!folio_test_hugetlb(old))
-		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
+		lruvec_stat_sub_folio(old, NR_FILE_PAGES);
	if (!folio_test_hugetlb(new))
-		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
+		lruvec_stat_add_folio(new, NR_FILE_PAGES);
	if (folio_test_swapbacked(old))
-		__lruvec_stat_sub_folio(old, NR_SHMEM);
+		lruvec_stat_sub_folio(old, NR_SHMEM);
	if (folio_test_swapbacked(new))
-		__lruvec_stat_add_folio(new, NR_SHMEM);
+		lruvec_stat_add_folio(new, NR_SHMEM);
	xas_unlock_irq(&xas);
	if (free_folio)
		free_folio(old);
	/* hugetlb pages do not participate in page cache accounting */
	if (!huge) {
-		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+		lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		if (folio_test_pmd_mappable(folio))
-			__lruvec_stat_mod_folio(folio,
+			lruvec_stat_mod_folio(folio,
					NR_FILE_THPS, nr);
	}
	if (folio_test_pmd_mappable(folio) &&
	    new_order < HPAGE_PMD_ORDER) {
		if (folio_test_swapbacked(folio)) {
-			__lruvec_stat_mod_folio(folio,
+			lruvec_stat_mod_folio(folio,
					NR_SHMEM_THPS, -nr);
		} else {
-			__lruvec_stat_mod_folio(folio,
+			lruvec_stat_mod_folio(folio,
					NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
		}
	}
	if (is_shmem)
-		__lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
+		lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
	else
-		__lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
+		lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
	if (nr_none) {
-		__lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
+		lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
		/* nr_none is always 0 for non-shmem. */
-		__lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
+		lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
	}
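In the two THP hunks above, the value passed is a count of base pages (HPAGE_PMD_NR for a PMD-sized folio), not a count of THPs. A hypothetical helper, not part of the patch, making that convention explicit for the collapse case:

/* Hypothetical helper (illustrative only): account a freshly collapsed THP. */
static void example_account_collapsed_thp(struct folio *new_folio, bool is_shmem)
{
	long nr = folio_nr_pages(new_folio);	/* HPAGE_PMD_NR for a PMD THP */

	lruvec_stat_mod_folio(new_folio,
			      is_shmem ? NR_SHMEM_THPS : NR_FILE_THPS, nr);
}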
/*
	mod_memcg_lruvec_state(lruvec, idx, val);
}
-void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
+void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
-EXPORT_SYMBOL(__lruvec_stat_mod_folio);
+EXPORT_SYMBOL(lruvec_stat_mod_folio);
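Assembling the visible context, the renamed export presumably ends up roughly as below; the memcg lookup and the untracked-folio fallback are reconstructed and should be treated as assumptions rather than patch text.

/* Sketch (assumed): the renamed, exported stat updater after this hunk. */
void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			   int val)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);				/* assumed */
	if (!memcg) {
		/* assumed: untracked folios update only the node counter */
		rcu_read_unlock();
		mod_node_page_state(folio_pgdat(folio), idx, val);
		return;
	}
	lruvec = mem_cgroup_lruvec(memcg, folio_pgdat(folio));	/* assumed */
	mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(lruvec_stat_mod_folio);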
void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	inode_attach_wb(inode, folio);
	wb = inode_to_wb(inode);
-	__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
	__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
	__node_stat_mod_folio(folio, NR_DIRTIED, nr);
	wb_stat_mod(wb, WB_RECLAIMABLE, nr);
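Note that only the lruvec (memcg-aware) accessor loses its prefix in the hunk above; the plain zone and node accessors keep theirs. A hypothetical caller after this change therefore mixes the two spellings, the __-prefixed ones presumably still relying on the surrounding context being non-preemptible or irq-off:

/* Hypothetical sketch (names and context are illustrative, not patch text). */
static void example_account_dirtied(struct folio *folio,
				    struct bdi_writeback *wb, long nr)
{
	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);	  /* renamed   */
	__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr); /* unchanged */
	__node_stat_mod_folio(folio, NR_DIRTIED, nr);		  /* unchanged */
	wb_stat_mod(wb, WB_RECLAIMABLE, nr);
}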
	if (nr) {
		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
-		__lruvec_stat_mod_folio(folio, idx, nr);
+		lruvec_stat_mod_folio(folio, idx, nr);
	}
	if (nr_pmdmapped) {
		if (folio_test_anon(folio)) {
			idx = NR_ANON_THPS;
-			__lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+			lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
		} else {
			/* NR_*_PMDMAPPED are not maintained per-memcg */
			idx = folio_test_swapbacked(folio) ?
static void shmem_update_stats(struct folio *folio, int nr_pages)
{
	if (folio_test_pmd_mappable(folio))
-		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
-	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
-	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+		lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+	lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+	lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
}
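shmem_update_stats() takes a signed base-page count, so it is presumably called with a positive value when a folio enters the shmem page cache and a negative one when it leaves; illustrative call sites (not from the patch):

shmem_update_stats(folio, folio_nr_pages(folio));	/* on insertion */
shmem_update_stats(folio, -folio_nr_pages(folio));	/* on removal   */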
/*