folio->index, NODE_TYPE_REGULAR, true);
f2fs_bug_on(sbi, folio->index != nid_of_node(folio));
}
- if (f2fs_in_warm_node_list(sbi, folio))
+ if (f2fs_in_warm_node_list(folio))
f2fs_del_fsync_node_entry(sbi, folio);
dec_page_count(sbi, type);
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
/*
 * NOTE(review): the prototype change below drops the @sbi argument —
 * the updated implementation inspects only the folio itself, so the
 * superblock info was unused.  Every call site must be converted in
 * the same patch to keep the tree building.
 */
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio);
+bool f2fs_in_warm_node_list(struct folio *folio);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
start, nr);
}
/*
 * Return true if @folio belongs on the per-sb fsync node list: it must
 * be a node folio that is a direct node (IS_DNODE()) and carries the
 * cold-node flag.  Callers gate f2fs_add_fsync_node_entry() /
 * f2fs_del_fsync_node_entry() on this predicate.  The @sbi parameter
 * is removed because the check reads nothing but the folio.
 */
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
+bool f2fs_in_warm_node_list(struct folio *folio)
{
return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio);
}
}
/* should add to global list before clearing PAGECACHE status */
- if (f2fs_in_warm_node_list(sbi, folio)) {
+ if (f2fs_in_warm_node_list(folio)) {
seq = f2fs_add_fsync_node_entry(sbi, folio);
if (seq_id)
*seq_id = seq;
if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
folio_end_writeback(folio);
- if (f2fs_in_warm_node_list(fio->sbi, folio))
+ if (f2fs_in_warm_node_list(folio))
f2fs_del_fsync_node_entry(fio->sbi, folio);
f2fs_bug_on(fio->sbi, !is_set_ckpt_flags(fio->sbi,
CP_ERROR_FLAG));