 	return last_folio;
 }
-static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
-				struct writeback_control *wbc, bool do_balance,
-				enum iostat_type io_type, unsigned int *seq_id)
+static bool __write_node_folio(struct folio *folio, bool atomic, bool do_fsync,
+				bool *submitted, struct writeback_control *wbc,
+				bool do_balance, enum iostat_type io_type,
+				unsigned int *seq_id)
 {
 	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
 	nid_t nid;
 	if (atomic && !test_opt(sbi, NOBARRIER))
 		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
+	set_dentry_mark(folio, false);
+	set_fsync_mark(folio, do_fsync);
 	if (IS_INODE(folio) && (atomic || is_fsync_dnode(folio)))
 		set_dentry_mark(folio,
 				f2fs_need_dentry_mark(sbi, ino_of_node(folio)));
 		goto out_folio;
 	}
-		if (!__write_node_folio(node_folio, false, NULL,
+		if (!__write_node_folio(node_folio, false, false, NULL,
 					&wbc, false, FS_GC_NODE_IO, NULL))
 			err = -EAGAIN;
 		goto release_folio;
 		for (i = 0; i < nr_folios; i++) {
 			struct folio *folio = fbatch.folios[i];
 			bool submitted = false;
+			bool do_fsync = false;
 			if (unlikely(f2fs_cp_error(sbi))) {
 				f2fs_folio_put(last_folio, false);
 			f2fs_folio_wait_writeback(folio, NODE, true, true);
-			set_fsync_mark(folio, 0);
-			set_dentry_mark(folio, 0);
-
 			if (!atomic || folio == last_folio) {
-				set_fsync_mark(folio, 1);
+				do_fsync = true;
 				percpu_counter_inc(&sbi->rf_node_block_count);
 				if (IS_INODE(folio)) {
 					if (is_inode_flag_set(inode,
 								FI_DIRTY_INODE))
 						f2fs_update_inode(inode, folio);
 			if (!__write_node_folio(folio, atomic &&
 						folio == last_folio,
-					&submitted, wbc, true,
-					FS_NODE_IO, seq_id)) {
+					do_fsync, &submitted,
+					wbc, true, FS_NODE_IO,
+					seq_id)) {
 				f2fs_folio_put(last_folio, false);
 				folio_batch_release(&fbatch);
 				ret = -EIO;
 			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
-			set_fsync_mark(folio, 0);
-			set_dentry_mark(folio, 0);
-
-			if (!__write_node_folio(folio, false, &submitted,
+			if (!__write_node_folio(folio, false, false, &submitted,
 						wbc, do_balance, io_type, NULL)) {
 				folio_batch_release(&fbatch);
 				ret = -EIO;