f2fs: use f2fs_{down,up}_write_trace() for cp_global_sem so that the elapsed lock hold time can be traced.
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct f2fs_lock_context lc;
long diff, written;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto skip_write;
/* if locked failed, cp will flush dirty pages instead */
- if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
+ if (!f2fs_down_write_trylock_trace(&sbi->cp_global_sem, &lc))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, META);
diff = nr_pages_to_write(sbi, META, wbc);
written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
- f2fs_up_write(&sbi->cp_global_sem);
+ f2fs_up_write_trace(&sbi->cp_global_sem, &lc);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct f2fs_lock_context lc;
unsigned long long ckpt_ver;
int err = 0;
f2fs_warn(sbi, "Start checkpoint disabled!");
}
if (cpc->reason != CP_RESIZE)
- f2fs_down_write(&sbi->cp_global_sem);
+ f2fs_down_write_trace(&sbi->cp_global_sem, &lc);
stat_cp_time(cpc, CP_TIME_LOCK);
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_FINISH_CHECKPOINT);
out:
if (cpc->reason != CP_RESIZE)
- f2fs_up_write(&sbi->cp_global_sem);
+ f2fs_up_write_trace(&sbi->cp_global_sem, &lc);
return err;
}
LOCK_NAME_NODE_CHANGE,
LOCK_NAME_NODE_WRITE,
LOCK_NAME_GC_LOCK,
+ LOCK_NAME_CP_GLOBAL,
};
/*
struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
struct f2fs_lock_context lc;
struct f2fs_lock_context glc;
+ struct f2fs_lock_context clc;
unsigned int secs;
int err = 0;
__u32 rem;
}
f2fs_down_write_trace(&sbi->gc_lock, &glc);
- f2fs_down_write(&sbi->cp_global_sem);
+ f2fs_down_write_trace(&sbi->cp_global_sem, &clc);
spin_lock(&sbi->stat_lock);
if (shrunk_blocks + valid_user_blocks(sbi) +
spin_unlock(&sbi->stat_lock);
}
out_err:
- f2fs_up_write(&sbi->cp_global_sem);
+ f2fs_up_write_trace(&sbi->cp_global_sem, &clc);
f2fs_up_write_trace(&sbi->gc_lock, &glc);
thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
return err;
LIST_HEAD(inode_list);
LIST_HEAD(tmp_inode_list);
LIST_HEAD(dir_list);
+ struct f2fs_lock_context lc;
int err;
int ret = 0;
unsigned long s_flags = sbi->sb->s_flags;
f2fs_info(sbi, "recover fsync data on readonly fs");
/* prevent checkpoint */
- f2fs_down_write(&sbi->cp_global_sem);
+ f2fs_down_write_trace(&sbi->cp_global_sem, &lc);
/* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list, check_only, &new_inode);
if (!err)
clear_sbi_flag(sbi, SBI_POR_DOING);
- f2fs_up_write(&sbi->cp_global_sem);
+ f2fs_up_write_trace(&sbi->cp_global_sem, &lc);
/* let's drop all the directory inodes for clean checkpoint */
destroy_fsync_dnodes(&dir_list, err);
/* initialize locks within allocated memory */
init_f2fs_rwsem_trace(&sbi->gc_lock, sbi, LOCK_NAME_GC_LOCK);
mutex_init(&sbi->writepages);
- init_f2fs_rwsem(&sbi->cp_global_sem);
+ init_f2fs_rwsem_trace(&sbi->cp_global_sem, sbi, LOCK_NAME_CP_GLOBAL);
init_f2fs_rwsem_trace(&sbi->node_write, sbi, LOCK_NAME_NODE_WRITE);
init_f2fs_rwsem_trace(&sbi->node_change, sbi, LOCK_NAME_NODE_CHANGE);
spin_lock_init(&sbi->stat_lock);
{ LOCK_NAME_CP_RWSEM, "cp_rwsem" }, \
{ LOCK_NAME_NODE_CHANGE, "node_change" }, \
{ LOCK_NAME_NODE_WRITE, "node_write" }, \
- { LOCK_NAME_GC_LOCK, "gc_lock" })
+ { LOCK_NAME_GC_LOCK, "gc_lock" }, \
+ { LOCK_NAME_CP_GLOBAL, "cp_global" })
struct f2fs_sb_info;
struct f2fs_io_info;