Use f2fs_{down,up}_write_trace() for gc_lock so that the lock's elapsed hold time can be traced.
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
{
struct cp_control cpc = { .reason = CP_SYNC, };
+ struct f2fs_lock_context lc;
int err;
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &lc);
err = f2fs_write_checkpoint(sbi, &cpc);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &lc);
return err;
}
cpc.reason = __get_cp_reason(sbi);
if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC ||
sbi->umount_lock_holder == current) {
+ struct f2fs_lock_context lc;
int ret;
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &lc);
ret = f2fs_write_checkpoint(sbi, &cpc);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &lc);
return ret;
}
LOCK_NAME_CP_RWSEM,
LOCK_NAME_NODE_CHANGE,
LOCK_NAME_NODE_WRITE,
+ LOCK_NAME_GC_LOCK,
};
/*
unsigned long long age_threshold; /* age threshold */
};
-struct f2fs_gc_control {
- unsigned int victim_segno; /* target victim segment number */
- int init_gc_type; /* FG_GC or BG_GC */
- bool no_bg_gc; /* check the space and stop bg_gc */
- bool should_migrate_blocks; /* should migrate blocks */
- bool err_gc_skipped; /* return EAGAIN if GC skipped */
- bool one_time; /* require one time GC in one migration unit */
- unsigned int nr_free_secs; /* # of free sections to do GC */
-};
-
struct f2fs_time_stat {
unsigned long long total_time; /* total wall clock time */
#ifdef CONFIG_64BIT
bool lock_trace;
};
+struct f2fs_gc_control {
+ unsigned int victim_segno; /* target victim segment number */
+ int init_gc_type; /* FG_GC or BG_GC */
+ bool no_bg_gc; /* check the space and stop bg_gc */
+ bool should_migrate_blocks; /* should migrate blocks */
+ bool err_gc_skipped; /* return EAGAIN if GC skipped */
+ bool one_time; /* require one time GC in one migration unit */
+ unsigned int nr_free_secs; /* # of free sections to do GC */
+ struct f2fs_lock_context lc; /* lock context for gc_lock */
+};
+
/*
* For s_flag in struct f2fs_sb_info
* Modification on enum should be synchronized with s_flag array
if (has_not_enough_free_secs(sbi, 0,
sbi->reserved_pin_section)) {
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
if (err && err != -ENODATA) {
return ret;
if (!sync) {
- if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock_trace(&sbi->gc_lock,
+ &gc_control.lc)) {
ret = -EBUSY;
goto out;
}
} else {
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
}
gc_control.init_gc_type = sync ? FG_GC : BG_GC;
do_more:
if (!range->sync) {
- if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock_trace(&sbi->gc_lock, &gc_control.lc)) {
ret = -EBUSY;
goto out;
}
} else {
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
}
gc_control.victim_segno = GET_SEGNO(sbi, range->start);
end_segno = min(start_segno + range.segments, dev_end_segno);
while (start_segno < end_segno) {
- if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock_trace(&sbi->gc_lock, &gc_control.lc)) {
ret = -EBUSY;
goto out;
}
if (sbi->gc_mode == GC_URGENT_HIGH ||
sbi->gc_mode == GC_URGENT_MID) {
wait_ms = gc_th->urgent_sleep_time;
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
goto do_gc;
}
if (foreground) {
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
goto do_gc;
- } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
+ } else if (!f2fs_down_write_trylock_trace(&sbi->gc_lock,
+ &gc_control.lc)) {
stat_other_skip_bggc_count(sbi);
goto next;
}
if (!is_idle(sbi, GC_TIME)) {
increase_sleep_time(gc_th, &wait_ms);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &gc_control.lc);
stat_io_skip_bggc_count(sbi);
goto next;
}
if (has_enough_free_blocks(sbi,
gc_th->no_zoned_gc_percent)) {
wait_ms = gc_th->no_gc_sleep_time;
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock,
+ &gc_control.lc);
goto next;
}
if (wait_ms == gc_th->no_gc_sleep_time)
reserved_segments(sbi),
prefree_segments(sbi));
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &gc_control->lc);
put_gc_inode(&gc_list);
__u64 old_block_count, shrunk_blocks;
struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
struct f2fs_lock_context lc;
+ struct f2fs_lock_context glc;
unsigned int secs;
int err = 0;
__u32 rem;
secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
/* stop other GC */
- if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
+ if (!f2fs_down_write_trylock_trace(&sbi->gc_lock, &glc)) {
err = -EAGAIN;
goto out_drop_write;
}
out_unlock:
f2fs_unlock_op(sbi, &lc);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &glc);
out_drop_write:
mnt_drop_write_file(filp);
if (err)
return -EROFS;
}
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &glc);
f2fs_down_write(&sbi->cp_global_sem);
spin_lock(&sbi->stat_lock);
}
out_err:
f2fs_up_write(&sbi->cp_global_sem);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &glc);
thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
return err;
}
.should_migrate_blocks = false,
.err_gc_skipped = false,
.nr_free_secs = 1 };
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
stat_inc_gc_call_count(sbi, FOREGROUND);
f2fs_gc(sbi, &gc_control);
}
f2fs_unlock_op(sbi, &lc);
if (f2fs_sb_has_blkzoned(sbi) && err == -EAGAIN && gc_required) {
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &lc);
err = f2fs_gc_range(sbi, 0, sbi->first_seq_zone_segno - 1,
true, ZONED_PIN_SEC_REQUIRED_COUNT);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &lc);
gc_required = false;
if (!err)
block_t start_block, end_block;
struct cp_control cpc;
struct discard_policy dpolicy;
+ struct f2fs_lock_context lc;
unsigned long long trimmed = 0;
int err = 0;
bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
if (sbi->discard_blks == 0)
goto out;
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &lc);
stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &lc);
if (err)
goto out;
{
unsigned int s_flags = sbi->sb->s_flags;
struct cp_control cpc;
+ struct f2fs_lock_context lc;
unsigned int gc_mode = sbi->gc_mode;
int err = 0;
int ret;
.no_bg_gc = true,
.nr_free_secs = 1 };
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
if (err == -ENODATA) {
}
skip_gc:
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &lc);
cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED);
stat_inc_cp_call_count(sbi, TOTAL_CALL);
spin_unlock(&sbi->stat_lock);
out_unlock:
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &lc);
restore_flag:
sbi->gc_mode = gc_mode;
sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
long long start, writeback, lock, sync_inode, end;
int ret;
+ struct f2fs_lock_context lc;
f2fs_info(sbi, "%s start, meta: %lld, node: %lld, data: %lld",
__func__,
sync_inode = ktime_get();
- f2fs_down_write(&sbi->gc_lock);
+ f2fs_down_write_trace(&sbi->gc_lock, &lc);
f2fs_dirty_to_prefree(sbi);
clear_sbi_flag(sbi, SBI_CP_DISABLED);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- f2fs_up_write(&sbi->gc_lock);
+ f2fs_up_write_trace(&sbi->gc_lock, &lc);
f2fs_info(sbi, "%s sync_fs, meta: %lld, imeta: %lld, node: %lld, dents: %lld, qdata: %lld",
__func__,
sbi->sb = sb;
/* initialize locks within allocated memory */
- init_f2fs_rwsem(&sbi->gc_lock);
+ init_f2fs_rwsem_trace(&sbi->gc_lock, sbi, LOCK_NAME_GC_LOCK);
mutex_init(&sbi->writepages);
init_f2fs_rwsem(&sbi->cp_global_sem);
init_f2fs_rwsem_trace(&sbi->node_write, sbi, LOCK_NAME_NODE_WRITE);
__print_symbolic(lock, \
{ LOCK_NAME_CP_RWSEM, "cp_rwsem" }, \
{ LOCK_NAME_NODE_CHANGE, "node_change" }, \
- { LOCK_NAME_NODE_WRITE, "node_write" })
+ { LOCK_NAME_NODE_WRITE, "node_write" }, \
+ { LOCK_NAME_GC_LOCK, "gc_lock" })
struct f2fs_sb_info;
struct f2fs_io_info;