continue;
}
+ gc_control.one_time = false;
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
wait_ms = gc_th->max_sleep_time;
}
- if (need_to_boost_gc(sbi))
+ if (need_to_boost_gc(sbi)) {
decrease_sleep_time(gc_th, &wait_ms);
- else
+ if (f2fs_sb_has_blkzoned(sbi))
+ gc_control.one_time = true;
+ } else {
increase_sleep_time(gc_th, &wait_ms);
+ }
do_gc:
stat_inc_gc_call_count(sbi, foreground ?
FOREGROUND : BACKGROUND);
- sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
+ gc_control.one_time;
/* foreground GC was triggered via f2fs_balance_fs() */
if (foreground)
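
Taken together, the gc_thread_func() hunks above reduce to the control flow below. This is a condensed sketch built only from the lines shown here, with the locking, statistics, and surrounding sleep-time logic elided:

	gc_control.one_time = false;		/* reset at the top of every loop iteration */

	if (need_to_boost_gc(sbi)) {
		decrease_sleep_time(gc_th, &wait_ms);
		if (f2fs_sb_has_blkzoned(sbi))
			gc_control.one_time = true;	/* zoned device under pressure: one bounded pass */
	} else {
		increase_sleep_time(gc_th, &wait_ms);
	}
do_gc:
	/* a one_time pass always runs synchronously */
	sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
			gc_control.one_time;
	if (foreground)
		sync_mode = false;

In the hunks shown here, one_time is only ever requested by the GC thread itself, and only when need_to_boost_gc() fires on a zoned device.
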
static int do_garbage_collect(struct f2fs_sb_info *sbi,
unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type,
- bool force_migrate)
+ bool force_migrate, bool one_time)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
sec_end_segno -= SEGS_PER_SEC(sbi) -
f2fs_usable_segs_in_sec(sbi, segno);
- if (gc_type == BG_GC) {
+ if (gc_type == BG_GC || one_time) {
unsigned int window_granularity =
sbi->migration_window_granularity;
}
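
Inside do_garbage_collect(), the new one_time parameter widens the existing background-GC window check, so a one_time pass is bounded by sbi->migration_window_granularity even though, with sync_mode forced on, it presumably runs with foreground-GC semantics. Condensed from the hunk above (the body of the window computation is elided):

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate, bool one_time)
{
	...
	/* bound the migration window for background and one_time passes */
	if (gc_type == BG_GC || one_time) {
		unsigned int window_granularity =
				sbi->migration_window_granularity;
		...
	}
	...
}
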
seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
- gc_control->should_migrate_blocks);
+ gc_control->should_migrate_blocks,
+ gc_control->one_time);
if (seg_freed < 0)
goto stop;
total_sec_freed++;
}
+ if (gc_control->one_time)
+ goto stop;
+
if (gc_type == FG_GC) {
sbi->cur_victim_sec = NULL_SEGNO;
};
do_garbage_collect(sbi, segno, &gc_list, FG_GC,
- dry_run_sections == 0);
+ dry_run_sections == 0, false);
put_gc_inode(&gc_list);
if (!dry_run && get_valid_blocks(sbi, segno, true))
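
The two call sites above show how the flag is consumed: f2fs_gc() bails out of its victim loop after the first successfully processed section when one_time is set, while the resize path in free_segment_range() explicitly passes false so its forced migration keeps its existing behaviour. A minimal sketch of the f2fs_gc() exit path, using only what appears in the hunks above:

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks,
				gc_control->one_time);
	if (seg_freed < 0)
		goto stop;
	...
	/* a one_time request is satisfied after a single section */
	if (gc_control->one_time)
		goto stop;
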