// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently.
		 * Because it is possible that some segments can be
		 * invalidated soon after by user update or deletion.
		 * So, I'd like to wait some time to collect dirty segments.
		 */
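		/*
		 * GC_URGENT_HIGH below may be rate-limited through
		 * gc_urgent_high_remaining (when gc_urgent_high_limited is
		 * set); once the budget reaches zero the mode drops back to
		 * GC_NORMAL. The urgent modes take gc_lock unconditionally
		 * and bypass the idle/invalid-block heuristics further down.
		 */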
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			spin_lock(&sbi->gc_urgent_high_lock);
			if (sbi->gc_urgent_high_limited) {
				if (!sbi->gc_urgent_high_remaining) {
					sbi->gc_urgent_high_limited = false;
					spin_unlock(&sbi->gc_urgent_high_lock);
					sbi->gc_mode = GC_NORMAL;
					continue;
				}
				sbi->gc_urgent_high_remaining--;
			}
			spin_unlock(&sbi->gc_urgent_high_lock);
		}

		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust candidates range, should select all dirty segments for
	 * foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (f2fs_need_rand_seg(sbi))
		p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
	else if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
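/*
 * Cost-benefit policy (GC_CB), as computed below: with utilization
 * u = valid blocks * 100 / blocks-per-segment and age normalized to 0..100
 * over [min_mtime, max_mtime], the benefit/cost ratio is
 * age * (100 - u) / (100 + u). It is returned as UINT_MAX minus that ratio,
 * so a smaller cost means a better victim, consistent with GC_GREEDY which
 * simply returns the valid block count.
 */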
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab,
				GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose young section as candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}
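/*
 * ATGC victim lookup: the candidates gathered by add_victim_entry() are
 * walked in mtime order. Each candidate's cost combines a normalized age
 * term (weighted by am->age_weight) with a free-space term (weighted by
 * 100 - age_weight), both scaled by the same accuracy factor; the entry
 * with the smallest UINT_MAX - (age + u) wins, preferring the older section
 * on ties. At most dirty_threshold candidates are examined per pass.
 */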
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip selecting an invalid segno (one that failed the block
		 * validity check during GC) to avoid an endless GC loop in
		 * such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find source section during GC.
				 * The victim should have no checkpointed data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find target segment
				 * for writes which can be full by checkpointed
				 * and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * bug.
 */
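/*
 * For illustration: node_ofs 0 is the inode itself (its data starts at file
 * block 0); node_ofs 1 and 2 are the two direct node blocks, which start at
 * ADDRS_PER_INODE(inode) and ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK(inode).
 * Past that, each group of (NIDS_PER_BLOCK + 1) node offsets contains one
 * indirect (or double indirect) node holding no data addresses; the "dec"
 * terms below subtract those non-data nodes from the index.
 */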
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
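/*
 * This path is used for inodes that need post-read processing (encryption,
 * verity or compression, see f2fs_post_read_required()): the block is copied
 * through META_MAPPING without being decoded. Plain data is migrated through
 * the regular page cache by move_data_page() instead.
 */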
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);
	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
1457 */
48018b4c 1458static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
7dede886
CY
1459 struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1460 bool force_migrate)
7bc09003
JK
1461{
1462 struct super_block *sb = sbi->sb;
1463 struct f2fs_summary *entry;
1464 block_t start_addr;
43727527 1465 int off;
7bc09003 1466 int phase = 0;
48018b4c 1467 int submitted = 0;
de881df9 1468 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
7bc09003
JK
1469
1470 start_addr = START_BLOCK(sbi, segno);
1471
1472next_step:
1473 entry = sum;
c718379b 1474
de881df9 1475 for (off = 0; off < usable_blks_in_seg; off++, entry++) {
7bc09003
JK
1476 struct page *data_page;
1477 struct inode *inode;
1478 struct node_info dni; /* dnode info for the data */
1479 unsigned int ofs_in_node, nofs;
1480 block_t start_bidx;
7ea984b0 1481 nid_t nid = le32_to_cpu(entry->nid);
7bc09003 1482
803e74be
JK
1483 /*
1484 * stop BG_GC if there is not enough free sections.
1485 * Or, stop GC if the segment becomes fully valid caused by
1486 * race condition along with SSR block allocation.
1487 */
1488 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
7dede886 1489 (!force_migrate && get_valid_blocks(sbi, segno, true) ==
074b5ea2 1490 CAP_BLKS_PER_SEC(sbi)))
48018b4c 1491 return submitted;
7bc09003 1492
43727527 1493 if (check_valid_map(sbi, segno, off) == 0)
7bc09003
JK
1494 continue;
1495
1496 if (phase == 0) {
4d57b86d 1497 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
7ea984b0
CY
1498 META_NAT, true);
1499 continue;
1500 }
1501
1502 if (phase == 1) {
4d57b86d 1503 f2fs_ra_node_page(sbi, nid);
7bc09003
JK
1504 continue;
1505 }
1506
1507 /* Get an inode by ino with checking validity */
c1079892 1508 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
7bc09003
JK
1509 continue;
1510
7ea984b0 1511 if (phase == 2) {
4d57b86d 1512 f2fs_ra_node_page(sbi, dni.ino);
7bc09003
JK
1513 continue;
1514 }
1515
7bc09003
JK
1516 ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1517
7ea984b0 1518 if (phase == 3) {
71419129
CY
1519 int err;
1520
d4686d56 1521 inode = f2fs_iget(sb, dni.ino);
9056d648
CY
1522 if (IS_ERR(inode) || is_bad_inode(inode) ||
1523 special_file(inode->i_mode))
7bc09003
JK
1524 continue;
1525
71419129
CY
1526 err = f2fs_gc_pinned_control(inode, gc_type, segno);
1527 if (err == -EAGAIN) {
a22bb552
CY
1528 iput(inode);
1529 return submitted;
1530 }
1531
e4544b63 1532 if (!f2fs_down_write_trylock(
b2532c69 1533 &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
bb06664a 1534 iput(inode);
6f8d4455 1535 sbi->skipped_gc_rwsem++;
bb06664a
CY
1536 continue;
1537 }
1538
6aa58d8a
CY
1539 start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1540 ofs_in_node;
1541
1542 if (f2fs_post_read_required(inode)) {
1543 int err = ra_data_block(inode, start_bidx);
1544
e4544b63 1545 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
6aa58d8a
CY
1546 if (err) {
1547 iput(inode);
1548 continue;
1549 }
1550 add_gc_inode(gc_list, inode);
1551 continue;
1552 }
1553
4d57b86d 1554 data_page = f2fs_get_read_data_page(inode,
6aa58d8a 1555 start_bidx, REQ_RAHEAD, true);
e4544b63 1556 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
31a32688
CL
1557 if (IS_ERR(data_page)) {
1558 iput(inode);
1559 continue;
1560 }
7bc09003
JK
1561
1562 f2fs_put_page(data_page, 0);
7dda2af8 1563 add_gc_inode(gc_list, inode);
31a32688
CL
1564 continue;
1565 }
1566
7ea984b0 1567 /* phase 4 */
7dda2af8 1568 inode = find_gc_inode(gc_list, dni.ino);
31a32688 1569 if (inode) {
82e0a5aa
CY
1570 struct f2fs_inode_info *fi = F2FS_I(inode);
1571 bool locked = false;
48018b4c 1572 int err;
82e0a5aa
CY
1573
1574 if (S_ISREG(inode->i_mode)) {
e4544b63 1575 if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
ad126ebd 1576 sbi->skipped_gc_rwsem++;
82e0a5aa 1577 continue;
ad126ebd 1578 }
e4544b63 1579 if (!f2fs_down_write_trylock(
b2532c69 1580 &fi->i_gc_rwsem[WRITE])) {
6f8d4455 1581 sbi->skipped_gc_rwsem++;
e4544b63 1582 f2fs_up_write(&fi->i_gc_rwsem[READ]);
82e0a5aa
CY
1583 continue;
1584 }
1585 locked = true;
73ac2f4e
CY
1586
1587 /* wait for all inflight aio data */
1588 inode_dio_wait(inode);
82e0a5aa
CY
1589 }
1590
4d57b86d 1591 start_bidx = f2fs_start_bidx_of_node(nofs, inode)
c879f90d 1592 + ofs_in_node;
6dbb1796 1593 if (f2fs_post_read_required(inode))
48018b4c
CY
1594 err = move_data_block(inode, start_bidx,
1595 gc_type, segno, off);
4375a336 1596 else
48018b4c 1597 err = move_data_page(inode, start_bidx, gc_type,
d4c759ee 1598 segno, off);
82e0a5aa 1599
48018b4c
CY
1600 if (!err && (gc_type == FG_GC ||
1601 f2fs_post_read_required(inode)))
1602 submitted++;
1603
82e0a5aa 1604 if (locked) {
e4544b63
TM
1605 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1606 f2fs_up_write(&fi->i_gc_rwsem[READ]);
82e0a5aa
CY
1607 }
1608
e1235983 1609 stat_inc_data_blk_count(sbi, 1, gc_type);
7bc09003 1610 }
7bc09003 1611 }
c718379b 1612
7ea984b0 1613 if (++phase < 5)
7bc09003 1614 goto next_step;
48018b4c
CY
1615
1616 return submitted;
7bc09003
JK
1617}
1618
1619static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
8a2d0ace 1620 int gc_type)
7bc09003
JK
1621{
1622 struct sit_info *sit_i = SIT_I(sbi);
1623 int ret;
8a2d0ace 1624
3d26fa6b 1625 down_write(&sit_i->sentry_lock);
8a2d0ace 1626 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
093749e2 1627 NO_CHECK_TYPE, LFS, 0);
3d26fa6b 1628 up_write(&sit_i->sentry_lock);
7bc09003
JK
1629 return ret;
1630}
1631
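/*
 * do_garbage_collect() migrates one victim section (sbi->segs_per_sec
 * segments, possibly fewer usable ones on zoned devices): it references the
 * summary pages for all segments up front, then collects each segment with
 * gc_node_segment() or gc_data_segment() according to the summary type, and
 * returns the number of segments actually freed for FG_GC accounting.
 */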
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in less than expected usable segments in the zone,
	 * calculate the end segno in the zone which can be garbage collected
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}
1750
d147ea4a 1751int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
7bc09003 1752{
d147ea4a
JK
1753 int gc_type = gc_control->init_gc_type;
1754 unsigned int segno = gc_control->victim_segno;
c56f16da
CY
1755 int sec_freed = 0, seg_freed = 0, total_freed = 0;
1756 int ret = 0;
d5053a34 1757 struct cp_control cpc;
7dda2af8
CL
1758 struct gc_inode_list gc_list = {
1759 .ilist = LIST_HEAD_INIT(gc_list.ilist),
f6bb2a2c 1760 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
7dda2af8 1761 };
2ef79ecb 1762 unsigned int skipped_round = 0, round = 0;
d5053a34 1763
d147ea4a 1764 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
c81d5bae 1765 gc_control->nr_free_secs,
c56f16da
CY
1766 get_pages(sbi, F2FS_DIRTY_NODES),
1767 get_pages(sbi, F2FS_DIRTY_DENTS),
1768 get_pages(sbi, F2FS_DIRTY_IMETA),
1769 free_sections(sbi),
1770 free_segments(sbi),
1771 reserved_segments(sbi),
1772 prefree_segments(sbi));
1773
119ee914 1774 cpc.reason = __get_cp_reason(sbi);
6f8d4455 1775 sbi->skipped_gc_rwsem = 0;
7bc09003 1776gc_more:
1751e8a6 1777 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
e5dbd956 1778 ret = -EINVAL;
408e9375 1779 goto stop;
e5dbd956 1780 }
6d5a1495
CY
1781 if (unlikely(f2fs_cp_error(sbi))) {
1782 ret = -EIO;
203681f6 1783 goto stop;
6d5a1495 1784 }
7bc09003 1785
19f4e688 1786 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
6e17bfbc 1787 /*
19f4e688
HP
 1788 * For example, if there are many prefree_segments below the given
 1789 * threshold, we can make them free by writing a checkpoint. Then, we
 1790 * secure free segments which don't need fggc any more.
6e17bfbc 1791 */
d147ea4a 1792 if (prefree_segments(sbi)) {
4d57b86d 1793 ret = f2fs_write_checkpoint(sbi, &cpc);
8fd5a37e
JK
1794 if (ret)
1795 goto stop;
1796 }
19f4e688
HP
1797 if (has_not_enough_free_secs(sbi, 0, 0))
1798 gc_type = FG_GC;
d64f8047 1799 }
7bc09003 1800
19f4e688 1801 /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
d147ea4a 1802 if (gc_type == BG_GC && gc_control->no_bg_gc) {
c56f16da 1803 ret = -EINVAL;
19f4e688 1804 goto stop;
c56f16da 1805 }
71419129 1806retry:
97767500 1807 ret = __get_victim(sbi, &segno, gc_type);
71419129
CY
1808 if (ret) {
 1809 /* allow searching for a victim in sections that have pinned data */
1810 if (ret == -ENODATA && gc_type == FG_GC &&
1811 f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1812 f2fs_unpin_all_sections(sbi, false);
1813 goto retry;
1814 }
408e9375 1815 goto stop;
71419129 1816 }
7bc09003 1817
d147ea4a
JK
1818 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1819 gc_control->should_migrate_blocks);
c56f16da 1820 total_freed += seg_freed;
43727527 1821
d147ea4a
JK
1822 if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
1823 sec_freed++;
2ef79ecb 1824
10d0786b 1825 if (gc_type == FG_GC)
5ec4e49f 1826 sbi->cur_victim_sec = NULL_SEGNO;
43727527 1827
c81d5bae
JK
1828 if (gc_control->init_gc_type == FG_GC ||
1829 !has_not_enough_free_secs(sbi,
1830 (gc_type == FG_GC) ? sec_freed : 0, 0)) {
1831 if (gc_type == FG_GC && sec_freed < gc_control->nr_free_secs)
1832 goto go_gc_more;
a9163b94 1833 goto stop;
c81d5bae 1834 }
43727527 1835
d147ea4a
JK
1836 /* FG_GC stops GC by skip_count */
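/* i.e. write a checkpoint and stop once more than MAX_SKIP_GC_COUNT rounds
 * were skipped and skipped rounds make up at least half of all rounds */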
1837 if (gc_type == FG_GC) {
1838 if (sbi->skipped_gc_rwsem)
1839 skipped_round++;
1840 round++;
1841 if (skipped_round > MAX_SKIP_GC_COUNT &&
1842 skipped_round * 2 >= round) {
4d57b86d 1843 ret = f2fs_write_checkpoint(sbi, &cpc);
d147ea4a 1844 goto stop;
a9163b94 1845 }
a9163b94 1846 }
d147ea4a
JK
1847
1848 /* Write checkpoint to reclaim prefree segments */
1849 if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
1850 prefree_segments(sbi)) {
a9163b94 1851 ret = f2fs_write_checkpoint(sbi, &cpc);
d147ea4a
JK
1852 if (ret)
1853 goto stop;
1854 }
c81d5bae 1855go_gc_more:
d147ea4a
JK
1856 segno = NULL_SEGNO;
1857 goto gc_more;
1858
408e9375 1859stop:
e066b83c 1860 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
d147ea4a 1861 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
c56f16da 1862
71419129
CY
1863 if (gc_type == FG_GC)
1864 f2fs_unpin_all_sections(sbi, true);
1865
c56f16da
CY
1866 trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1867 get_pages(sbi, F2FS_DIRTY_NODES),
1868 get_pages(sbi, F2FS_DIRTY_DENTS),
1869 get_pages(sbi, F2FS_DIRTY_IMETA),
1870 free_sections(sbi),
1871 free_segments(sbi),
1872 reserved_segments(sbi),
1873 prefree_segments(sbi));
1874
e4544b63 1875 f2fs_up_write(&sbi->gc_lock);
7bc09003 1876
7dda2af8 1877 put_gc_inode(&gc_list);
d530d4d8 1878
d147ea4a 1879 if (gc_control->err_gc_skipped && !ret)
d530d4d8 1880 ret = sec_freed ? 0 : -EAGAIN;
43727527 1881 return ret;
7bc09003
JK
1882}
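
A minimal sketch (not part of gc.c) of how a caller might drive one round of foreground GC through f2fs_gc(). The gc_control field names follow the usage visible above; the helper name and the chosen values are illustrative assumptions. Note that f2fs_gc() releases sbi->gc_lock on every exit path (see the stop: label), so the caller only takes it.

static int sketch_run_one_fggc(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_control gc_control = {
		.init_gc_type = FG_GC,
		.victim_segno = NULL_SEGNO,	/* let GC pick the victim */
		.no_bg_gc = false,
		.should_migrate_blocks = false,
		.err_gc_skipped = true,		/* report -EAGAIN if nothing was freed */
		.nr_free_secs = 1,		/* aim for one free section */
	};

	f2fs_down_write(&sbi->gc_lock);
	/* f2fs_gc() drops gc_lock internally before returning */
	return f2fs_gc(sbi, &gc_control);
}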
1883
093749e2
CY
1884int __init f2fs_create_garbage_collection_cache(void)
1885{
1886 victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1887 sizeof(struct victim_entry));
1888 if (!victim_entry_slab)
1889 return -ENOMEM;
1890 return 0;
1891}
1892
1893void f2fs_destroy_garbage_collection_cache(void)
1894{
1895 kmem_cache_destroy(victim_entry_slab);
1896}
1897
1898static void init_atgc_management(struct f2fs_sb_info *sbi)
1899{
1900 struct atgc_management *am = &sbi->am;
1901
1902 if (test_opt(sbi, ATGC) &&
1903 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1904 am->atgc_enabled = true;
1905
1906 am->root = RB_ROOT_CACHED;
1907 INIT_LIST_HEAD(&am->victim_list);
1908 am->victim_count = 0;
1909
1910 am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
1911 am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
1912 am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
89e53ff1 1913 am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
093749e2
CY
1914}
1915
4d57b86d 1916void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
7bc09003
JK
1917{
1918 DIRTY_I(sbi)->v_ops = &default_v_ops;
e93b9865 1919
1ad71a27 1920 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
d5793249
JK
1921
 1922 /* place the warm/cold data area on the slower device */
0916878d 1923 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
d5793249
JK
1924 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1925 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
093749e2
CY
1926
1927 init_atgc_management(sbi);
7bc09003 1928}
04f0b2ea 1929
b4b10061
JK
1930static int free_segment_range(struct f2fs_sb_info *sbi,
1931 unsigned int secs, bool gc_only)
04f0b2ea 1932{
b4b10061
JK
1933 unsigned int segno, next_inuse, start, end;
1934 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
1935 int gc_mode, gc_type;
04f0b2ea 1936 int err = 0;
b4b10061
JK
1937 int type;
1938
1939 /* Force block allocation for GC */
1940 MAIN_SECS(sbi) -= secs;
1941 start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1942 end = MAIN_SEGS(sbi) - 1;
1943
1944 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1945 for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1946 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
1947 SIT_I(sbi)->last_victim[gc_mode] = 0;
1948
1949 for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
1950 if (sbi->next_victim_seg[gc_type] >= start)
1951 sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1952 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
04f0b2ea
QS
1953
1954 /* Move out cursegs from the target range */
d0b9e42a 1955 for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
0ef81833 1956 f2fs_allocate_segment_for_resize(sbi, type, start, end);
04f0b2ea
QS
1957
1958 /* do GC to move out valid blocks in the range */
1959 for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1960 struct gc_inode_list gc_list = {
1961 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1962 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1963 };
1964
7dede886 1965 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
04f0b2ea
QS
1966 put_gc_inode(&gc_list);
1967
b4b10061
JK
1968 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
1969 err = -EAGAIN;
1970 goto out;
1971 }
1972 if (fatal_signal_pending(current)) {
1973 err = -ERESTARTSYS;
1974 goto out;
1975 }
04f0b2ea 1976 }
b4b10061
JK
1977 if (gc_only)
1978 goto out;
04f0b2ea 1979
b4b10061 1980 err = f2fs_write_checkpoint(sbi, &cpc);
04f0b2ea 1981 if (err)
b4b10061 1982 goto out;
04f0b2ea
QS
1983
1984 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1985 if (next_inuse <= end) {
dcbb4c10
JP
 1986 f2fs_err(sbi, "segno %u should be free but still in use!",
1987 next_inuse);
04f0b2ea
QS
1988 f2fs_bug_on(sbi, 1);
1989 }
b4b10061
JK
1990out:
1991 MAIN_SECS(sbi) += secs;
04f0b2ea
QS
1992 return err;
1993}
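
A minimal sketch (not part of gc.c) of the two-phase way f2fs_resize_fs() below uses free_segment_range(): a gc_only dry run before the filesystem is frozen, then the full pass that also writes a checkpoint and returns -EAGAIN if the shrunk range still holds valid blocks. The helper name is hypothetical.

static int sketch_evacuate_tail_sections(struct f2fs_sb_info *sbi,
					 unsigned int secs)
{
	int err;

	/* phase 1: GC only; no checkpoint, leftover valid blocks are tolerated */
	err = free_segment_range(sbi, secs, true);
	if (err)
		return err;

	/* phase 2: GC again, write a checkpoint, and fail with -EAGAIN
	 * if the target sections are still not empty */
	return free_segment_range(sbi, secs, false);
}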
1994
1995static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1996{
1997 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
a4ba5dfc
CY
1998 int section_count;
1999 int segment_count;
2000 int segment_count_main;
2001 long long block_count;
04f0b2ea
QS
2002 int segs = secs * sbi->segs_per_sec;
2003
e4544b63 2004 f2fs_down_write(&sbi->sb_lock);
a4ba5dfc
CY
2005
2006 section_count = le32_to_cpu(raw_sb->section_count);
2007 segment_count = le32_to_cpu(raw_sb->segment_count);
2008 segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2009 block_count = le64_to_cpu(raw_sb->block_count);
2010
04f0b2ea
QS
2011 raw_sb->section_count = cpu_to_le32(section_count + secs);
2012 raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2013 raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2014 raw_sb->block_count = cpu_to_le64(block_count +
2015 (long long)segs * sbi->blocks_per_seg);
46d9ce19
QS
2016 if (f2fs_is_multi_device(sbi)) {
2017 int last_dev = sbi->s_ndevs - 1;
2018 int dev_segs =
2019 le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2020
2021 raw_sb->devs[last_dev].total_segments =
2022 cpu_to_le32(dev_segs + segs);
2023 }
a4ba5dfc 2024
e4544b63 2025 f2fs_up_write(&sbi->sb_lock);
04f0b2ea
QS
2026}
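
A small sketch (not part of gc.c) of the unit conversion used by update_sb_metadata() above and update_fs_metadata() below: sections become segments via segs_per_sec and blocks via blocks_per_seg, so a negative secs (as passed for a shrink) yields a negative delta. The helper name is hypothetical.

static long long sketch_secs_to_block_delta(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;

	/* widen before multiplying so large deltas do not overflow int */
	return (long long)segs * sbi->blocks_per_seg;
}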
2027
2028static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2029{
2030 int segs = secs * sbi->segs_per_sec;
46d9ce19 2031 long long blks = (long long)segs * sbi->blocks_per_seg;
04f0b2ea
QS
2032 long long user_block_count =
2033 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2034
2035 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2036 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
b4b10061 2037 MAIN_SECS(sbi) += secs;
04f0b2ea
QS
2038 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2039 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
46d9ce19
QS
2040 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2041
2042 if (f2fs_is_multi_device(sbi)) {
2043 int last_dev = sbi->s_ndevs - 1;
2044
2045 FDEV(last_dev).total_segments =
2046 (int)FDEV(last_dev).total_segments + segs;
2047 FDEV(last_dev).end_blk =
2048 (long long)FDEV(last_dev).end_blk + blks;
2049#ifdef CONFIG_BLK_DEV_ZONED
2050 FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
2051 (int)(blks >> sbi->log_blocks_per_blkz);
2052#endif
2053 }
04f0b2ea
QS
2054}
2055
2056int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
2057{
2058 __u64 old_block_count, shrunk_blocks;
b4b10061 2059 struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
04f0b2ea 2060 unsigned int secs;
04f0b2ea
QS
2061 int err = 0;
2062 __u32 rem;
2063
2064 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2065 if (block_count > old_block_count)
2066 return -EINVAL;
2067
46d9ce19
QS
2068 if (f2fs_is_multi_device(sbi)) {
2069 int last_dev = sbi->s_ndevs - 1;
2070 __u64 last_segs = FDEV(last_dev).total_segments;
2071
2072 if (block_count + last_segs * sbi->blocks_per_seg <=
2073 old_block_count)
2074 return -EINVAL;
2075 }
2076
04f0b2ea
QS
 2077 /* new fs size should be aligned to the section size */
2078 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2079 if (rem)
2080 return -EINVAL;
2081
2082 if (block_count == old_block_count)
2083 return 0;
2084
2085 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
dcbb4c10 2086 f2fs_err(sbi, "Should run fsck to repair first.");
10f966bb 2087 return -EFSCORRUPTED;
04f0b2ea
QS
2088 }
2089
2090 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
dcbb4c10 2091 f2fs_err(sbi, "Checkpoint should be enabled.");
04f0b2ea
QS
2092 return -EINVAL;
2093 }
2094
04f0b2ea
QS
2095 shrunk_blocks = old_block_count - block_count;
2096 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
b4b10061
JK
2097
2098 /* stop other GC */
e4544b63 2099 if (!f2fs_down_write_trylock(&sbi->gc_lock))
b4b10061
JK
2100 return -EAGAIN;
2101
2102 /* stop CP to protect MAIN_SEC in free_segment_range */
2103 f2fs_lock_op(sbi);
3ab0598e
CY
2104
2105 spin_lock(&sbi->stat_lock);
2106 if (shrunk_blocks + valid_user_blocks(sbi) +
2107 sbi->current_reserved_blocks + sbi->unusable_block_count +
2108 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2109 err = -ENOSPC;
2110 spin_unlock(&sbi->stat_lock);
2111
2112 if (err)
2113 goto out_unlock;
2114
b4b10061 2115 err = free_segment_range(sbi, secs, true);
3ab0598e
CY
2116
2117out_unlock:
b4b10061 2118 f2fs_unlock_op(sbi);
e4544b63 2119 f2fs_up_write(&sbi->gc_lock);
b4b10061
JK
2120 if (err)
2121 return err;
2122
2123 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2124
2125 freeze_super(sbi->sb);
e4544b63
TM
2126 f2fs_down_write(&sbi->gc_lock);
2127 f2fs_down_write(&sbi->cp_global_sem);
b4b10061 2128
04f0b2ea
QS
2129 spin_lock(&sbi->stat_lock);
2130 if (shrunk_blocks + valid_user_blocks(sbi) +
2131 sbi->current_reserved_blocks + sbi->unusable_block_count +
2132 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2133 err = -ENOSPC;
2134 else
2135 sbi->user_block_count -= shrunk_blocks;
2136 spin_unlock(&sbi->stat_lock);
b4b10061
JK
2137 if (err)
2138 goto out_err;
04f0b2ea 2139
b4b10061 2140 err = free_segment_range(sbi, secs, false);
04f0b2ea 2141 if (err)
b4b10061 2142 goto recover_out;
04f0b2ea
QS
2143
2144 update_sb_metadata(sbi, -secs);
2145
2146 err = f2fs_commit_super(sbi, false);
2147 if (err) {
2148 update_sb_metadata(sbi, secs);
b4b10061 2149 goto recover_out;
04f0b2ea
QS
2150 }
2151
2152 update_fs_metadata(sbi, -secs);
2153 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
68275682 2154 set_sbi_flag(sbi, SBI_IS_DIRTY);
68275682 2155
b4b10061 2156 err = f2fs_write_checkpoint(sbi, &cpc);
04f0b2ea
QS
2157 if (err) {
2158 update_fs_metadata(sbi, secs);
2159 update_sb_metadata(sbi, secs);
2160 f2fs_commit_super(sbi, false);
2161 }
b4b10061 2162recover_out:
04f0b2ea
QS
2163 if (err) {
2164 set_sbi_flag(sbi, SBI_NEED_FSCK);
dcbb4c10 2165 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
04f0b2ea 2166
04f0b2ea
QS
2167 spin_lock(&sbi->stat_lock);
2168 sbi->user_block_count += shrunk_blocks;
2169 spin_unlock(&sbi->stat_lock);
2170 }
b4b10061 2171out_err:
e4544b63
TM
2172 f2fs_up_write(&sbi->cp_global_sem);
2173 f2fs_up_write(&sbi->gc_lock);
b4b10061 2174 thaw_super(sbi->sb);
04f0b2ea 2175 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
04f0b2ea
QS
2176 return err;
2177}
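
A small sketch (not part of gc.c) of the section-alignment rule f2fs_resize_fs() enforces with div_u64_rem(): a caller would round the requested block count down to a multiple of BLKS_PER_SEC(sbi) before asking for a resize. The helper name is hypothetical.

static __u64 sketch_align_resize_target(struct f2fs_sb_info *sbi,
					__u64 block_count)
{
	__u32 rem;

	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	return block_count - rem;	/* largest section-aligned size <= request */
}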