/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
					kthread_should_stop(),
					msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the # of writeback pages.
		 * 3. The IO subsystem is idle, judged by the # of requests in the
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon after by
		 * user updates or deletions. So we wait a while to let
		 * dirty segments accumulate before collecting them.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

#ifdef CONFIG_F2FS_STAT_FS
		sbi->bg_gc++;
#endif

		/* if the return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
	} while (!kthread_should_stop());
	return 0;
}
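
/*
 * Illustrative note (added in editing, not part of the original source):
 * increase_sleep_time() and decrease_sleep_time() are helpers from gc.h
 * that are not shown in this file.  A minimal sketch of the behaviour the
 * loop above relies on, assuming the GC_THREAD_{MIN,MAX}_SLEEP_TIME
 * bounds, could look like:
 *
 *	static inline long increase_sleep_time(long wait)
 *	{
 *		wait += GC_THREAD_MIN_SLEEP_TIME;
 *		if (wait > GC_THREAD_MAX_SLEEP_TIME)
 *			wait = GC_THREAD_MAX_SLEEP_TIME;
 *		return wait;
 *	}
 *
 * decrease_sleep_time() would mirror this in the other direction, never
 * dropping below GC_THREAD_MIN_SLEEP_TIME.  The net effect is that the
 * background thread polls quickly while invalid blocks accumulate, backs
 * off while the device is busy or mostly clean, and parks at
 * GC_THREAD_NOGC_SLEEP_TIME when no victim could be selected.
 */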

int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}

out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(int gc_type)
{
	return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
	p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC, since those sections are
	 * guaranteed to have only a few valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
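
/*
 * Illustrative note (added in editing, not part of the original source):
 * the return value above is a cost-benefit heuristic scaled so that a
 * *smaller* value means a *better* victim:
 *
 *	cost = UINT_MAX - (100 * (100 - u) * age) / (100 + u)
 *
 * where u is the section's utilization in percent and age its normalized
 * age (older sections get a larger age).  Worked example: a cold, mostly
 * invalid section with u = 20 and age = 50 gives a benefit term of
 * 100 * 80 * 50 / 120 = 3333, i.e. a cost near UINT_MAX - 3333, which
 * ranks better (lower) than a hot or nearly full section whose benefit
 * term approaches 0 and whose cost therefore stays close to UINT_MAX.
 */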

static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
			struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called for GC, it just picks a victim segment and does not remove
 * it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == max_cost)
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
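
/*
 * Illustrative note (added in editing, not part of the original source):
 * the search above is bounded rather than exhaustive.  Candidates whose
 * cost equals max_cost (e.g. a fully valid section under GC_GREEDY) are
 * skipped and do not count toward the MAX_VICTIM_SEARCH budget.  When
 * that budget is exhausted, the current position is saved in
 * sbi->last_victim[] so the next invocation resumes the scan from there
 * (via select_policy()) instead of rescanning from segment 0.
 */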

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode->i_ino == ino)
			return ie->inode;
	}
	return NULL;
}

static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct list_head *this;
	struct inode_entry *new_ie, *ie;

	list_for_each(this, ilist) {
		ie = list_entry(this, struct inode_entry, list);
		if (ie->inode == inode) {
			iput(inode);
			return;
		}
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}

static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If they match (the node is valid), the node is written
 * back with cold status; otherwise the invalid node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set the page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_submit_bio(sbi, NODE, true);
			wait_on_page_writeback(node_page);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block only. Passing an offset that points to any other type of
 * node block, such as an indirect or double indirect node block, is a
 * caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}
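
/*
 * Illustrative note (added in editing, not part of the original source):
 * worked examples of the mapping above.  node_ofs = 1 (the first direct
 * node) gives bidx = 0, so its data starts right after the
 * ADDRS_PER_INODE blocks addressed by the inode itself.  node_ofs = 4
 * (the first direct node hanging off the first indirect node) gives
 * dec = 0 and bidx = 4 - 2 - 0 = 2, i.e. a start index of
 * 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE.  The "dec" terms subtract the
 * indirect and double indirect node blocks seen so far, since those hold
 * node pointers rather than data block addresses.
 */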

static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}

static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		if (PageWriteback(page)) {
			f2fs_submit_bio(sbi, DATA, true);
			wait_on_page_writeback(page);
		}

		if (clear_page_dirty_for_io(page) &&
					S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks whether the data block is still valid. If it is, the block is
 * copied out with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get the inode by ino, checking validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}

static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}

int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}
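
/*
 * Illustrative note (added in editing, not part of the original source):
 * f2fs_gc() is entered with sbi->gc_mutex already held by the caller (see
 * the mutex_trylock() in gc_thread_func() above) and always drops it at
 * the "stop" label.  Each pass collects one victim section (segs_per_sec
 * segments); if free sections are still scarce the "goto gc_more" loop
 * escalates from BG_GC to FG_GC, writing a checkpoint before and after
 * foreground collection so the reclaimed segments become reusable.  The
 * return value is 0 when a victim was processed and -1 when no victim
 * could be selected.
 */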

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}

int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}

void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}