// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"

#include <trace/events/ext4.h>

/*
 * According to previous discussion at the Ext4 Developer Workshop, we
 * will introduce a new structure called the io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reserved space warnings) and to provide extent-level locking.
 * The delay extent tree was the first step toward this goal. It was
 * originally built by Yongqiang Yang. At that time its only goal was to
 * track delayed extents in memory in order to simplify the
 * implementation of fiemap and bigalloc, and to introduce
 * lseek SEEK_DATA/SEEK_HOLE support. That is why it was still called the
 * delay extent tree in the first commit. But to better convey what it
 * does, it has since been renamed to the extent status tree.
 *
 * Step 1:
 * The first step is done. All delayed extents are tracked in the tree.
 * The tree maintains a delayed extent from the moment a delayed
 * allocation is issued until the delayed extent is written out or
 * invalidated. Therefore the implementations of fiemap and bigalloc
 * are simplified, and SEEK_DATA/SEEK_HOLE are supported.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step 2:
 * In this step all extent status is tracked by the extent status tree.
 * Thus, we can first try to look up a block mapping in this tree before
 * searching the extent tree. Hence, the single extent cache can be
 * removed because the extent status tree does a better job. Extents in
 * the status tree are loaded on demand, so the extent status tree may
 * not contain all of the extents in a file. Meanwhile we define a
 * shrinker to reclaim memory from the extent status tree because a
 * fragmented extent tree would make the status tree consume too much
 * memory. Written/unwritten/hole extents in the tree will be reclaimed
 * by this shrinker when we are under high memory pressure. Delayed
 * extents will not be reclaimed because fiemap, bigalloc, and
 * seek_data/hole need them.
 */

/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * The extent status tree tracks all extent status.
 *
 * 1. Why do we need an extent status tree?
 *
 * Without the extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which leads to complicated, buggy,
 * and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know whether
 * a block or a range of blocks belongs to a delayed extent.
 *
 * Let us look at how they work without the extent status tree.
 * --	FIEMAP
 *	FIEMAP looks up the page cache to tell delayed allocations from
 *	holes.
 *
 * --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 * --	bigalloc
 *	bigalloc looks up the page cache to figure out whether a block is
 *	already under delayed allocation, in order to determine whether
 *	quota reservation is needed for the cluster.
 *
 * --	writeout
 *	Writeout looks up the whole page cache to see whether a buffer is
 *	mapped. If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out whether a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) by
 * searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 * --	extent
 *	An extent is a range of blocks which are contiguous both logically
 *	and physically. Unlike an extent in the extent tree, this extent is
 *	an in-memory struct; there is no corresponding on-disk data. There
 *	is no limit on the length of an extent, so an extent can contain as
 *	many blocks as are contiguous logically and physically.
 *
 * --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with a status. The extents in the tree are
 *	ordered by logical block number.
 *
 * --	operations on an extent status tree
 *	There are three important operations on an extent status tree: find
 *	the next extent, add an extent (a range of blocks), and remove an
 *	extent.
 *
 * --	race on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 * --	memory consumption
 *	A fragmented extent tree would make the extent status tree consume
 *	too much memory. Hence, we reclaim written/unwritten/hole extents
 *	from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 * --	overhead
 *	1. There is a cached extent for write access, so if writes are
 *	   not very random, adding space operations run in O(1) time.
 *
 * --	gain
 *	1. Code is much simpler, more readable, more maintainable and
 *	   more efficient.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 * --	Refactor delayed space reservation
 *
 * --	Extent-level locking
 */

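/*
 * An illustrative userspace sketch (not kernel code) of the
 * SEEK_DATA/SEEK_HOLE support that the delayed extents tracked here make
 * possible on not-yet-allocated ranges:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	off_t next_data(int fd, off_t pos)
 *	{
 *		return lseek(fd, pos, SEEK_DATA);
 *	}
 *
 * next_data() returns the next offset at or after pos that contains data
 * (delayed or allocated), or -1 with errno set to ENXIO past EOF. Without
 * the status tree, answering this for a dirty, delayed-allocated page
 * would require scanning the page cache.
 */
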
static struct kmem_cache *ext4_es_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);

int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	if (ext4_es_cachep)
		kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;
		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search the tree for an extent at a given logical block. If it can't be
 * found, return the next extent instead (or NULL if there is none).
 */
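/*
 * For example (an illustrative sketch, values made up): with extents
 * [0/10) and [20/5) in the tree, searching for lblk 3 returns [0/10),
 * searching for lblk 12 returns the next extent [20/5), and searching
 * for lblk 30 returns NULL.
 */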
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}

/*
 * ext4_es_find_delayed_extent_range: find the first delayed extent
 * covering @lblk if it exists, otherwise the next delayed extent after
 * @lblk.
 *
 * @inode: the inode which owns delayed extents
 * @lblk: the offset where we start to search
 * @end: the offset where we stop to search
 * @es: delayed extent that we found
 */
void ext4_es_find_delayed_extent_range(struct inode *inode,
				       ext4_lblk_t lblk, ext4_lblk_t end,
				       struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	BUG_ON(es == NULL);
	BUG_ON(end < lblk);
	trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !ext4_es_is_delayed(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (ext4_es_is_delayed(es1))
				break;
		}
	}

	if (es1 && ext4_es_is_delayed(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_delayed_extent_range_exit(inode, es);
}

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;
	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim
	 * them.
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged.
 * Conditions:
 * - logical block numbers are contiguous
 * - physical block numbers are contiguous
 * - status is equal
 */
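/*
 * For example (an illustrative sketch, values made up): a written extent
 * [0/8) at physical block 1000 and a written extent [8/4) at physical
 * block 1008 can be merged into [0/12) at 1000. If the second extent
 * started at physical block 1100 instead, only the logical blocks would
 * be contiguous and the extents would be kept separate.
 */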
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* a delayed extent can be merged only if it is not also unwritten */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {

		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, or vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk. So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so we
	 * can't access the direct/indirect tree from outside it, and it
	 * would be too ugly to define this function in indirect.c.
	 */

	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG_ON(1);
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about races because the caller holds
	 * i_data_sem.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because the two ranges don't overlap.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG_ON(1);
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
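/*
 * An illustrative call (a sketch based on how callers such as
 * ext4_map_blocks() use this API; the EXTENT_STATUS_* value depends on
 * the mapping that was just established):
 *
 *	ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
 *				    map->m_pblk, EXTENT_STATUS_WRITTEN);
 */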
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				"delayed and written, which can potentially "
				"cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not.
 */
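/*
 * An illustrative call (a sketch based on how ext4_map_blocks() consults
 * the tree before touching the on-disk extent tree):
 *
 *	struct extent_status es;
 *
 *	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
 *		... the status tree answered without any disk access;
 *		... es.es_lblk/es.es_len/es.es_pblk describe the extent.
 *	}
 */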
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find extent in cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		stats->es_stats_cache_hits++;
	} else {
		stats->es_stats_cache_misses++;
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}

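/*
 * Remove [lblk, end] from the tree. An existing extent may be trimmed at
 * either side or split in two. For example (an illustrative sketch,
 * values made up): removing [10, 19] from an extent [5/20) leaves two
 * pieces, [5/5) (len1 == 5 blocks before lblk) and [20/5) (len2 == 5
 * blocks after end). The original node keeps the left piece and a new
 * node is inserted for the right piece, with its physical block shifted
 * accordingly for written/unwritten extents.
 */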
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err;

retry:
	err = 0;
	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
						128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		goto out;
	}

	if (len1 > 0) {
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

out:
	return err;
}

/*
 * ext4_es_remove_extent() removes a range of blocks from an extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	return err;
}

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock, which protects us from inode
		 * reclaim freeing the inode under us.
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
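	/*
	 * The stats below keep exponentially weighted moving averages with
	 * a weight of 1/4 on the newest sample:
	 * new_avg = (sample + 3 * old_avg) / 4. For example, with an old
	 * average of 800 ns and a new scan of 1200 ns, the average becomes
	 * (1200 + 3 * 800) / 4 = 900 ns.
	 */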
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	if (!nr_to_scan)
		return ret;

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lu/%lu cache hits/misses\n",
		   es_stats->es_stats_cache_hits,
		   es_stats->es_stats_cache_misses);
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	sbi->s_es_stats.es_stats_cache_hits = 0;
	sbi->s_es_stats.es_stats_cache_misses = 0;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err1;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err2;

	return 0;

err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents. Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;
	node = &es->rb_node;
	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

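	/*
	 * Scan from where we stopped last time to the end of the file, and
	 * if that doesn't exhaust nr_to_scan, wrap around and scan the
	 * range we skipped at the beginning.
	 */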
	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}