]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - repair/incore_ext.c
libxfs: refactor manage_zones()
[thirdparty/xfsprogs-dev.git] / repair / incore_ext.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7 #include "libxfs.h"
8 #include "avl.h"
9 #include "btree.h"
10 #include "globals.h"
11 #include "incore.h"
12 #include "agheader.h"
13 #include "protos.h"
14 #include "err_protos.h"
15 #include "avl64.h"
16 #include "threads.h"
17
18 /*
19 * note: there are 4 sets of incore things handled here:
20 * block bitmaps, extent trees, uncertain inode list,
21 * and inode tree. The tree-based code uses the AVL
22 * tree package used by the IRIX kernel VM code
23 * (sys/avl.h). The inode list code uses the same records
24 * as the inode tree code for convenience. The bitmaps
25 * and bitmap operators are mostly macros defined in incore.h.
26 * There are one of everything per AG except for extent
27 * trees. There's one duplicate extent tree, one bno and
28 * one bcnt extent tree per AG. Not all of the above exist
29 * through all phases. The duplicate extent tree gets trashed
30 * at the end of phase 4. The bno/bcnt trees don't appear until
31 * phase 5. The uncertain inode list goes away at the end of
32 * phase 3. The inode tree and bno/bnct trees go away after phase 5.
33 */
34
35 static avl64tree_desc_t *rt_ext_tree_ptr; /* dup extent tree for rt */
36 static pthread_mutex_t rt_ext_tree_lock;
37
38 static struct btree_root **dup_extent_trees; /* per ag dup extent trees */
39 static pthread_mutex_t *dup_extent_tree_locks;
40
41 static avltree_desc_t **extent_bno_ptrs; /*
42 * array of extent tree ptrs
43 * one per ag for free extents
44 * sorted by starting block
45 * number
46 */
47 static avltree_desc_t **extent_bcnt_ptrs; /*
48 * array of extent tree ptrs
49 * one per ag for free extents
50 * sorted by size
51 */
52
53 /*
54 * duplicate extent tree functions
55 */
56
57 void
58 release_dup_extent_tree(
59 xfs_agnumber_t agno)
60 {
61 pthread_mutex_lock(&dup_extent_tree_locks[agno]);
62 btree_clear(dup_extent_trees[agno]);
63 pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
64 }
65
66 int
67 add_dup_extent(
68 xfs_agnumber_t agno,
69 xfs_agblock_t startblock,
70 xfs_extlen_t blockcount)
71 {
72 int ret;
73 #ifdef XR_DUP_TRACE
74 fprintf(stderr, "Adding dup extent - %d/%d %d\n", agno, startblock,
75 blockcount);
76 #endif
77 pthread_mutex_lock(&dup_extent_tree_locks[agno]);
78 ret = btree_insert(dup_extent_trees[agno], startblock,
79 (void *)(uintptr_t)(startblock + blockcount));
80 pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
81 return ret;
82 }
83
84 int
85 search_dup_extent(
86 xfs_agnumber_t agno,
87 xfs_agblock_t start_agbno,
88 xfs_agblock_t end_agbno)
89 {
90 unsigned long bno;
91 int ret;
92
93 pthread_mutex_lock(&dup_extent_tree_locks[agno]);
94 if (!btree_find(dup_extent_trees[agno], start_agbno, &bno)) {
95 ret = 0;
96 goto out; /* this really shouldn't happen */
97 }
98 if (bno < end_agbno) {
99 ret = 1;
100 goto out;
101 }
102 ret = (uintptr_t)btree_peek_prev(dup_extent_trees[agno], NULL) >
103 start_agbno;
104 out:
105 pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
106 return ret;
107 }
108
109
110 /*
111 * extent tree stuff is avl trees of duplicate extents,
112 * sorted in order by block number. there is one tree per ag.
113 */
114
115 static extent_tree_node_t *
116 mk_extent_tree_nodes(xfs_agblock_t new_startblock,
117 xfs_extlen_t new_blockcount, extent_state_t new_state)
118 {
119 extent_tree_node_t *new;
120
121 new = malloc(sizeof(*new));
122 if (!new)
123 do_error(_("couldn't allocate new extent descriptor.\n"));
124
125 new->avl_node.avl_nextino = NULL;
126 new->ex_startblock = new_startblock;
127 new->ex_blockcount = new_blockcount;
128 new->ex_state = new_state;
129 new->next = NULL;
130 new->last = NULL;
131
132 return new;
133 }
134
135 void
136 release_extent_tree_node(extent_tree_node_t *node)
137 {
138 free(node);
139 }
140
141 /*
142 * routines to recycle all nodes in a tree. it walks the tree
143 * and puts all nodes back on the free list so the nodes can be
144 * reused. the duplicate and bno/bcnt extent trees for each AG
145 * are recycled after they're no longer needed to save memory
146 */
147 static void
148 release_extent_tree(avltree_desc_t *tree)
149 {
150 extent_tree_node_t *ext;
151 extent_tree_node_t *tmp;
152 extent_tree_node_t *lext;
153 extent_tree_node_t *ltmp;
154
155 if (tree->avl_firstino == NULL)
156 return;
157
158 ext = (extent_tree_node_t *) tree->avl_firstino;
159
160 while (ext != NULL) {
161 tmp = (extent_tree_node_t *) ext->avl_node.avl_nextino;
162
163 /*
164 * ext->next is guaranteed to be set only in bcnt trees
165 */
166 if (ext->next != NULL) {
167 lext = ext->next;
168 while (lext != NULL) {
169 ltmp = lext->next;
170 release_extent_tree_node(lext);
171 lext = ltmp;
172 }
173 }
174
175 release_extent_tree_node(ext);
176 ext = tmp;
177 }
178
179 tree->avl_root = tree->avl_firstino = NULL;
180
181 return;
182 }
183
184 /*
185 * top-level (visible) routines
186 */
187 void
188 release_agbno_extent_tree(xfs_agnumber_t agno)
189 {
190 release_extent_tree(extent_bno_ptrs[agno]);
191
192 return;
193 }
194
195 void
196 release_agbcnt_extent_tree(xfs_agnumber_t agno)
197 {
198 release_extent_tree(extent_bcnt_ptrs[agno]);
199
200 return;
201 }
202
203 /*
204 * the next 4 routines manage the trees of free extents -- 2 trees
205 * per AG. The first tree is sorted by block number. The second
206 * tree is sorted by extent size. This is the bno tree.
207 */
208 void
209 add_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
210 xfs_extlen_t blockcount)
211 {
212 extent_tree_node_t *ext;
213
214 ASSERT(extent_bno_ptrs != NULL);
215 ASSERT(extent_bno_ptrs[agno] != NULL);
216
217 ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_FREE);
218
219 if (avl_insert(extent_bno_ptrs[agno], (avlnode_t *) ext) == NULL) {
220 do_error(_("duplicate bno extent range\n"));
221 }
222 }
223
224 extent_tree_node_t *
225 findfirst_bno_extent(xfs_agnumber_t agno)
226 {
227 ASSERT(extent_bno_ptrs != NULL);
228 ASSERT(extent_bno_ptrs[agno] != NULL);
229
230 return((extent_tree_node_t *) extent_bno_ptrs[agno]->avl_firstino);
231 }
232
233 extent_tree_node_t *
234 find_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock)
235 {
236 ASSERT(extent_bno_ptrs != NULL);
237 ASSERT(extent_bno_ptrs[agno] != NULL);
238
239 return((extent_tree_node_t *) avl_find(extent_bno_ptrs[agno],
240 startblock));
241 }
242
243 /*
244 * delete a node that's in the tree (pointer obtained by a find routine)
245 */
246 void
247 get_bno_extent(xfs_agnumber_t agno, extent_tree_node_t *ext)
248 {
249 ASSERT(extent_bno_ptrs != NULL);
250 ASSERT(extent_bno_ptrs[agno] != NULL);
251
252 avl_delete(extent_bno_ptrs[agno], &ext->avl_node);
253
254 return;
255 }
256
257 /*
258 * the next 4 routines manage the trees of free extents -- 2 trees
259 * per AG. The first tree is sorted by block number. The second
260 * tree is sorted by extent size. This is the bcnt tree.
261 */
262 void
263 add_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
264 xfs_extlen_t blockcount)
265 {
266 extent_tree_node_t *ext, *prev, *current, *top;
267 xfs_agblock_t tmp_startblock;
268 xfs_extlen_t tmp_blockcount;
269 extent_state_t tmp_state;
270
271 ASSERT(extent_bcnt_ptrs != NULL);
272 ASSERT(extent_bcnt_ptrs[agno] != NULL);
273
274 ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_FREE);
275
276 ASSERT(ext->next == NULL);
277
278 #ifdef XR_BCNT_TRACE
279 fprintf(stderr, "adding bcnt: agno = %d, start = %u, count = %u\n",
280 agno, startblock, blockcount);
281 #endif
282 if ((current = (extent_tree_node_t *) avl_find(extent_bcnt_ptrs[agno],
283 blockcount)) != NULL) {
284 /*
285 * avl tree code doesn't handle dups so insert
286 * onto linked list in increasing startblock order
287 *
288 * when called from mk_incore_fstree,
289 * startblock is in increasing order.
290 * current is an "anchor" node.
291 * quick check if the new ext goes to the end.
292 * if so, append at the end, using the last field
293 * of the "anchor".
294 */
295 ASSERT(current->last != NULL);
296 if (startblock > current->last->ex_startblock) {
297 current->last->next = ext;
298 current->last = ext;
299 return;
300 }
301
302 /*
303 * scan, to find the proper location for new entry.
304 * this scan is *very* expensive and gets worse with
305 * with increasing entries.
306 */
307 top = prev = current;
308 while (current != NULL &&
309 startblock > current->ex_startblock) {
310 prev = current;
311 current = current->next;
312 }
313
314 if (top == current) {
315 ASSERT(top == prev);
316 /*
317 * new entry should be ahead of current.
318 * to keep the avl tree intact,
319 * swap the values of to-be-inserted element
320 * and the values of the head of the list.
321 * then insert as the 2nd element on the list.
322 *
323 * see the comment in get_bcnt_extent()
324 * as to why we have to do this.
325 */
326 tmp_startblock = top->ex_startblock;
327 tmp_blockcount = top->ex_blockcount;
328 tmp_state = top->ex_state;
329
330 top->ex_startblock = ext->ex_startblock;
331 top->ex_blockcount = ext->ex_blockcount;
332 top->ex_state = ext->ex_state;
333
334 ext->ex_startblock = tmp_startblock;
335 ext->ex_blockcount = tmp_blockcount;
336 ext->ex_state = tmp_state;
337
338 current = top->next;
339 prev = top;
340 }
341
342 prev->next = ext;
343 ext->next = current;
344
345 return;
346 }
347
348 if (avl_insert(extent_bcnt_ptrs[agno], (avlnode_t *) ext) == NULL) {
349 do_error(_(": duplicate bno extent range\n"));
350 }
351
352 ext->last = ext; /* ext is an "anchor" node */
353
354 return;
355 }
356
357 extent_tree_node_t *
358 findfirst_bcnt_extent(xfs_agnumber_t agno)
359 {
360 ASSERT(extent_bcnt_ptrs != NULL);
361 ASSERT(extent_bcnt_ptrs[agno] != NULL);
362
363 return((extent_tree_node_t *) extent_bcnt_ptrs[agno]->avl_firstino);
364 }
365
366 extent_tree_node_t *
367 findbiggest_bcnt_extent(xfs_agnumber_t agno)
368 {
369 ASSERT(extent_bcnt_ptrs != NULL);
370 ASSERT(extent_bcnt_ptrs[agno] != NULL);
371
372 return((extent_tree_node_t *) avl_lastino(extent_bcnt_ptrs[agno]->avl_root));
373 }
374
375 extent_tree_node_t *
376 findnext_bcnt_extent(xfs_agnumber_t agno, extent_tree_node_t *ext)
377 {
378 avlnode_t *nextino;
379
380 if (ext->next != NULL) {
381 ASSERT(ext->ex_blockcount == ext->next->ex_blockcount);
382 ASSERT(ext->ex_startblock < ext->next->ex_startblock);
383 return(ext->next);
384 } else {
385 /*
386 * have to look at the top of the list to get the
387 * correct avl_nextino pointer since that pointer
388 * is maintained and altered by the AVL code.
389 */
390 nextino = avl_find(extent_bcnt_ptrs[agno], ext->ex_blockcount);
391 ASSERT(nextino != NULL);
392 if (nextino->avl_nextino != NULL) {
393 ASSERT(ext->ex_blockcount < ((extent_tree_node_t *)
394 nextino->avl_nextino)->ex_blockcount);
395 }
396 return((extent_tree_node_t *) nextino->avl_nextino);
397 }
398 }
399
400 /*
401 * this is meant to be called after you walk the bno tree to
402 * determine exactly which extent you want (so you'll know the
403 * desired value for startblock when you call this routine).
404 */
/*
 * look up, unlink, and return the extent matching (startblock,
 * blockcount) from the bcnt tree; returns NULL only when no extent of
 * that blockcount exists at all.
 */
extent_tree_node_t *
get_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount)
{
	extent_tree_node_t *ext, *prev, *top;
	xfs_agblock_t tmp_startblock;
	xfs_extlen_t tmp_blockcount;
	extent_state_t tmp_state;

	prev = NULL;
	ASSERT(extent_bcnt_ptrs != NULL);
	ASSERT(extent_bcnt_ptrs[agno] != NULL);

	/* locate the in-tree anchor node for this blockcount */
	if ((ext = (extent_tree_node_t *) avl_find(extent_bcnt_ptrs[agno],
			blockcount)) == NULL)
		return(NULL);

	top = ext;

	if (ext->next != NULL)  {
		/*
		 * pull it off the list
		 */
		/* scan the same-size list for the matching startblock */
		while (ext != NULL && startblock != ext->ex_startblock)  {
			prev = ext;
			ext = ext->next;
		}
		/* caller is expected to pass a (start, count) that exists */
		ASSERT(ext != NULL);
		if (ext == top)  {
			/*
			 * this node is linked into the tree so we
			 * swap the core values so we can delete
			 * the next item on the list instead of
			 * the head of the list. This is because
			 * the rest of the tree undoubtedly has
			 * pointers to the piece of memory that
			 * is the head of the list so pulling
			 * the item out of the list and hence
			 * the avl tree would be a bad idea.
			 *
			 * (cheaper than the alternative, a tree
			 * delete of this node followed by a tree
			 * insert of the next node on the list).
			 */
			tmp_startblock = ext->next->ex_startblock;
			tmp_blockcount = ext->next->ex_blockcount;
			tmp_state = ext->next->ex_state;

			ext->next->ex_startblock = ext->ex_startblock;
			ext->next->ex_blockcount = ext->ex_blockcount;
			ext->next->ex_state = ext->ex_state;

			ext->ex_startblock = tmp_startblock;
			ext->ex_blockcount = tmp_blockcount;
			ext->ex_state = tmp_state;

			/* now remove the (former) second element instead */
			ext = ext->next;
			prev = top;
		}
		/*
		 * now, a simple list deletion
		 *
		 * NOTE(review): the anchor's ->last append-cache is not
		 * adjusted when a list element is removed here -- confirm
		 * callers never call add_bcnt_extent() for this size after
		 * removals, or that the cache is otherwise refreshed.
		 */
		prev->next = ext->next;
		ext->next = NULL;
	} else  {
		/*
		 * no list, just one node. simply delete
		 */
		avl_delete(extent_bcnt_ptrs[agno], &ext->avl_node);
	}

	ASSERT(ext->ex_startblock == startblock);
	ASSERT(ext->ex_blockcount == blockcount);
	return(ext);
}
480
481 static uintptr_t
482 avl_ext_start(avlnode_t *node)
483 {
484 return((uintptr_t)
485 ((extent_tree_node_t *) node)->ex_startblock);
486 }
487
488 static uintptr_t
489 avl_ext_end(avlnode_t *node)
490 {
491 return((uintptr_t) (
492 ((extent_tree_node_t *) node)->ex_startblock +
493 ((extent_tree_node_t *) node)->ex_blockcount));
494 }
495
496 /*
497 * convert size to an address for the AVL tree code -- the bigger the size,
498 * the lower the address so the biggest extent will be first in the tree
499 */
500 static uintptr_t
501 avl_ext_bcnt_start(avlnode_t *node)
502 {
503 /*
504 return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
505 node)->ex_blockcount)));
506 */
507 return((uintptr_t) ((extent_tree_node_t *)node)->ex_blockcount);
508 }
509
510 static uintptr_t
511 avl_ext_bcnt_end(avlnode_t *node)
512 {
513 /*
514 return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
515 node)->ex_blockcount)));
516 */
517 return((uintptr_t) ((extent_tree_node_t *)node)->ex_blockcount);
518 }
519
/* avl ops for the by-size (bcnt) free extent trees: key is ex_blockcount */
static avlops_t avl_extent_bcnt_tree_ops = {
	avl_ext_bcnt_start,
	avl_ext_bcnt_end
};
524
/* avl ops for the by-bno free extent trees: keyed by agbno range */
static avlops_t avl_extent_tree_ops = {
	avl_ext_start,
	avl_ext_end
};
529
530 /*
531 * for real-time extents -- have to dup code since realtime extent
532 * startblocks can be 64-bit values.
533 */
534 static rt_extent_tree_node_t *
535 mk_rt_extent_tree_nodes(xfs_rtblock_t new_startblock,
536 xfs_extlen_t new_blockcount, extent_state_t new_state)
537 {
538 rt_extent_tree_node_t *new;
539
540 new = malloc(sizeof(*new));
541 if (!new)
542 do_error(_("couldn't allocate new extent descriptor.\n"));
543
544 new->avl_node.avl_nextino = NULL;
545 new->rt_startblock = new_startblock;
546 new->rt_blockcount = new_blockcount;
547 new->rt_state = new_state;
548 return new;
549 }
550
#if 0
/*
 * Disabled: the single realtime dup tree is torn down wholesale by
 * free_rt_dup_extent_tree() below, so per-node teardown is unused.
 */
void
release_rt_extent_tree_node(rt_extent_tree_node_t *node)
{
	free(node);
}

void
release_rt_extent_tree()
{
	/*
	 * NOTE(review): walks the tree using extent_tree_node_t rather
	 * than rt_extent_tree_node_t; fix the types if this code is
	 * ever re-enabled.
	 */
	extent_tree_node_t *ext;
	extent_tree_node_t *tmp;
	extent_tree_node_t *lext;
	extent_tree_node_t *ltmp;
	avl64tree_desc_t *tree;

	tree = rt_extent_tree_ptr;

	if (tree->avl_firstino == NULL)
		return;

	ext = (extent_tree_node_t *) tree->avl_firstino;

	while (ext != NULL)  {
		tmp = (extent_tree_node_t *) ext->avl_node.avl_nextino;
		release_rt_extent_tree_node(ext);
		ext = tmp;
	}

	tree->avl_root = tree->avl_firstino = NULL;

	return;
}
#endif
585
586 /*
587 * don't need release functions for realtime tree teardown
588 * since we only have one tree, not one per AG
589 */
590 /* ARGSUSED */
591 void
592 free_rt_dup_extent_tree(xfs_mount_t *mp)
593 {
594 ASSERT(mp->m_sb.sb_rblocks != 0);
595 free(rt_ext_tree_ptr);
596 rt_ext_tree_ptr = NULL;
597 }
598
599 /*
600 * add a duplicate real-time extent
601 */
void
add_rt_dup_extent(xfs_rtblock_t startblock, xfs_extlen_t blockcount)
{
	rt_extent_tree_node_t *first, *last, *ext, *next_ext;
	xfs_rtblock_t new_startblock;
	xfs_extlen_t new_blockcount;

	pthread_mutex_lock(&rt_ext_tree_lock);
	/*
	 * search one block past each end of the new range so adjacent
	 * extents are picked up along with overlapping ones.
	 * NOTE(review): startblock - 1 wraps when startblock == 0 --
	 * confirm callers never pass rt block 0, or that
	 * avl64_findranges() tolerates the wrapped low bound.
	 */
	avl64_findranges(rt_ext_tree_ptr, startblock - 1,
		startblock + blockcount + 1,
		(avl64node_t **) &first, (avl64node_t **) &last);
	/*
	 * find adjacent and overlapping extent blocks
	 */
	if (first == NULL && last == NULL)  {
		/* nothing, just make and insert new extent */

		ext = mk_rt_extent_tree_nodes(startblock,
				blockcount, XR_E_MULT);

		if (avl64_insert(rt_ext_tree_ptr,
				(avl64node_t *) ext) == NULL)  {
			do_error(_("duplicate extent range\n"));
		}

		pthread_mutex_unlock(&rt_ext_tree_lock);
		return;
	}

	ASSERT(first != NULL && last != NULL);

	/*
	 * find the new composite range, delete old extent nodes
	 * as we go
	 */
	new_startblock = startblock;
	new_blockcount = blockcount;

	for (ext = first;
		ext != (rt_extent_tree_node_t *) last->avl_node.avl_nextino;
		ext = next_ext)  {
		/*
		 * preserve the next inorder node
		 */
		next_ext = (rt_extent_tree_node_t *) ext->avl_node.avl_nextino;
		/*
		 * just bail if the new extent is contained within an old one
		 *
		 * NOTE(review): this compares lengths, not end offsets; an
		 * existing extent that starts earlier but ends before
		 * startblock + blockcount still takes this early return --
		 * confirm that matches the intended containment test.
		 */
		if (ext->rt_startblock <= startblock &&
				ext->rt_blockcount >= blockcount)  {
			pthread_mutex_unlock(&rt_ext_tree_lock);
			return;
		}
		/*
		 * now check for overlaps and adjacent extents
		 *
		 * NOTE(review): written with ||, this condition holds for
		 * every candidate avl64_findranges() returned; && would
		 * state "overlapping or adjacent" precisely.  Behavior
		 * currently relies on findranges() having pre-filtered.
		 */
		if (ext->rt_startblock + ext->rt_blockcount >= startblock
				|| ext->rt_startblock <= startblock + blockcount)  {

			/* grow the composite range to cover this extent */
			if (ext->rt_startblock < new_startblock)
				new_startblock = ext->rt_startblock;

			if (ext->rt_startblock + ext->rt_blockcount >
					new_startblock + new_blockcount)
				new_blockcount = ext->rt_startblock +
						ext->rt_blockcount -
						new_startblock;

			avl64_delete(rt_ext_tree_ptr, (avl64node_t *) ext);
			continue;
		}
	}

	/* insert the coalesced replacement extent */
	ext = mk_rt_extent_tree_nodes(new_startblock,
			new_blockcount, XR_E_MULT);

	if (avl64_insert(rt_ext_tree_ptr, (avl64node_t *) ext) == NULL)  {
		do_error(_("duplicate extent range\n"));
	}

	pthread_mutex_unlock(&rt_ext_tree_lock);
	return;
}
685
686 /*
687 * returns 1 if block is a dup, 0 if not
688 */
689 /* ARGSUSED */
690 int
691 search_rt_dup_extent(xfs_mount_t *mp, xfs_rtblock_t bno)
692 {
693 int ret;
694
695 pthread_mutex_lock(&rt_ext_tree_lock);
696 if (avl64_findrange(rt_ext_tree_ptr, bno) != NULL)
697 ret = 1;
698 else
699 ret = 0;
700 pthread_mutex_unlock(&rt_ext_tree_lock);
701 return(ret);
702 }
703
704 static uint64_t
705 avl64_rt_ext_start(avl64node_t *node)
706 {
707 return(((rt_extent_tree_node_t *) node)->rt_startblock);
708 }
709
710 static uint64_t
711 avl64_ext_end(avl64node_t *node)
712 {
713 return(((rt_extent_tree_node_t *) node)->rt_startblock +
714 ((rt_extent_tree_node_t *) node)->rt_blockcount);
715 }
716
/* avl64 ops for the realtime dup extent tree (64-bit start blocks) */
static avl64ops_t avl64_extent_tree_ops = {
	avl64_rt_ext_start,
	avl64_ext_end
};
721
722 void
723 incore_ext_init(xfs_mount_t *mp)
724 {
725 int i;
726 xfs_agnumber_t agcount = mp->m_sb.sb_agcount;
727
728 pthread_mutex_init(&rt_ext_tree_lock, NULL);
729
730 dup_extent_trees = calloc(agcount, sizeof(struct btree_root *));
731 if (!dup_extent_trees)
732 do_error(_("couldn't malloc dup extent tree descriptor table\n"));
733
734 dup_extent_tree_locks = calloc(agcount, sizeof(pthread_mutex_t));
735 if (!dup_extent_tree_locks)
736 do_error(_("couldn't malloc dup extent tree descriptor table\n"));
737
738 if ((extent_bno_ptrs = malloc(agcount *
739 sizeof(avltree_desc_t *))) == NULL)
740 do_error(
741 _("couldn't malloc free by-bno extent tree descriptor table\n"));
742
743 if ((extent_bcnt_ptrs = malloc(agcount *
744 sizeof(avltree_desc_t *))) == NULL)
745 do_error(
746 _("couldn't malloc free by-bcnt extent tree descriptor table\n"));
747
748 for (i = 0; i < agcount; i++) {
749 if ((extent_bno_ptrs[i] =
750 malloc(sizeof(avltree_desc_t))) == NULL)
751 do_error(
752 _("couldn't malloc bno extent tree descriptor\n"));
753 if ((extent_bcnt_ptrs[i] =
754 malloc(sizeof(avltree_desc_t))) == NULL)
755 do_error(
756 _("couldn't malloc bcnt extent tree descriptor\n"));
757 }
758
759 for (i = 0; i < agcount; i++) {
760 btree_init(&dup_extent_trees[i]);
761 pthread_mutex_init(&dup_extent_tree_locks[i], NULL);
762 avl_init_tree(extent_bno_ptrs[i], &avl_extent_tree_ops);
763 avl_init_tree(extent_bcnt_ptrs[i], &avl_extent_bcnt_tree_ops);
764 }
765
766 if ((rt_ext_tree_ptr = malloc(sizeof(avl64tree_desc_t))) == NULL)
767 do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
768
769 avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
770 }
771
772 /*
773 * this routine actually frees all the memory used to track per-AG trees
774 */
775 void
776 incore_ext_teardown(xfs_mount_t *mp)
777 {
778 xfs_agnumber_t i;
779
780 for (i = 0; i < mp->m_sb.sb_agcount; i++) {
781 btree_destroy(dup_extent_trees[i]);
782 free(extent_bno_ptrs[i]);
783 free(extent_bcnt_ptrs[i]);
784 }
785
786 free(dup_extent_trees);
787 free(extent_bcnt_ptrs);
788 free(extent_bno_ptrs);
789
790 dup_extent_trees = NULL;
791 extent_bcnt_ptrs = NULL;
792 extent_bno_ptrs = NULL;
793 }
794
795 static int
796 count_extents(xfs_agnumber_t agno, avltree_desc_t *tree, int whichtree)
797 {
798 extent_tree_node_t *node;
799 int i = 0;
800
801 node = (extent_tree_node_t *) tree->avl_firstino;
802
803 while (node != NULL) {
804 i++;
805 if (whichtree)
806 node = findnext_bcnt_extent(agno, node);
807 else
808 node = findnext_bno_extent(node);
809 }
810
811 return(i);
812 }
813
814 int
815 count_bno_extents_blocks(xfs_agnumber_t agno, uint *numblocks)
816 {
817 uint64_t nblocks;
818 extent_tree_node_t *node;
819 int i = 0;
820
821 ASSERT(agno < glob_agcount);
822
823 nblocks = 0;
824
825 node = (extent_tree_node_t *) extent_bno_ptrs[agno]->avl_firstino;
826
827 while (node != NULL) {
828 nblocks += node->ex_blockcount;
829 i++;
830 node = findnext_bno_extent(node);
831 }
832
833 *numblocks = nblocks;
834 return(i);
835 }
836
837 int
838 count_bno_extents(xfs_agnumber_t agno)
839 {
840 ASSERT(agno < glob_agcount);
841 return(count_extents(agno, extent_bno_ptrs[agno], 0));
842 }
843
844 int
845 count_bcnt_extents(xfs_agnumber_t agno)
846 {
847 ASSERT(agno < glob_agcount);
848 return(count_extents(agno, extent_bcnt_ptrs[agno], 1));
849 }