/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libxfs.h"
#include "avl.h"
#include "btree.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "err_protos.h"
#include "avl64.h"
#include "threads.h"

/*
 * note:  there are 4 sets of incore things handled here:
 * block bitmaps, extent trees, uncertain inode list,
 * and inode tree.  The tree-based code uses the AVL
 * tree package used by the IRIX kernel VM code
 * (sys/avl.h).  The inode list code uses the same records
 * as the inode tree code for convenience.  The bitmaps
 * and bitmap operators are mostly macros defined in incore.h.
 * There is one of everything per AG except for extent
 * trees.  There's one duplicate extent tree, one bno and
 * one bcnt extent tree per AG.  Not all of the above exist
 * through all phases.  The duplicate extent tree gets trashed
 * at the end of phase 4.  The bno/bcnt trees don't appear until
 * phase 5.  The uncertain inode list goes away at the end of
 * phase 3.  The inode tree and bno/bcnt trees go away after phase 5.
 */

static avl64tree_desc_t *rt_ext_tree_ptr;       /* dup extent tree for rt */
static pthread_mutex_t rt_ext_tree_lock;

static struct btree_root **dup_extent_trees;    /* per ag dup extent trees */
static pthread_mutex_t *dup_extent_tree_locks;

static avltree_desc_t **extent_bno_ptrs;        /*
                                                 * array of extent tree ptrs
                                                 * one per ag for free extents
                                                 * sorted by starting block
                                                 * number
                                                 */
static avltree_desc_t **extent_bcnt_ptrs;       /*
                                                 * array of extent tree ptrs
                                                 * one per ag for free extents
                                                 * sorted by size
                                                 */

/*
 * duplicate extent tree functions
 */

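/*
 * empty agno's duplicate extent tree so its nodes can be reused.
 */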
void
release_dup_extent_tree(
        xfs_agnumber_t          agno)
{
        pthread_mutex_lock(&dup_extent_tree_locks[agno]);
        btree_clear(dup_extent_trees[agno]);
        pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
}

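/*
 * record a duplicate extent in agno's dup extent tree.  the extent is
 * keyed by its starting agbno; the stored value is the end agbno
 * (startblock + blockcount).
 */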
int
add_dup_extent(
        xfs_agnumber_t          agno,
        xfs_agblock_t           startblock,
        xfs_extlen_t            blockcount)
{
        int     ret;
#ifdef XR_DUP_TRACE
        fprintf(stderr, "Adding dup extent - %d/%d %d\n", agno, startblock,
                blockcount);
#endif
        pthread_mutex_lock(&dup_extent_tree_locks[agno]);
        ret = btree_insert(dup_extent_trees[agno], startblock,
                                (void *)(uintptr_t)(startblock + blockcount));
        pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
        return ret;
}

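/*
 * returns non-zero if any extent recorded in agno's dup extent tree
 * overlaps the range [start_agbno, end_agbno), zero otherwise.
 */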
int
search_dup_extent(
        xfs_agnumber_t          agno,
        xfs_agblock_t           start_agbno,
        xfs_agblock_t           end_agbno)
{
        unsigned long   bno;
        int             ret;

        pthread_mutex_lock(&dup_extent_tree_locks[agno]);
        if (!btree_find(dup_extent_trees[agno], start_agbno, &bno)) {
                ret = 0;
                goto out;       /* this really shouldn't happen */
        }
        if (bno < end_agbno) {
                ret = 1;
                goto out;
        }
        ret = (uintptr_t)btree_peek_prev(dup_extent_trees[agno], NULL) >
                                                                start_agbno;
out:
        pthread_mutex_unlock(&dup_extent_tree_locks[agno]);
        return ret;
}


/*
 * extent tree stuff is avl trees of free extents.  there are two
 * trees per ag:  one sorted by block number and one sorted by size.
 */

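/*
 * allocate and initialize a free extent tree node; repair aborts via
 * do_error() if the allocation fails.
 */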
static extent_tree_node_t *
mk_extent_tree_nodes(xfs_agblock_t new_startblock,
        xfs_extlen_t new_blockcount, extent_state_t new_state)
{
        extent_tree_node_t *new;

        new = malloc(sizeof(*new));
        if (!new)
                do_error(_("couldn't allocate new extent descriptor.\n"));

        new->avl_node.avl_nextino = NULL;
        new->ex_startblock = new_startblock;
        new->ex_blockcount = new_blockcount;
        new->ex_state = new_state;
        new->next = NULL;
        new->last = NULL;

        return new;
}

void
release_extent_tree_node(extent_tree_node_t *node)
{
        free(node);
}

/*
 * routine to free all nodes in a tree.  it walks the tree
 * and frees every node so the memory can be reused.  the
 * bno/bcnt extent trees for each AG are released after
 * they're no longer needed to save memory.
 */
void
release_extent_tree(avltree_desc_t *tree)
{
        extent_tree_node_t      *ext;
        extent_tree_node_t      *tmp;
        extent_tree_node_t      *lext;
        extent_tree_node_t      *ltmp;

        if (tree->avl_firstino == NULL)
                return;

        ext = (extent_tree_node_t *) tree->avl_firstino;

        while (ext != NULL) {
                tmp = (extent_tree_node_t *) ext->avl_node.avl_nextino;

                /*
                 * ext->next is guaranteed to be set only in bcnt trees
                 */
                if (ext->next != NULL) {
                        lext = ext->next;
                        while (lext != NULL) {
                                ltmp = lext->next;
                                release_extent_tree_node(lext);
                                lext = ltmp;
                        }
                }

                release_extent_tree_node(ext);
                ext = tmp;
        }

        tree->avl_root = tree->avl_firstino = NULL;

        return;
}

/*
 * top-level (visible) routines
 */
void
release_agbno_extent_tree(xfs_agnumber_t agno)
{
        release_extent_tree(extent_bno_ptrs[agno]);

        return;
}

void
release_agbcnt_extent_tree(xfs_agnumber_t agno)
{
        release_extent_tree(extent_bcnt_ptrs[agno]);

        return;
}

/*
 * the next 4 routines manage the trees of free extents -- 2 trees
 * per AG.  The first tree is sorted by block number.  The second
 * tree is sorted by extent size.  This is the bno tree.
 */
void
add_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
                xfs_extlen_t blockcount)
{
        extent_tree_node_t *ext;

        ASSERT(extent_bno_ptrs != NULL);
        ASSERT(extent_bno_ptrs[agno] != NULL);

        ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_FREE);

        if (avl_insert(extent_bno_ptrs[agno], (avlnode_t *) ext) == NULL) {
                do_error(_("duplicate bno extent range\n"));
        }
}

extent_tree_node_t *
findfirst_bno_extent(xfs_agnumber_t agno)
{
        ASSERT(extent_bno_ptrs != NULL);
        ASSERT(extent_bno_ptrs[agno] != NULL);

        return((extent_tree_node_t *) extent_bno_ptrs[agno]->avl_firstino);
}

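/*
 * look up the free extent in agno's by-bno tree whose start block is
 * exactly startblock; returns NULL if no such extent exists.
 */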
extent_tree_node_t *
find_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock)
{
        ASSERT(extent_bno_ptrs != NULL);
        ASSERT(extent_bno_ptrs[agno] != NULL);

        return((extent_tree_node_t *) avl_find(extent_bno_ptrs[agno],
                startblock));
}

/*
 * delete a node that's in the tree (pointer obtained by a find routine)
 */
void
get_bno_extent(xfs_agnumber_t agno, extent_tree_node_t *ext)
{
        ASSERT(extent_bno_ptrs != NULL);
        ASSERT(extent_bno_ptrs[agno] != NULL);

        avl_delete(extent_bno_ptrs[agno], &ext->avl_node);

        return;
}

/*
 * normalizing constant for bcnt size -> address conversion (see avl ops).
 * note:  the current bcnt tree ops (avl_ext_bcnt_start/end below) key the
 * tree by the raw block count, so this macro is presently unused.
 */
#define MAXBCNT         0xFFFFFFFF
#define BCNT_ADDR(cnt)  ((unsigned int) MAXBCNT - (cnt))

/*
 * the next 5 routines manage the trees of free extents -- 2 trees
 * per AG.  The first tree is sorted by block number.  The second
 * tree is sorted by extent size.  This is the bcnt tree.
 */
void
add_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
                xfs_extlen_t blockcount)
{
        extent_tree_node_t *ext, *prev, *current, *top;
        xfs_agblock_t tmp_startblock;
        xfs_extlen_t tmp_blockcount;
        extent_state_t tmp_state;

        ASSERT(extent_bcnt_ptrs != NULL);
        ASSERT(extent_bcnt_ptrs[agno] != NULL);

        ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_FREE);

        ASSERT(ext->next == NULL);

#ifdef XR_BCNT_TRACE
        fprintf(stderr, "adding bcnt: agno = %d, start = %u, count = %u\n",
                        agno, startblock, blockcount);
#endif
        if ((current = (extent_tree_node_t *) avl_find(extent_bcnt_ptrs[agno],
                                                blockcount)) != NULL) {
                /*
                 * avl tree code doesn't handle dups so insert
                 * onto linked list in increasing startblock order
                 *
                 * when called from mk_incore_fstree,
                 * startblock is in increasing order.
                 * current is an "anchor" node.
                 * quick check if the new ext goes to the end.
                 * if so, append at the end, using the last field
                 * of the "anchor".
                 */
                ASSERT(current->last != NULL);
                if (startblock > current->last->ex_startblock) {
                        current->last->next = ext;
                        current->last = ext;
                        return;
                }

                /*
                 * scan to find the proper location for the new entry.
                 * this scan is *very* expensive and gets worse with
                 * increasing entries.
                 */
                top = prev = current;
                while (current != NULL &&
                                startblock > current->ex_startblock) {
                        prev = current;
                        current = current->next;
                }

                if (top == current) {
                        ASSERT(top == prev);
                        /*
                         * new entry should be ahead of current.
                         * to keep the avl tree intact,
                         * swap the values of to-be-inserted element
                         * and the values of the head of the list.
                         * then insert as the 2nd element on the list.
                         *
                         * see the comment in get_bcnt_extent()
                         * as to why we have to do this.
                         */
                        tmp_startblock = top->ex_startblock;
                        tmp_blockcount = top->ex_blockcount;
                        tmp_state = top->ex_state;

                        top->ex_startblock = ext->ex_startblock;
                        top->ex_blockcount = ext->ex_blockcount;
                        top->ex_state = ext->ex_state;

                        ext->ex_startblock = tmp_startblock;
                        ext->ex_blockcount = tmp_blockcount;
                        ext->ex_state = tmp_state;

                        current = top->next;
                        prev = top;
                }

                prev->next = ext;
                ext->next = current;

                return;
        }

        if (avl_insert(extent_bcnt_ptrs[agno], (avlnode_t *) ext) == NULL) {
                do_error(_("duplicate bcnt extent range\n"));
        }

        ext->last = ext;        /* ext is an "anchor" node */

        return;
}

extent_tree_node_t *
findfirst_bcnt_extent(xfs_agnumber_t agno)
{
        ASSERT(extent_bcnt_ptrs != NULL);
        ASSERT(extent_bcnt_ptrs[agno] != NULL);

        return((extent_tree_node_t *) extent_bcnt_ptrs[agno]->avl_firstino);
}

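/*
 * return the largest free extent in agno's by-size tree.  the bcnt tree
 * is keyed by block count, so this is the last node in AVL order.
 */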
extent_tree_node_t *
findbiggest_bcnt_extent(xfs_agnumber_t agno)
{
        ASSERT(extent_bcnt_ptrs != NULL);
        ASSERT(extent_bcnt_ptrs[agno] != NULL);

        return((extent_tree_node_t *) avl_lastino(extent_bcnt_ptrs[agno]->avl_root));
}

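/*
 * step to the next free extent in by-size order:  first along the linked
 * list of extents with the same block count, then on to the next node
 * in the avl tree.
 */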
extent_tree_node_t *
findnext_bcnt_extent(xfs_agnumber_t agno, extent_tree_node_t *ext)
{
        avlnode_t *nextino;

        if (ext->next != NULL) {
                ASSERT(ext->ex_blockcount == ext->next->ex_blockcount);
                ASSERT(ext->ex_startblock < ext->next->ex_startblock);
                return(ext->next);
        } else {
                /*
                 * have to look at the top of the list to get the
                 * correct avl_nextino pointer since that pointer
                 * is maintained and altered by the AVL code.
                 */
                nextino = avl_find(extent_bcnt_ptrs[agno], ext->ex_blockcount);
                ASSERT(nextino != NULL);
                if (nextino->avl_nextino != NULL) {
                        ASSERT(ext->ex_blockcount < ((extent_tree_node_t *)
                                        nextino->avl_nextino)->ex_blockcount);
                }
                return((extent_tree_node_t *) nextino->avl_nextino);
        }
}

/*
 * this is meant to be called after you walk the bno tree to
 * determine exactly which extent you want (so you'll know the
 * desired value for startblock when you call this routine).
 */
extent_tree_node_t *
get_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
                xfs_extlen_t blockcount)
{
        extent_tree_node_t *ext, *prev, *top;
        xfs_agblock_t tmp_startblock;
        xfs_extlen_t tmp_blockcount;
        extent_state_t tmp_state;

        prev = NULL;
        ASSERT(extent_bcnt_ptrs != NULL);
        ASSERT(extent_bcnt_ptrs[agno] != NULL);

        if ((ext = (extent_tree_node_t *) avl_find(extent_bcnt_ptrs[agno],
                                                blockcount)) == NULL)
                return(NULL);

        top = ext;

        if (ext->next != NULL) {
                /*
                 * pull it off the list
                 */
                while (ext != NULL && startblock != ext->ex_startblock) {
                        prev = ext;
                        ext = ext->next;
                }
                ASSERT(ext != NULL);
                if (ext == top) {
                        /*
                         * this node is linked into the tree so we
                         * swap the core values so we can delete
                         * the next item on the list instead of
                         * the head of the list.  This is because
                         * the rest of the tree undoubtedly has
                         * pointers to the piece of memory that
                         * is the head of the list so pulling
                         * the item out of the list and hence
                         * the avl tree would be a bad idea.
                         *
                         * (cheaper than the alternative, a tree
                         * delete of this node followed by a tree
                         * insert of the next node on the list).
                         */
                        tmp_startblock = ext->next->ex_startblock;
                        tmp_blockcount = ext->next->ex_blockcount;
                        tmp_state = ext->next->ex_state;

                        ext->next->ex_startblock = ext->ex_startblock;
                        ext->next->ex_blockcount = ext->ex_blockcount;
                        ext->next->ex_state = ext->ex_state;

                        ext->ex_startblock = tmp_startblock;
                        ext->ex_blockcount = tmp_blockcount;
                        ext->ex_state = tmp_state;

                        ext = ext->next;
                        prev = top;
                }
                /*
                 * now, a simple list deletion
                 */
                prev->next = ext->next;
                ext->next = NULL;
        } else {
                /*
                 * no list, just one node.  simply delete
                 */
                avl_delete(extent_bcnt_ptrs[agno], &ext->avl_node);
        }

        ASSERT(ext->ex_startblock == startblock);
        ASSERT(ext->ex_blockcount == blockcount);
        return(ext);
}

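/*
 * AVL ops for the by-bno trees:  an extent's range is
 * [ex_startblock, ex_startblock + ex_blockcount).
 */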
static uintptr_t
avl_ext_start(avlnode_t *node)
{
        return((uintptr_t)
                ((extent_tree_node_t *) node)->ex_startblock);
}

static uintptr_t
avl_ext_end(avlnode_t *node)
{
        return((uintptr_t) (
                ((extent_tree_node_t *) node)->ex_startblock +
                ((extent_tree_node_t *) node)->ex_blockcount));
}

/*
 * the bcnt tree is keyed directly by extent size, so the biggest
 * extent sorts last in the tree (see findbiggest_bcnt_extent).  the
 * original size -> address inversion is left commented out below.
 */
static uintptr_t
avl_ext_bcnt_start(avlnode_t *node)
{
/*
        return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
                node)->ex_blockcount)));
*/
        return((uintptr_t) ((extent_tree_node_t *)node)->ex_blockcount);
}

static uintptr_t
avl_ext_bcnt_end(avlnode_t *node)
{
/*
        return((uintptr_t) (BCNT_ADDR(((extent_tree_node_t *)
                node)->ex_blockcount)));
*/
        return((uintptr_t) ((extent_tree_node_t *)node)->ex_blockcount);
}

avlops_t avl_extent_bcnt_tree_ops = {
        avl_ext_bcnt_start,
        avl_ext_bcnt_end
};

avlops_t avl_extent_tree_ops = {
        avl_ext_start,
        avl_ext_end
};

/*
 * for real-time extents -- have to dup code since realtime extent
 * startblocks can be 64-bit values.
 */
static rt_extent_tree_node_t *
mk_rt_extent_tree_nodes(xfs_rtblock_t new_startblock,
        xfs_extlen_t new_blockcount, extent_state_t new_state)
{
        rt_extent_tree_node_t *new;

        new = malloc(sizeof(*new));
        if (!new)
                do_error(_("couldn't allocate new extent descriptor.\n"));

        new->avl_node.avl_nextino = NULL;
        new->rt_startblock = new_startblock;
        new->rt_blockcount = new_blockcount;
        new->rt_state = new_state;
        return new;
}

#if 0
void
release_rt_extent_tree_node(rt_extent_tree_node_t *node)
{
        free(node);
}

void
release_rt_extent_tree()
{
        extent_tree_node_t      *ext;
        extent_tree_node_t      *tmp;
        extent_tree_node_t      *lext;
        extent_tree_node_t      *ltmp;
        avl64tree_desc_t        *tree;

        tree = rt_extent_tree_ptr;

        if (tree->avl_firstino == NULL)
                return;

        ext = (extent_tree_node_t *) tree->avl_firstino;

        while (ext != NULL) {
                tmp = (extent_tree_node_t *) ext->avl_node.avl_nextino;
                release_rt_extent_tree_node(ext);
                ext = tmp;
        }

        tree->avl_root = tree->avl_firstino = NULL;

        return;
}
#endif

/*
 * don't need release functions for realtime tree teardown
 * since we only have one tree, not one per AG
 */
/* ARGSUSED */
void
free_rt_dup_extent_tree(xfs_mount_t *mp)
{
        ASSERT(mp->m_sb.sb_rblocks != 0);
        free(rt_ext_tree_ptr);
        rt_ext_tree_ptr = NULL;
}

/*
 * add a duplicate real-time extent
 */
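/*
 * overlapping and adjacent ranges already in the tree are merged with
 * the new extent into a single composite node.
 */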
void
add_rt_dup_extent(xfs_rtblock_t startblock, xfs_extlen_t blockcount)
{
        rt_extent_tree_node_t *first, *last, *ext, *next_ext;
        xfs_rtblock_t new_startblock;
        xfs_extlen_t new_blockcount;

        pthread_mutex_lock(&rt_ext_tree_lock);
        avl64_findranges(rt_ext_tree_ptr, startblock - 1,
                startblock + blockcount + 1,
                (avl64node_t **) &first, (avl64node_t **) &last);
        /*
         * find adjacent and overlapping extent blocks
         */
        if (first == NULL && last == NULL) {
                /* nothing, just make and insert new extent */

                ext = mk_rt_extent_tree_nodes(startblock,
                                blockcount, XR_E_MULT);

                if (avl64_insert(rt_ext_tree_ptr,
                                (avl64node_t *) ext) == NULL) {
                        do_error(_("duplicate extent range\n"));
                }

                pthread_mutex_unlock(&rt_ext_tree_lock);
                return;
        }

        ASSERT(first != NULL && last != NULL);

        /*
         * find the new composite range, delete old extent nodes
         * as we go
         */
        new_startblock = startblock;
        new_blockcount = blockcount;

        for (ext = first;
                ext != (rt_extent_tree_node_t *) last->avl_node.avl_nextino;
                ext = next_ext) {
                /*
                 * preserve the next inorder node
                 */
                next_ext = (rt_extent_tree_node_t *) ext->avl_node.avl_nextino;
                /*
                 * just bail if the new extent is contained within an old one
                 */
                if (ext->rt_startblock <= startblock &&
                                ext->rt_blockcount >= blockcount) {
                        pthread_mutex_unlock(&rt_ext_tree_lock);
                        return;
                }
                /*
                 * now check for overlaps and adjacent extents
                 */
                if (ext->rt_startblock + ext->rt_blockcount >= startblock
                        || ext->rt_startblock <= startblock + blockcount) {

                        if (ext->rt_startblock < new_startblock)
                                new_startblock = ext->rt_startblock;

                        if (ext->rt_startblock + ext->rt_blockcount >
                                        new_startblock + new_blockcount)
                                new_blockcount = ext->rt_startblock +
                                                 ext->rt_blockcount -
                                                 new_startblock;

                        avl64_delete(rt_ext_tree_ptr, (avl64node_t *) ext);
                        continue;
                }
        }

        ext = mk_rt_extent_tree_nodes(new_startblock,
                        new_blockcount, XR_E_MULT);

        if (avl64_insert(rt_ext_tree_ptr, (avl64node_t *) ext) == NULL) {
                do_error(_("duplicate extent range\n"));
        }

        pthread_mutex_unlock(&rt_ext_tree_lock);
        return;
}

/*
 * returns 1 if block is a dup, 0 if not
 */
/* ARGSUSED */
int
search_rt_dup_extent(xfs_mount_t *mp, xfs_rtblock_t bno)
{
        int     ret;

        pthread_mutex_lock(&rt_ext_tree_lock);
        if (avl64_findrange(rt_ext_tree_ptr, bno) != NULL)
                ret = 1;
        else
                ret = 0;
        pthread_mutex_unlock(&rt_ext_tree_lock);
        return(ret);
}

static uint64_t
avl64_rt_ext_start(avl64node_t *node)
{
        return(((rt_extent_tree_node_t *) node)->rt_startblock);
}

static uint64_t
avl64_ext_end(avl64node_t *node)
{
        return(((rt_extent_tree_node_t *) node)->rt_startblock +
                ((rt_extent_tree_node_t *) node)->rt_blockcount);
}

avl64ops_t avl64_extent_tree_ops = {
        avl64_rt_ext_start,
        avl64_ext_end
};

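/*
 * allocate and initialize the per-AG incore extent structures:  one
 * duplicate extent btree (and its lock), one by-bno and one by-bcnt
 * avl tree per AG, plus the single realtime duplicate extent tree.
 */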
void
incore_ext_init(xfs_mount_t *mp)
{
        int i;
        xfs_agnumber_t agcount = mp->m_sb.sb_agcount;

        pthread_mutex_init(&rt_ext_tree_lock, NULL);

        dup_extent_trees = calloc(agcount, sizeof(struct btree_root *));
        if (!dup_extent_trees)
                do_error(_("couldn't malloc dup extent tree descriptor table\n"));

        dup_extent_tree_locks = calloc(agcount, sizeof(pthread_mutex_t));
        if (!dup_extent_tree_locks)
                do_error(_("couldn't malloc dup extent tree lock table\n"));

        if ((extent_bno_ptrs = malloc(agcount *
                                        sizeof(avltree_desc_t *))) == NULL)
                do_error(
        _("couldn't malloc free by-bno extent tree descriptor table\n"));

        if ((extent_bcnt_ptrs = malloc(agcount *
                                        sizeof(avltree_desc_t *))) == NULL)
                do_error(
        _("couldn't malloc free by-bcnt extent tree descriptor table\n"));

        for (i = 0; i < agcount; i++) {
                if ((extent_bno_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(
                _("couldn't malloc bno extent tree descriptor\n"));
                if ((extent_bcnt_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(
                _("couldn't malloc bcnt extent tree descriptor\n"));
        }

        for (i = 0; i < agcount; i++) {
                btree_init(&dup_extent_trees[i]);
                pthread_mutex_init(&dup_extent_tree_locks[i], NULL);
                avl_init_tree(extent_bno_ptrs[i], &avl_extent_tree_ops);
                avl_init_tree(extent_bcnt_ptrs[i], &avl_extent_bcnt_tree_ops);
        }

        if ((rt_ext_tree_ptr = malloc(sizeof(avl64tree_desc_t))) == NULL)
                do_error(_("couldn't malloc dup rt extent tree descriptor\n"));

        avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
}

/*
 * this routine actually frees all the memory used to track per-AG trees
 */
void
incore_ext_teardown(xfs_mount_t *mp)
{
        xfs_agnumber_t  i;

        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                btree_destroy(dup_extent_trees[i]);
                free(extent_bno_ptrs[i]);
                free(extent_bcnt_ptrs[i]);
        }

        free(dup_extent_trees);
        free(extent_bcnt_ptrs);
        free(extent_bno_ptrs);

        dup_extent_trees = NULL;
        extent_bcnt_ptrs = NULL;
        extent_bno_ptrs = NULL;
}

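/*
 * count the nodes in a free extent tree.  whichtree is non-zero for a
 * by-bcnt tree and zero for a by-bno tree.
 */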
int
count_extents(xfs_agnumber_t agno, avltree_desc_t *tree, int whichtree)
{
        extent_tree_node_t *node;
        int i = 0;

        node = (extent_tree_node_t *) tree->avl_firstino;

        while (node != NULL) {
                i++;
                if (whichtree)
                        node = findnext_bcnt_extent(agno, node);
                else
                        node = findnext_bno_extent(node);
        }

        return(i);
}

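/*
 * count agno's by-bno free extents and also total their lengths
 * into *numblocks.
 */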
int
count_bno_extents_blocks(xfs_agnumber_t agno, uint *numblocks)
{
        uint64_t nblocks;
        extent_tree_node_t *node;
        int i = 0;

        ASSERT(agno < glob_agcount);

        nblocks = 0;

        node = (extent_tree_node_t *) extent_bno_ptrs[agno]->avl_firstino;

        while (node != NULL) {
                nblocks += node->ex_blockcount;
                i++;
                node = findnext_bno_extent(node);
        }

        *numblocks = nblocks;
        return(i);
}

int
count_bno_extents(xfs_agnumber_t agno)
{
        ASSERT(agno < glob_agcount);
        return(count_extents(agno, extent_bno_ptrs[agno], 0));
}

int
count_bcnt_extents(xfs_agnumber_t agno)
{
        ASSERT(agno < glob_agcount);
        return(count_extents(agno, extent_bcnt_ptrs[agno], 1));
}