2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "err_protos.h"
29 #define ALLOC_NUM_EXTS 100
32 * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
34 typedef struct extent_alloc_rec
{
35 struct list_head list
;
36 extent_tree_node_t extents
[ALLOC_NUM_EXTS
];
/*
 * Slab of pre-allocated realtime extent tree nodes.  Each slab is linked
 * onto rt_ba_list (see mk_rt_extent_tree_nodes()) so the whole batch can
 * be freed in one pass at teardown.
 */
typedef struct rt_extent_alloc_rec  {
        struct list_head        list;
        rt_extent_tree_node_t   extents[ALLOC_NUM_EXTS];
} rt_extent_alloc_rec_t;
45 * note: there are 4 sets of incore things handled here:
46 * block bitmaps, extent trees, uncertain inode list,
47 * and inode tree. The tree-based code uses the AVL
48 * tree package used by the IRIX kernel VM code
49 * (sys/avl.h). The inode list code uses the same records
50 * as the inode tree code for convenience. The bitmaps
51 * and bitmap operators are mostly macros defined in incore.h.
52 * There are one of everything per AG except for extent
53 * trees. There's one duplicate extent tree, one bno and
54 * one bcnt extent tree per AG. Not all of the above exist
55 * through all phases. The duplicate extent tree gets trashed
56 * at the end of phase 4. The bno/bcnt trees don't appear until
57 * phase 5. The uncertain inode list goes away at the end of
 * phase 3. The inode tree and bno/bcnt trees go away after phase 5.
60 typedef struct ext_flist_s
{
61 extent_tree_node_t
*list
;
65 static ext_flist_t ext_flist
;
67 typedef struct rt_ext_flist_s
{
68 rt_extent_tree_node_t
*list
;
72 static rt_ext_flist_t rt_ext_flist
;
/* file-scope state for the incore extent trees */
static avl64tree_desc_t *rt_ext_tree_ptr;       /* dup extent tree for rt */
static struct btree_root **dup_extent_trees;    /* per ag dup extent trees */
static pthread_mutex_t *dup_extent_tree_locks;

/*
 * array of extent tree ptrs, one per ag for free extents
 * sorted by starting block
 */
static avltree_desc_t **extent_bno_ptrs;

/*
 * array of extent tree ptrs, one per ag for free extents
 * sorted by size (the "bcnt" trees -- see the comments near
 * add_bcnt_extent() below)
 */
static avltree_desc_t **extent_bcnt_ptrs;

/*
 * list of allocated "blocks" for easy freeing later
 */
static struct list_head ba_list;
static struct list_head rt_ba_list;

/* locks protecting the node free lists and the rt dup extent tree */
static pthread_mutex_t ext_flist_lock;
static pthread_mutex_t rt_ext_tree_lock;
static pthread_mutex_t rt_ext_flist_lock;
105 * duplicate extent tree functions
109 release_dup_extent_tree(
112 pthread_mutex_lock(&dup_extent_tree_locks
[agno
]);
113 btree_clear(dup_extent_trees
[agno
]);
114 pthread_mutex_unlock(&dup_extent_tree_locks
[agno
]);
120 xfs_agblock_t startblock
,
121 xfs_extlen_t blockcount
)
125 fprintf(stderr
, "Adding dup extent - %d/%d %d\n", agno
, startblock
,
128 pthread_mutex_lock(&dup_extent_tree_locks
[agno
]);
129 ret
= btree_insert(dup_extent_trees
[agno
], startblock
,
130 (void *)(uintptr_t)(startblock
+ blockcount
));
131 pthread_mutex_unlock(&dup_extent_tree_locks
[agno
]);
138 xfs_agblock_t start_agbno
,
139 xfs_agblock_t end_agbno
)
144 pthread_mutex_lock(&dup_extent_tree_locks
[agno
]);
145 if (!btree_find(dup_extent_trees
[agno
], start_agbno
, &bno
)) {
147 goto out
; /* this really shouldn't happen */
149 if (bno
< end_agbno
) {
153 ret
= (uintptr_t)btree_peek_prev(dup_extent_trees
[agno
], NULL
) >
156 pthread_mutex_unlock(&dup_extent_tree_locks
[agno
]);
162 * extent tree stuff is avl trees of duplicate extents,
163 * sorted in order by block number. there is one tree per ag.
166 static extent_tree_node_t
*
167 mk_extent_tree_nodes(xfs_agblock_t new_startblock
,
168 xfs_extlen_t new_blockcount
, extent_state_t new_state
)
171 extent_tree_node_t
*new;
172 extent_alloc_rec_t
*rec
;
174 pthread_mutex_lock(&ext_flist_lock
);
175 if (ext_flist
.cnt
== 0) {
176 ASSERT(ext_flist
.list
== NULL
);
178 if ((rec
= malloc(sizeof(extent_alloc_rec_t
))) == NULL
)
180 _("couldn't allocate new extent descriptors.\n"));
182 list_add(&rec
->list
, &ba_list
);
184 new = &rec
->extents
[0];
186 for (i
= 0; i
< ALLOC_NUM_EXTS
; i
++) {
187 new->avl_node
.avl_nextino
= (avlnode_t
*)
189 ext_flist
.list
= new;
195 ASSERT(ext_flist
.list
!= NULL
);
197 new = ext_flist
.list
;
198 ext_flist
.list
= (extent_tree_node_t
*) new->avl_node
.avl_nextino
;
200 new->avl_node
.avl_nextino
= NULL
;
201 pthread_mutex_unlock(&ext_flist_lock
);
203 /* initialize node */
205 new->ex_startblock
= new_startblock
;
206 new->ex_blockcount
= new_blockcount
;
207 new->ex_state
= new_state
;
215 release_extent_tree_node(extent_tree_node_t
*node
)
217 pthread_mutex_lock(&ext_flist_lock
);
218 node
->avl_node
.avl_nextino
= (avlnode_t
*) ext_flist
.list
;
219 ext_flist
.list
= node
;
221 pthread_mutex_unlock(&ext_flist_lock
);
227 * routines to recycle all nodes in a tree. it walks the tree
228 * and puts all nodes back on the free list so the nodes can be
229 * reused. the duplicate and bno/bcnt extent trees for each AG
230 * are recycled after they're no longer needed to save memory
233 release_extent_tree(avltree_desc_t
*tree
)
235 extent_tree_node_t
*ext
;
236 extent_tree_node_t
*tmp
;
237 extent_tree_node_t
*lext
;
238 extent_tree_node_t
*ltmp
;
240 if (tree
->avl_firstino
== NULL
)
243 ext
= (extent_tree_node_t
*) tree
->avl_firstino
;
245 while (ext
!= NULL
) {
246 tmp
= (extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
249 * ext->next is guaranteed to be set only in bcnt trees
251 if (ext
->next
!= NULL
) {
253 while (lext
!= NULL
) {
255 release_extent_tree_node(lext
);
260 release_extent_tree_node(ext
);
264 tree
->avl_root
= tree
->avl_firstino
= NULL
;
270 * top-level (visible) routines
273 release_agbno_extent_tree(xfs_agnumber_t agno
)
275 release_extent_tree(extent_bno_ptrs
[agno
]);
281 release_agbcnt_extent_tree(xfs_agnumber_t agno
)
283 release_extent_tree(extent_bcnt_ptrs
[agno
]);
289 * the next 4 routines manage the trees of free extents -- 2 trees
290 * per AG. The first tree is sorted by block number. The second
291 * tree is sorted by extent size. This is the bno tree.
294 add_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
295 xfs_extlen_t blockcount
)
297 extent_tree_node_t
*ext
;
299 ASSERT(extent_bno_ptrs
!= NULL
);
300 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
302 ext
= mk_extent_tree_nodes(startblock
, blockcount
, XR_E_FREE
);
304 if (avl_insert(extent_bno_ptrs
[agno
], (avlnode_t
*) ext
) == NULL
) {
305 do_error(_("duplicate bno extent range\n"));
310 findfirst_bno_extent(xfs_agnumber_t agno
)
312 ASSERT(extent_bno_ptrs
!= NULL
);
313 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
315 return((extent_tree_node_t
*) extent_bno_ptrs
[agno
]->avl_firstino
);
319 find_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
)
321 ASSERT(extent_bno_ptrs
!= NULL
);
322 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
324 return((extent_tree_node_t
*) avl_find(extent_bno_ptrs
[agno
],
329 * delete a node that's in the tree (pointer obtained by a find routine)
332 get_bno_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
)
334 ASSERT(extent_bno_ptrs
!= NULL
);
335 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
337 avl_delete(extent_bno_ptrs
[agno
], &ext
->avl_node
);
/*
 * normalizing constant for bcnt size -> address conversion (see avl ops)
 * used by the AVL tree code to convert sizes and must be used when
 * doing an AVL search in the tree (e.g. avl_findrange(s)).
 * Bigger extents map to smaller keys, so the largest extent sorts last
 * only under the inverted mapping (see avl_ext_bcnt_start below).
 */
#define MAXBCNT 0xFFFFFFFF
#define BCNT_ADDR(cnt) ((unsigned int) MAXBCNT - (cnt))
351 * the next 4 routines manage the trees of free extents -- 2 trees
352 * per AG. The first tree is sorted by block number. The second
353 * tree is sorted by extent size. This is the bcnt tree.
356 add_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
357 xfs_extlen_t blockcount
)
359 extent_tree_node_t
*ext
, *prev
, *current
, *top
;
360 xfs_agblock_t tmp_startblock
;
361 xfs_extlen_t tmp_blockcount
;
362 extent_state_t tmp_state
;
364 ASSERT(extent_bcnt_ptrs
!= NULL
);
365 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
367 ext
= mk_extent_tree_nodes(startblock
, blockcount
, XR_E_FREE
);
369 ASSERT(ext
->next
== NULL
);
372 fprintf(stderr
, "adding bcnt: agno = %d, start = %u, count = %u\n",
373 agno
, startblock
, blockcount
);
375 if ((current
= (extent_tree_node_t
*) avl_find(extent_bcnt_ptrs
[agno
],
376 blockcount
)) != NULL
) {
378 * avl tree code doesn't handle dups so insert
379 * onto linked list in increasing startblock order
381 * when called from mk_incore_fstree,
382 * startblock is in increasing order.
383 * current is an "anchor" node.
384 * quick check if the new ext goes to the end.
385 * if so, append at the end, using the last field
388 ASSERT(current
->last
!= NULL
);
389 if (startblock
> current
->last
->ex_startblock
) {
390 current
->last
->next
= ext
;
396 * scan, to find the proper location for new entry.
397 * this scan is *very* expensive and gets worse with
398 * with increasing entries.
400 top
= prev
= current
;
401 while (current
!= NULL
&&
402 startblock
> current
->ex_startblock
) {
404 current
= current
->next
;
407 if (top
== current
) {
410 * new entry should be ahead of current.
411 * to keep the avl tree intact,
412 * swap the values of to-be-inserted element
413 * and the values of the head of the list.
414 * then insert as the 2nd element on the list.
416 * see the comment in get_bcnt_extent()
417 * as to why we have to do this.
419 tmp_startblock
= top
->ex_startblock
;
420 tmp_blockcount
= top
->ex_blockcount
;
421 tmp_state
= top
->ex_state
;
423 top
->ex_startblock
= ext
->ex_startblock
;
424 top
->ex_blockcount
= ext
->ex_blockcount
;
425 top
->ex_state
= ext
->ex_state
;
427 ext
->ex_startblock
= tmp_startblock
;
428 ext
->ex_blockcount
= tmp_blockcount
;
429 ext
->ex_state
= tmp_state
;
441 if (avl_insert(extent_bcnt_ptrs
[agno
], (avlnode_t
*) ext
) == NULL
) {
442 do_error(_(": duplicate bno extent range\n"));
445 ext
->last
= ext
; /* ext is an "anchor" node */
451 findfirst_bcnt_extent(xfs_agnumber_t agno
)
453 ASSERT(extent_bcnt_ptrs
!= NULL
);
454 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
456 return((extent_tree_node_t
*) extent_bcnt_ptrs
[agno
]->avl_firstino
);
460 findbiggest_bcnt_extent(xfs_agnumber_t agno
)
462 extern avlnode_t
*avl_lastino(avlnode_t
*root
);
464 ASSERT(extent_bcnt_ptrs
!= NULL
);
465 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
467 return((extent_tree_node_t
*) avl_lastino(extent_bcnt_ptrs
[agno
]->avl_root
));
471 findnext_bcnt_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
)
475 if (ext
->next
!= NULL
) {
476 ASSERT(ext
->ex_blockcount
== ext
->next
->ex_blockcount
);
477 ASSERT(ext
->ex_startblock
< ext
->next
->ex_startblock
);
481 * have to look at the top of the list to get the
482 * correct avl_nextino pointer since that pointer
483 * is maintained and altered by the AVL code.
485 nextino
= avl_find(extent_bcnt_ptrs
[agno
], ext
->ex_blockcount
);
486 ASSERT(nextino
!= NULL
);
487 if (nextino
->avl_nextino
!= NULL
) {
488 ASSERT(ext
->ex_blockcount
< ((extent_tree_node_t
*)
489 nextino
->avl_nextino
)->ex_blockcount
);
491 return((extent_tree_node_t
*) nextino
->avl_nextino
);
496 * this is meant to be called after you walk the bno tree to
497 * determine exactly which extent you want (so you'll know the
498 * desired value for startblock when you call this routine).
501 get_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
502 xfs_extlen_t blockcount
)
504 extent_tree_node_t
*ext
, *prev
, *top
;
505 xfs_agblock_t tmp_startblock
;
506 xfs_extlen_t tmp_blockcount
;
507 extent_state_t tmp_state
;
510 ASSERT(extent_bcnt_ptrs
!= NULL
);
511 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
513 if ((ext
= (extent_tree_node_t
*) avl_find(extent_bcnt_ptrs
[agno
],
514 blockcount
)) == NULL
)
519 if (ext
->next
!= NULL
) {
521 * pull it off the list
523 while (ext
!= NULL
&& startblock
!= ext
->ex_startblock
) {
530 * this node is linked into the tree so we
531 * swap the core values so we can delete
532 * the next item on the list instead of
533 * the head of the list. This is because
534 * the rest of the tree undoubtedly has
535 * pointers to the piece of memory that
536 * is the head of the list so pulling
537 * the item out of the list and hence
538 * the avl tree would be a bad idea.
540 * (cheaper than the alternative, a tree
541 * delete of this node followed by a tree
542 * insert of the next node on the list).
544 tmp_startblock
= ext
->next
->ex_startblock
;
545 tmp_blockcount
= ext
->next
->ex_blockcount
;
546 tmp_state
= ext
->next
->ex_state
;
548 ext
->next
->ex_startblock
= ext
->ex_startblock
;
549 ext
->next
->ex_blockcount
= ext
->ex_blockcount
;
550 ext
->next
->ex_state
= ext
->ex_state
;
552 ext
->ex_startblock
= tmp_startblock
;
553 ext
->ex_blockcount
= tmp_blockcount
;
554 ext
->ex_state
= tmp_state
;
560 * now, a simple list deletion
562 prev
->next
= ext
->next
;
566 * no list, just one node. simply delete
568 avl_delete(extent_bcnt_ptrs
[agno
], &ext
->avl_node
);
571 ASSERT(ext
->ex_startblock
== startblock
);
572 ASSERT(ext
->ex_blockcount
== blockcount
);
576 static __psunsigned_t
577 avl_ext_start(avlnode_t
*node
)
579 return((__psunsigned_t
)
580 ((extent_tree_node_t
*) node
)->ex_startblock
);
583 static __psunsigned_t
584 avl_ext_end(avlnode_t
*node
)
586 return((__psunsigned_t
) (
587 ((extent_tree_node_t
*) node
)->ex_startblock
+
588 ((extent_tree_node_t
*) node
)->ex_blockcount
));
592 * convert size to an address for the AVL tree code -- the bigger the size,
593 * the lower the address so the biggest extent will be first in the tree
595 static __psunsigned_t
596 avl_ext_bcnt_start(avlnode_t
*node
)
599 return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
600 node)->ex_blockcount)));
602 return((__psunsigned_t
) ((extent_tree_node_t
*)node
)->ex_blockcount
);
605 static __psunsigned_t
606 avl_ext_bcnt_end(avlnode_t
*node
)
609 return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
610 node)->ex_blockcount)));
612 return((__psunsigned_t
) ((extent_tree_node_t
*)node
)->ex_blockcount
);
615 avlops_t avl_extent_bcnt_tree_ops
= {
620 avlops_t avl_extent_tree_ops
= {
626 * for real-time extents -- have to dup code since realtime extent
627 * startblocks can be 64-bit values.
629 static rt_extent_tree_node_t
*
630 mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock
,
631 xfs_extlen_t new_blockcount
, extent_state_t new_state
)
634 rt_extent_tree_node_t
*new;
635 rt_extent_alloc_rec_t
*rec
;
637 pthread_mutex_lock(&rt_ext_flist_lock
);
638 if (rt_ext_flist
.cnt
== 0) {
639 ASSERT(rt_ext_flist
.list
== NULL
);
641 if ((rec
= malloc(sizeof(rt_extent_alloc_rec_t
))) == NULL
)
643 _("couldn't allocate new extent descriptors.\n"));
645 list_add(&rec
->list
, &rt_ba_list
);
647 new = &rec
->extents
[0];
649 for (i
= 0; i
< ALLOC_NUM_EXTS
; i
++) {
650 new->avl_node
.avl_nextino
= (avlnode_t
*)
652 rt_ext_flist
.list
= new;
658 ASSERT(rt_ext_flist
.list
!= NULL
);
660 new = rt_ext_flist
.list
;
661 rt_ext_flist
.list
= (rt_extent_tree_node_t
*) new->avl_node
.avl_nextino
;
663 new->avl_node
.avl_nextino
= NULL
;
664 pthread_mutex_unlock(&rt_ext_flist_lock
);
666 /* initialize node */
668 new->rt_startblock
= new_startblock
;
669 new->rt_blockcount
= new_blockcount
;
670 new->rt_state
= new_state
;
677 release_rt_extent_tree_node(rt_extent_tree_node_t
*node
)
679 node
->avl_node
.avl_nextino
= (avlnode_t
*) rt_ext_flist
.list
;
680 rt_ext_flist
.list
= node
;
687 release_rt_extent_tree()
689 extent_tree_node_t
*ext
;
690 extent_tree_node_t
*tmp
;
691 extent_tree_node_t
*lext
;
692 extent_tree_node_t
*ltmp
;
693 avl64tree_desc_t
*tree
;
695 tree
= rt_extent_tree_ptr
;
697 if (tree
->avl_firstino
== NULL
)
700 ext
= (extent_tree_node_t
*) tree
->avl_firstino
;
702 while (ext
!= NULL
) {
703 tmp
= (extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
704 release_rt_extent_tree_node(ext
);
708 tree
->avl_root
= tree
->avl_firstino
= NULL
;
715 * don't need release functions for realtime tree teardown
716 * since we only have one tree, not one per AG
720 free_rt_dup_extent_tree(xfs_mount_t
*mp
)
722 rt_extent_alloc_rec_t
*cur
, *tmp
;
724 ASSERT(mp
->m_sb
.sb_rblocks
!= 0);
726 list_for_each_entry_safe(cur
, tmp
, &rt_ba_list
, list
)
729 free(rt_ext_tree_ptr
);
731 rt_ext_tree_ptr
= NULL
;
737 * add a duplicate real-time extent
740 add_rt_dup_extent(xfs_drtbno_t startblock
, xfs_extlen_t blockcount
)
742 rt_extent_tree_node_t
*first
, *last
, *ext
, *next_ext
;
743 xfs_drtbno_t new_startblock
;
744 xfs_extlen_t new_blockcount
;
746 pthread_mutex_lock(&rt_ext_tree_lock
);
747 avl64_findranges(rt_ext_tree_ptr
, startblock
- 1,
748 startblock
+ blockcount
+ 1,
749 (avl64node_t
**) &first
, (avl64node_t
**) &last
);
751 * find adjacent and overlapping extent blocks
753 if (first
== NULL
&& last
== NULL
) {
754 /* nothing, just make and insert new extent */
756 ext
= mk_rt_extent_tree_nodes(startblock
,
757 blockcount
, XR_E_MULT
);
759 if (avl64_insert(rt_ext_tree_ptr
,
760 (avl64node_t
*) ext
) == NULL
) {
761 do_error(_("duplicate extent range\n"));
764 pthread_mutex_unlock(&rt_ext_tree_lock
);
768 ASSERT(first
!= NULL
&& last
!= NULL
);
771 * find the new composite range, delete old extent nodes
774 new_startblock
= startblock
;
775 new_blockcount
= blockcount
;
778 ext
!= (rt_extent_tree_node_t
*) last
->avl_node
.avl_nextino
;
781 * preserve the next inorder node
783 next_ext
= (rt_extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
785 * just bail if the new extent is contained within an old one
787 if (ext
->rt_startblock
<= startblock
&&
788 ext
->rt_blockcount
>= blockcount
) {
789 pthread_mutex_unlock(&rt_ext_tree_lock
);
793 * now check for overlaps and adjacent extents
795 if (ext
->rt_startblock
+ ext
->rt_blockcount
>= startblock
796 || ext
->rt_startblock
<= startblock
+ blockcount
) {
798 if (ext
->rt_startblock
< new_startblock
)
799 new_startblock
= ext
->rt_startblock
;
801 if (ext
->rt_startblock
+ ext
->rt_blockcount
>
802 new_startblock
+ new_blockcount
)
803 new_blockcount
= ext
->rt_startblock
+
807 avl64_delete(rt_ext_tree_ptr
, (avl64node_t
*) ext
);
812 ext
= mk_rt_extent_tree_nodes(new_startblock
,
813 new_blockcount
, XR_E_MULT
);
815 if (avl64_insert(rt_ext_tree_ptr
, (avl64node_t
*) ext
) == NULL
) {
816 do_error(_("duplicate extent range\n"));
819 pthread_mutex_unlock(&rt_ext_tree_lock
);
824 * returns 1 if block is a dup, 0 if not
828 search_rt_dup_extent(xfs_mount_t
*mp
, xfs_drtbno_t bno
)
832 pthread_mutex_lock(&rt_ext_tree_lock
);
833 if (avl64_findrange(rt_ext_tree_ptr
, bno
) != NULL
)
837 pthread_mutex_unlock(&rt_ext_tree_lock
);
842 avl64_rt_ext_start(avl64node_t
*node
)
844 return(((rt_extent_tree_node_t
*) node
)->rt_startblock
);
848 avl64_ext_end(avl64node_t
*node
)
850 return(((rt_extent_tree_node_t
*) node
)->rt_startblock
+
851 ((rt_extent_tree_node_t
*) node
)->rt_blockcount
);
854 avl64ops_t avl64_extent_tree_ops
= {
860 incore_ext_init(xfs_mount_t
*mp
)
863 xfs_agnumber_t agcount
= mp
->m_sb
.sb_agcount
;
865 list_head_init(&ba_list
);
866 list_head_init(&rt_ba_list
);
867 pthread_mutex_init(&ext_flist_lock
, NULL
);
868 pthread_mutex_init(&rt_ext_tree_lock
, NULL
);
869 pthread_mutex_init(&rt_ext_flist_lock
, NULL
);
871 dup_extent_trees
= calloc(agcount
, sizeof(struct btree_root
*));
872 if (!dup_extent_trees
)
873 do_error(_("couldn't malloc dup extent tree descriptor table\n"));
875 dup_extent_tree_locks
= calloc(agcount
, sizeof(pthread_mutex_t
));
876 if (!dup_extent_tree_locks
)
877 do_error(_("couldn't malloc dup extent tree descriptor table\n"));
879 if ((extent_bno_ptrs
= malloc(agcount
*
880 sizeof(avltree_desc_t
*))) == NULL
)
882 _("couldn't malloc free by-bno extent tree descriptor table\n"));
884 if ((extent_bcnt_ptrs
= malloc(agcount
*
885 sizeof(avltree_desc_t
*))) == NULL
)
887 _("couldn't malloc free by-bcnt extent tree descriptor table\n"));
889 for (i
= 0; i
< agcount
; i
++) {
890 if ((extent_bno_ptrs
[i
] =
891 malloc(sizeof(avltree_desc_t
))) == NULL
)
893 _("couldn't malloc bno extent tree descriptor\n"));
894 if ((extent_bcnt_ptrs
[i
] =
895 malloc(sizeof(avltree_desc_t
))) == NULL
)
897 _("couldn't malloc bcnt extent tree descriptor\n"));
900 for (i
= 0; i
< agcount
; i
++) {
901 btree_init(&dup_extent_trees
[i
]);
902 pthread_mutex_init(&dup_extent_tree_locks
[i
], NULL
);
903 avl_init_tree(extent_bno_ptrs
[i
], &avl_extent_tree_ops
);
904 avl_init_tree(extent_bcnt_ptrs
[i
], &avl_extent_bcnt_tree_ops
);
907 if ((rt_ext_tree_ptr
= malloc(sizeof(avltree_desc_t
))) == NULL
)
908 do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
910 avl64_init_tree(rt_ext_tree_ptr
, &avl64_extent_tree_ops
);
913 ext_flist
.list
= NULL
;
919 * this routine actually frees all the memory used to track per-AG trees
922 incore_ext_teardown(xfs_mount_t
*mp
)
924 extent_alloc_rec_t
*cur
, *tmp
;
927 list_for_each_entry_safe(cur
, tmp
, &ba_list
, list
)
930 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
931 btree_destroy(dup_extent_trees
[i
]);
932 free(extent_bno_ptrs
[i
]);
933 free(extent_bcnt_ptrs
[i
]);
936 free(dup_extent_trees
);
937 free(extent_bcnt_ptrs
);
938 free(extent_bno_ptrs
);
940 dup_extent_trees
= NULL
;
941 extent_bcnt_ptrs
= NULL
;
942 extent_bno_ptrs
= NULL
;
946 count_extents(xfs_agnumber_t agno
, avltree_desc_t
*tree
, int whichtree
)
948 extent_tree_node_t
*node
;
951 node
= (extent_tree_node_t
*) tree
->avl_firstino
;
953 while (node
!= NULL
) {
956 node
= findnext_bcnt_extent(agno
, node
);
958 node
= findnext_bno_extent(node
);
965 count_bno_extents_blocks(xfs_agnumber_t agno
, uint
*numblocks
)
968 extent_tree_node_t
*node
;
971 ASSERT(agno
< glob_agcount
);
975 node
= (extent_tree_node_t
*) extent_bno_ptrs
[agno
]->avl_firstino
;
977 while (node
!= NULL
) {
978 nblocks
+= node
->ex_blockcount
;
980 node
= findnext_bno_extent(node
);
983 *numblocks
= nblocks
;
988 count_bno_extents(xfs_agnumber_t agno
)
990 ASSERT(agno
< glob_agcount
);
991 return(count_extents(agno
, extent_bno_ptrs
[agno
], 0));
995 count_bcnt_extents(xfs_agnumber_t agno
)
997 ASSERT(agno
< glob_agcount
);
998 return(count_extents(agno
, extent_bcnt_ptrs
[agno
], 1));