/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
25 #include "err_protos.h"
28 #define ALLOC_NUM_EXTS 100
31 * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
33 typedef struct extent_alloc_rec
{
35 extent_tree_node_t extents
[ALLOC_NUM_EXTS
];
38 typedef struct rt_extent_alloc_rec
{
40 rt_extent_tree_node_t extents
[ALLOC_NUM_EXTS
];
41 } rt_extent_alloc_rec_t
;
44 * note: there are 4 sets of incore things handled here:
45 * block bitmaps, extent trees, uncertain inode list,
46 * and inode tree. The tree-based code uses the AVL
47 * tree package used by the IRIX kernel VM code
48 * (sys/avl.h). The inode list code uses the same records
49 * as the inode tree code for convenience. The bitmaps
50 * and bitmap operators are mostly macros defined in incore.h.
51 * There are one of everything per AG except for extent
52 * trees. There's one duplicate extent tree, one bno and
53 * one bcnt extent tree per AG. Not all of the above exist
54 * through all phases. The duplicate extent tree gets trashed
55 * at the end of phase 4. The bno/bcnt trees don't appear until
56 * phase 5. The uncertain inode list goes away at the end of
57 * phase 3. The inode tree and bno/bnct trees go away after phase 5.
59 typedef struct ext_flist_s
{
60 extent_tree_node_t
*list
;
64 static ext_flist_t ext_flist
;
66 typedef struct rt_ext_flist_s
{
67 rt_extent_tree_node_t
*list
;
71 static rt_ext_flist_t rt_ext_flist
;
73 static avl64tree_desc_t
*rt_ext_tree_ptr
; /* dup extent tree for rt */
75 avltree_desc_t
**extent_tree_ptrs
; /* array of extent tree ptrs */
76 /* one per ag for dups */
77 static avltree_desc_t
**extent_bno_ptrs
; /*
78 * array of extent tree ptrs
79 * one per ag for free extents
80 * sorted by starting block
83 static avltree_desc_t
**extent_bcnt_ptrs
; /*
84 * array of extent tree ptrs
85 * one per ag for free extents
90 * list of allocated "blocks" for easy freeing later
92 static ba_rec_t
*ba_list
;
93 static ba_rec_t
*rt_ba_list
;
98 static pthread_mutex_t ext_flist_lock
;
99 static pthread_mutex_t rt_ext_tree_lock
;
100 static pthread_mutex_t rt_ext_flist_lock
;
103 * extent tree stuff is avl trees of duplicate extents,
104 * sorted in order by block number. there is one tree per ag.
107 static extent_tree_node_t
*
108 mk_extent_tree_nodes(xfs_agblock_t new_startblock
,
109 xfs_extlen_t new_blockcount
, extent_state_t new_state
)
112 extent_tree_node_t
*new;
113 extent_alloc_rec_t
*rec
;
115 pthread_mutex_lock(&ext_flist_lock
);
116 if (ext_flist
.cnt
== 0) {
117 ASSERT(ext_flist
.list
== NULL
);
119 if ((rec
= malloc(sizeof(extent_alloc_rec_t
))) == NULL
)
121 _("couldn't allocate new extent descriptors.\n"));
123 record_allocation(&rec
->alloc_rec
, ba_list
);
125 new = &rec
->extents
[0];
127 for (i
= 0; i
< ALLOC_NUM_EXTS
; i
++) {
128 new->avl_node
.avl_nextino
= (avlnode_t
*)
130 ext_flist
.list
= new;
136 ASSERT(ext_flist
.list
!= NULL
);
138 new = ext_flist
.list
;
139 ext_flist
.list
= (extent_tree_node_t
*) new->avl_node
.avl_nextino
;
141 new->avl_node
.avl_nextino
= NULL
;
142 pthread_mutex_unlock(&ext_flist_lock
);
144 /* initialize node */
146 new->ex_startblock
= new_startblock
;
147 new->ex_blockcount
= new_blockcount
;
148 new->ex_state
= new_state
;
156 release_extent_tree_node(extent_tree_node_t
*node
)
158 pthread_mutex_lock(&ext_flist_lock
);
159 node
->avl_node
.avl_nextino
= (avlnode_t
*) ext_flist
.list
;
160 ext_flist
.list
= node
;
162 pthread_mutex_unlock(&ext_flist_lock
);
168 * routines to recycle all nodes in a tree. it walks the tree
169 * and puts all nodes back on the free list so the nodes can be
170 * reused. the duplicate and bno/bcnt extent trees for each AG
171 * are recycled after they're no longer needed to save memory
174 release_extent_tree(avltree_desc_t
*tree
)
176 extent_tree_node_t
*ext
;
177 extent_tree_node_t
*tmp
;
178 extent_tree_node_t
*lext
;
179 extent_tree_node_t
*ltmp
;
181 if (tree
->avl_firstino
== NULL
)
184 ext
= (extent_tree_node_t
*) tree
->avl_firstino
;
186 while (ext
!= NULL
) {
187 tmp
= (extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
190 * ext->next is guaranteed to be set only in bcnt trees
192 if (ext
->next
!= NULL
) {
194 while (lext
!= NULL
) {
196 release_extent_tree_node(lext
);
201 release_extent_tree_node(ext
);
205 tree
->avl_root
= tree
->avl_firstino
= NULL
;
211 * top-level (visible) routines
214 release_dup_extent_tree(xfs_agnumber_t agno
)
216 release_extent_tree(extent_tree_ptrs
[agno
]);
222 release_agbno_extent_tree(xfs_agnumber_t agno
)
224 release_extent_tree(extent_bno_ptrs
[agno
]);
230 release_agbcnt_extent_tree(xfs_agnumber_t agno
)
232 release_extent_tree(extent_bcnt_ptrs
[agno
]);
238 * the next 4 routines manage the trees of free extents -- 2 trees
239 * per AG. The first tree is sorted by block number. The second
240 * tree is sorted by extent size. This is the bno tree.
243 add_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
244 xfs_extlen_t blockcount
)
246 extent_tree_node_t
*ext
;
248 ASSERT(extent_bno_ptrs
!= NULL
);
249 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
251 ext
= mk_extent_tree_nodes(startblock
, blockcount
, XR_E_FREE
);
253 if (avl_insert(extent_bno_ptrs
[agno
], (avlnode_t
*) ext
) == NULL
) {
254 do_error(_("duplicate bno extent range\n"));
259 findfirst_bno_extent(xfs_agnumber_t agno
)
261 ASSERT(extent_bno_ptrs
!= NULL
);
262 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
264 return((extent_tree_node_t
*) extent_bno_ptrs
[agno
]->avl_firstino
);
268 find_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
)
270 ASSERT(extent_bno_ptrs
!= NULL
);
271 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
273 return((extent_tree_node_t
*) avl_find(extent_bno_ptrs
[agno
],
278 * delete a node that's in the tree (pointer obtained by a find routine)
281 get_bno_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
)
283 ASSERT(extent_bno_ptrs
!= NULL
);
284 ASSERT(extent_bno_ptrs
[agno
] != NULL
);
286 avl_delete(extent_bno_ptrs
[agno
], &ext
->avl_node
);
/*
 * normalizing constant for bcnt size -> address conversion (see avl ops)
 * used by the AVL tree code to convert sizes and must be used when
 * doing an AVL search in the tree (e.g. avl_findrange(s))
 */
#define MAXBCNT		0xFFFFFFFF
#define BCNT_ADDR(cnt)	((unsigned int) MAXBCNT - (cnt))
300 * the next 4 routines manage the trees of free extents -- 2 trees
301 * per AG. The first tree is sorted by block number. The second
302 * tree is sorted by extent size. This is the bcnt tree.
305 add_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
306 xfs_extlen_t blockcount
)
308 extent_tree_node_t
*ext
, *prev
, *current
, *top
;
309 xfs_agblock_t tmp_startblock
;
310 xfs_extlen_t tmp_blockcount
;
311 extent_state_t tmp_state
;
313 ASSERT(extent_bcnt_ptrs
!= NULL
);
314 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
316 ext
= mk_extent_tree_nodes(startblock
, blockcount
, XR_E_FREE
);
318 ASSERT(ext
->next
== NULL
);
321 fprintf(stderr
, "adding bcnt: agno = %d, start = %u, count = %u\n",
322 agno
, startblock
, blockcount
);
324 if ((current
= (extent_tree_node_t
*) avl_find(extent_bcnt_ptrs
[agno
],
325 blockcount
)) != NULL
) {
327 * avl tree code doesn't handle dups so insert
328 * onto linked list in increasing startblock order
330 * when called from mk_incore_fstree,
331 * startblock is in increasing order.
332 * current is an "anchor" node.
333 * quick check if the new ext goes to the end.
334 * if so, append at the end, using the last field
337 ASSERT(current
->last
!= NULL
);
338 if (startblock
> current
->last
->ex_startblock
) {
339 current
->last
->next
= ext
;
345 * scan, to find the proper location for new entry.
346 * this scan is *very* expensive and gets worse with
347 * with increasing entries.
349 top
= prev
= current
;
350 while (current
!= NULL
&&
351 startblock
> current
->ex_startblock
) {
353 current
= current
->next
;
356 if (top
== current
) {
359 * new entry should be ahead of current.
360 * to keep the avl tree intact,
361 * swap the values of to-be-inserted element
362 * and the values of the head of the list.
363 * then insert as the 2nd element on the list.
365 * see the comment in get_bcnt_extent()
366 * as to why we have to do this.
368 tmp_startblock
= top
->ex_startblock
;
369 tmp_blockcount
= top
->ex_blockcount
;
370 tmp_state
= top
->ex_state
;
372 top
->ex_startblock
= ext
->ex_startblock
;
373 top
->ex_blockcount
= ext
->ex_blockcount
;
374 top
->ex_state
= ext
->ex_state
;
376 ext
->ex_startblock
= tmp_startblock
;
377 ext
->ex_blockcount
= tmp_blockcount
;
378 ext
->ex_state
= tmp_state
;
390 if (avl_insert(extent_bcnt_ptrs
[agno
], (avlnode_t
*) ext
) == NULL
) {
391 do_error(_(": duplicate bno extent range\n"));
394 ext
->last
= ext
; /* ext is an "anchor" node */
400 findfirst_bcnt_extent(xfs_agnumber_t agno
)
402 ASSERT(extent_bcnt_ptrs
!= NULL
);
403 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
405 return((extent_tree_node_t
*) extent_bcnt_ptrs
[agno
]->avl_firstino
);
409 findbiggest_bcnt_extent(xfs_agnumber_t agno
)
411 extern avlnode_t
*avl_lastino(avlnode_t
*root
);
413 ASSERT(extent_bcnt_ptrs
!= NULL
);
414 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
416 return((extent_tree_node_t
*) avl_lastino(extent_bcnt_ptrs
[agno
]->avl_root
));
420 findnext_bcnt_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
)
424 if (ext
->next
!= NULL
) {
425 ASSERT(ext
->ex_blockcount
== ext
->next
->ex_blockcount
);
426 ASSERT(ext
->ex_startblock
< ext
->next
->ex_startblock
);
430 * have to look at the top of the list to get the
431 * correct avl_nextino pointer since that pointer
432 * is maintained and altered by the AVL code.
434 nextino
= avl_find(extent_bcnt_ptrs
[agno
], ext
->ex_blockcount
);
435 ASSERT(nextino
!= NULL
);
436 if (nextino
->avl_nextino
!= NULL
) {
437 ASSERT(ext
->ex_blockcount
< ((extent_tree_node_t
*)
438 nextino
->avl_nextino
)->ex_blockcount
);
440 return((extent_tree_node_t
*) nextino
->avl_nextino
);
445 * this is meant to be called after you walk the bno tree to
446 * determine exactly which extent you want (so you'll know the
447 * desired value for startblock when you call this routine).
450 get_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
451 xfs_extlen_t blockcount
)
453 extent_tree_node_t
*ext
, *prev
, *top
;
454 xfs_agblock_t tmp_startblock
;
455 xfs_extlen_t tmp_blockcount
;
456 extent_state_t tmp_state
;
459 ASSERT(extent_bcnt_ptrs
!= NULL
);
460 ASSERT(extent_bcnt_ptrs
[agno
] != NULL
);
462 if ((ext
= (extent_tree_node_t
*) avl_find(extent_bcnt_ptrs
[agno
],
463 blockcount
)) == NULL
)
468 if (ext
->next
!= NULL
) {
470 * pull it off the list
472 while (ext
!= NULL
&& startblock
!= ext
->ex_startblock
) {
479 * this node is linked into the tree so we
480 * swap the core values so we can delete
481 * the next item on the list instead of
482 * the head of the list. This is because
483 * the rest of the tree undoubtedly has
484 * pointers to the piece of memory that
485 * is the head of the list so pulling
486 * the item out of the list and hence
487 * the avl tree would be a bad idea.
489 * (cheaper than the alternative, a tree
490 * delete of this node followed by a tree
491 * insert of the next node on the list).
493 tmp_startblock
= ext
->next
->ex_startblock
;
494 tmp_blockcount
= ext
->next
->ex_blockcount
;
495 tmp_state
= ext
->next
->ex_state
;
497 ext
->next
->ex_startblock
= ext
->ex_startblock
;
498 ext
->next
->ex_blockcount
= ext
->ex_blockcount
;
499 ext
->next
->ex_state
= ext
->ex_state
;
501 ext
->ex_startblock
= tmp_startblock
;
502 ext
->ex_blockcount
= tmp_blockcount
;
503 ext
->ex_state
= tmp_state
;
509 * now, a simple list deletion
511 prev
->next
= ext
->next
;
515 * no list, just one node. simply delete
517 avl_delete(extent_bcnt_ptrs
[agno
], &ext
->avl_node
);
520 ASSERT(ext
->ex_startblock
== startblock
);
521 ASSERT(ext
->ex_blockcount
== blockcount
);
526 * the next 2 routines manage the trees of duplicate extents -- 1 tree
530 add_dup_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
531 xfs_extlen_t blockcount
)
533 extent_tree_node_t
*first
, *last
, *ext
, *next_ext
;
534 xfs_agblock_t new_startblock
;
535 xfs_extlen_t new_blockcount
;
537 ASSERT(agno
< glob_agcount
);
540 fprintf(stderr
, "Adding dup extent - %d/%d %d\n", agno
, startblock
, blockcount
);
542 avl_findranges(extent_tree_ptrs
[agno
], startblock
- 1,
543 startblock
+ blockcount
+ 1,
544 (avlnode_t
**) &first
, (avlnode_t
**) &last
);
546 * find adjacent and overlapping extent blocks
548 if (first
== NULL
&& last
== NULL
) {
549 /* nothing, just make and insert new extent */
551 ext
= mk_extent_tree_nodes(startblock
, blockcount
, XR_E_MULT
);
553 if (avl_insert(extent_tree_ptrs
[agno
],
554 (avlnode_t
*) ext
) == NULL
) {
555 do_error(_("duplicate extent range\n"));
561 ASSERT(first
!= NULL
&& last
!= NULL
);
564 * find the new composite range, delete old extent nodes
567 new_startblock
= startblock
;
568 new_blockcount
= blockcount
;
571 ext
!= (extent_tree_node_t
*) last
->avl_node
.avl_nextino
;
574 * preserve the next inorder node
576 next_ext
= (extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
578 * just bail if the new extent is contained within an old one
580 if (ext
->ex_startblock
<= startblock
&&
581 ext
->ex_blockcount
>= blockcount
)
584 * now check for overlaps and adjacent extents
586 if (ext
->ex_startblock
+ ext
->ex_blockcount
>= startblock
587 || ext
->ex_startblock
<= startblock
+ blockcount
) {
589 if (ext
->ex_startblock
< new_startblock
)
590 new_startblock
= ext
->ex_startblock
;
592 if (ext
->ex_startblock
+ ext
->ex_blockcount
>
593 new_startblock
+ new_blockcount
)
594 new_blockcount
= ext
->ex_startblock
+
598 avl_delete(extent_tree_ptrs
[agno
], (avlnode_t
*) ext
);
603 ext
= mk_extent_tree_nodes(new_startblock
, new_blockcount
, XR_E_MULT
);
605 if (avl_insert(extent_tree_ptrs
[agno
], (avlnode_t
*) ext
) == NULL
) {
606 do_error(_("duplicate extent range\n"));
612 static __psunsigned_t
613 avl_ext_start(avlnode_t
*node
)
615 return((__psunsigned_t
)
616 ((extent_tree_node_t
*) node
)->ex_startblock
);
619 static __psunsigned_t
620 avl_ext_end(avlnode_t
*node
)
622 return((__psunsigned_t
) (
623 ((extent_tree_node_t
*) node
)->ex_startblock
+
624 ((extent_tree_node_t
*) node
)->ex_blockcount
));
628 * convert size to an address for the AVL tree code -- the bigger the size,
629 * the lower the address so the biggest extent will be first in the tree
631 static __psunsigned_t
632 avl_ext_bcnt_start(avlnode_t
*node
)
635 return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
636 node)->ex_blockcount)));
638 return((__psunsigned_t
) ((extent_tree_node_t
*)node
)->ex_blockcount
);
641 static __psunsigned_t
642 avl_ext_bcnt_end(avlnode_t
*node
)
645 return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
646 node)->ex_blockcount)));
648 return((__psunsigned_t
) ((extent_tree_node_t
*)node
)->ex_blockcount
);
651 avlops_t avl_extent_bcnt_tree_ops
= {
656 avlops_t avl_extent_tree_ops
= {
662 * for real-time extents -- have to dup code since realtime extent
663 * startblocks can be 64-bit values.
665 static rt_extent_tree_node_t
*
666 mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock
,
667 xfs_extlen_t new_blockcount
, extent_state_t new_state
)
670 rt_extent_tree_node_t
*new;
671 rt_extent_alloc_rec_t
*rec
;
673 pthread_mutex_lock(&rt_ext_flist_lock
);
674 if (rt_ext_flist
.cnt
== 0) {
675 ASSERT(rt_ext_flist
.list
== NULL
);
677 if ((rec
= malloc(sizeof(rt_extent_alloc_rec_t
))) == NULL
)
679 _("couldn't allocate new extent descriptors.\n"));
681 record_allocation(&rec
->alloc_rec
, rt_ba_list
);
683 new = &rec
->extents
[0];
685 for (i
= 0; i
< ALLOC_NUM_EXTS
; i
++) {
686 new->avl_node
.avl_nextino
= (avlnode_t
*)
688 rt_ext_flist
.list
= new;
694 ASSERT(rt_ext_flist
.list
!= NULL
);
696 new = rt_ext_flist
.list
;
697 rt_ext_flist
.list
= (rt_extent_tree_node_t
*) new->avl_node
.avl_nextino
;
699 new->avl_node
.avl_nextino
= NULL
;
700 pthread_mutex_unlock(&rt_ext_flist_lock
);
702 /* initialize node */
704 new->rt_startblock
= new_startblock
;
705 new->rt_blockcount
= new_blockcount
;
706 new->rt_state
= new_state
;
713 release_rt_extent_tree_node(rt_extent_tree_node_t
*node
)
715 node
->avl_node
.avl_nextino
= (avlnode_t
*) rt_ext_flist
.list
;
716 rt_ext_flist
.list
= node
;
723 release_rt_extent_tree()
725 extent_tree_node_t
*ext
;
726 extent_tree_node_t
*tmp
;
727 extent_tree_node_t
*lext
;
728 extent_tree_node_t
*ltmp
;
729 avl64tree_desc_t
*tree
;
731 tree
= rt_extent_tree_ptr
;
733 if (tree
->avl_firstino
== NULL
)
736 ext
= (extent_tree_node_t
*) tree
->avl_firstino
;
738 while (ext
!= NULL
) {
739 tmp
= (extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
740 release_rt_extent_tree_node(ext
);
744 tree
->avl_root
= tree
->avl_firstino
= NULL
;
751 * don't need release functions for realtime tree teardown
752 * since we only have one tree, not one per AG
756 free_rt_dup_extent_tree(xfs_mount_t
*mp
)
758 ASSERT(mp
->m_sb
.sb_rblocks
!= 0);
760 free_allocations(rt_ba_list
);
761 free(rt_ext_tree_ptr
);
764 rt_ext_tree_ptr
= NULL
;
770 * add a duplicate real-time extent
773 add_rt_dup_extent(xfs_drtbno_t startblock
, xfs_extlen_t blockcount
)
775 rt_extent_tree_node_t
*first
, *last
, *ext
, *next_ext
;
776 xfs_drtbno_t new_startblock
;
777 xfs_extlen_t new_blockcount
;
779 pthread_mutex_lock(&rt_ext_tree_lock
);
780 avl64_findranges(rt_ext_tree_ptr
, startblock
- 1,
781 startblock
+ blockcount
+ 1,
782 (avl64node_t
**) &first
, (avl64node_t
**) &last
);
784 * find adjacent and overlapping extent blocks
786 if (first
== NULL
&& last
== NULL
) {
787 /* nothing, just make and insert new extent */
789 ext
= mk_rt_extent_tree_nodes(startblock
,
790 blockcount
, XR_E_MULT
);
792 if (avl64_insert(rt_ext_tree_ptr
,
793 (avl64node_t
*) ext
) == NULL
) {
794 do_error(_("duplicate extent range\n"));
797 pthread_mutex_unlock(&rt_ext_tree_lock
);
801 ASSERT(first
!= NULL
&& last
!= NULL
);
804 * find the new composite range, delete old extent nodes
807 new_startblock
= startblock
;
808 new_blockcount
= blockcount
;
811 ext
!= (rt_extent_tree_node_t
*) last
->avl_node
.avl_nextino
;
814 * preserve the next inorder node
816 next_ext
= (rt_extent_tree_node_t
*) ext
->avl_node
.avl_nextino
;
818 * just bail if the new extent is contained within an old one
820 if (ext
->rt_startblock
<= startblock
&&
821 ext
->rt_blockcount
>= blockcount
) {
822 pthread_mutex_unlock(&rt_ext_tree_lock
);
826 * now check for overlaps and adjacent extents
828 if (ext
->rt_startblock
+ ext
->rt_blockcount
>= startblock
829 || ext
->rt_startblock
<= startblock
+ blockcount
) {
831 if (ext
->rt_startblock
< new_startblock
)
832 new_startblock
= ext
->rt_startblock
;
834 if (ext
->rt_startblock
+ ext
->rt_blockcount
>
835 new_startblock
+ new_blockcount
)
836 new_blockcount
= ext
->rt_startblock
+
840 avl64_delete(rt_ext_tree_ptr
, (avl64node_t
*) ext
);
845 ext
= mk_rt_extent_tree_nodes(new_startblock
,
846 new_blockcount
, XR_E_MULT
);
848 if (avl64_insert(rt_ext_tree_ptr
, (avl64node_t
*) ext
) == NULL
) {
849 do_error(_("duplicate extent range\n"));
852 pthread_mutex_unlock(&rt_ext_tree_lock
);
857 * returns 1 if block is a dup, 0 if not
861 search_rt_dup_extent(xfs_mount_t
*mp
, xfs_drtbno_t bno
)
865 pthread_mutex_lock(&rt_ext_tree_lock
);
866 if (avl64_findrange(rt_ext_tree_ptr
, bno
) != NULL
)
870 pthread_mutex_unlock(&rt_ext_tree_lock
);
875 avl64_rt_ext_start(avl64node_t
*node
)
877 return(((rt_extent_tree_node_t
*) node
)->rt_startblock
);
881 avl64_ext_end(avl64node_t
*node
)
883 return(((rt_extent_tree_node_t
*) node
)->rt_startblock
+
884 ((rt_extent_tree_node_t
*) node
)->rt_blockcount
);
887 avl64ops_t avl64_extent_tree_ops
= {
893 incore_ext_init(xfs_mount_t
*mp
)
896 xfs_agnumber_t agcount
= mp
->m_sb
.sb_agcount
;
900 pthread_mutex_init(&ext_flist_lock
, NULL
);
901 pthread_mutex_init(&rt_ext_tree_lock
, NULL
);
902 pthread_mutex_init(&rt_ext_flist_lock
, NULL
);
904 if ((extent_tree_ptrs
= malloc(agcount
*
905 sizeof(avltree_desc_t
*))) == NULL
)
907 _("couldn't malloc dup extent tree descriptor table\n"));
909 if ((extent_bno_ptrs
= malloc(agcount
*
910 sizeof(avltree_desc_t
*))) == NULL
)
912 _("couldn't malloc free by-bno extent tree descriptor table\n"));
914 if ((extent_bcnt_ptrs
= malloc(agcount
*
915 sizeof(avltree_desc_t
*))) == NULL
)
917 _("couldn't malloc free by-bcnt extent tree descriptor table\n"));
919 for (i
= 0; i
< agcount
; i
++) {
920 if ((extent_tree_ptrs
[i
] =
921 malloc(sizeof(avltree_desc_t
))) == NULL
)
923 _("couldn't malloc dup extent tree descriptor\n"));
924 if ((extent_bno_ptrs
[i
] =
925 malloc(sizeof(avltree_desc_t
))) == NULL
)
927 _("couldn't malloc bno extent tree descriptor\n"));
928 if ((extent_bcnt_ptrs
[i
] =
929 malloc(sizeof(avltree_desc_t
))) == NULL
)
931 _("couldn't malloc bcnt extent tree descriptor\n"));
934 for (i
= 0; i
< agcount
; i
++) {
935 avl_init_tree(extent_tree_ptrs
[i
], &avl_extent_tree_ops
);
936 avl_init_tree(extent_bno_ptrs
[i
], &avl_extent_tree_ops
);
937 avl_init_tree(extent_bcnt_ptrs
[i
], &avl_extent_bcnt_tree_ops
);
940 if ((rt_ext_tree_ptr
= malloc(sizeof(avltree_desc_t
))) == NULL
)
941 do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
943 avl64_init_tree(rt_ext_tree_ptr
, &avl64_extent_tree_ops
);
946 ext_flist
.list
= NULL
;
952 * this routine actually frees all the memory used to track per-AG trees
955 incore_ext_teardown(xfs_mount_t
*mp
)
959 free_allocations(ba_list
);
961 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
962 free(extent_tree_ptrs
[i
]);
963 free(extent_bno_ptrs
[i
]);
964 free(extent_bcnt_ptrs
[i
]);
967 free(extent_bcnt_ptrs
);
968 free(extent_bno_ptrs
);
969 free(extent_tree_ptrs
);
971 extent_bcnt_ptrs
= extent_bno_ptrs
= extent_tree_ptrs
= NULL
;
977 count_extents(xfs_agnumber_t agno
, avltree_desc_t
*tree
, int whichtree
)
979 extent_tree_node_t
*node
;
982 node
= (extent_tree_node_t
*) tree
->avl_firstino
;
984 while (node
!= NULL
) {
987 node
= findnext_bcnt_extent(agno
, node
);
989 node
= findnext_bno_extent(node
);
996 count_bno_extents_blocks(xfs_agnumber_t agno
, uint
*numblocks
)
999 extent_tree_node_t
*node
;
1002 ASSERT(agno
< glob_agcount
);
1006 node
= (extent_tree_node_t
*) extent_bno_ptrs
[agno
]->avl_firstino
;
1008 while (node
!= NULL
) {
1009 nblocks
+= node
->ex_blockcount
;
1011 node
= findnext_bno_extent(node
);
1014 *numblocks
= nblocks
;
1019 count_bno_extents(xfs_agnumber_t agno
)
1021 ASSERT(agno
< glob_agcount
);
1022 return(count_extents(agno
, extent_bno_ptrs
[agno
], 0));
1026 count_bcnt_extents(xfs_agnumber_t agno
)
1028 ASSERT(agno
< glob_agcount
);
1029 return(count_extents(agno
, extent_bcnt_ptrs
[agno
], 1));