]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - repair/incore_ext.c
xfs_repair: replace custom block allocation lists with list_heads
[thirdparty/xfsprogs-dev.git] / repair / incore_ext.c
1 /*
2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <libxfs.h>
20 #include "avl.h"
21 #include "globals.h"
22 #include "incore.h"
23 #include "agheader.h"
24 #include "protos.h"
25 #include "err_protos.h"
26 #include "avl64.h"
27 #include "threads.h"
28 #define ALLOC_NUM_EXTS 100
29
/*
 * extent nodes are allocated in batches of ALLOC_NUM_EXTS to cut down
 * on malloc traffic; each batch record is linked onto a list_head
 * (ba_list / rt_ba_list) so the whole pool can be freed at teardown.
 * paranoia -- account for any weird padding, 64/32-bit alignment, etc.
 */
typedef struct extent_alloc_rec {
	struct list_head list;			/* link on ba_list */
	extent_tree_node_t extents[ALLOC_NUM_EXTS];
} extent_alloc_rec_t;

/* same batching scheme for realtime extent nodes (linked on rt_ba_list) */
typedef struct rt_extent_alloc_rec {
	struct list_head list;
	rt_extent_tree_node_t extents[ALLOC_NUM_EXTS];
} rt_extent_alloc_rec_t;
42
43 /*
44 * note: there are 4 sets of incore things handled here:
45 * block bitmaps, extent trees, uncertain inode list,
46 * and inode tree. The tree-based code uses the AVL
47 * tree package used by the IRIX kernel VM code
48 * (sys/avl.h). The inode list code uses the same records
49 * as the inode tree code for convenience. The bitmaps
50 * and bitmap operators are mostly macros defined in incore.h.
51 * There are one of everything per AG except for extent
52 * trees. There's one duplicate extent tree, one bno and
53 * one bcnt extent tree per AG. Not all of the above exist
54 * through all phases. The duplicate extent tree gets trashed
55 * at the end of phase 4. The bno/bcnt trees don't appear until
56 * phase 5. The uncertain inode list goes away at the end of
 * phase 3.  The inode tree and bno/bcnt trees go away after phase 5.
58 */
/*
 * simple LIFO free list of recycled extent tree nodes; nodes are
 * threaded through their avl_node.avl_nextino pointers.  protected
 * by ext_flist_lock.
 */
typedef struct ext_flist_s {
	extent_tree_node_t *list;	/* head of the free list */
	int cnt;			/* nodes currently on the list */
} ext_flist_t;

static ext_flist_t ext_flist;

/* same free-list scheme for realtime nodes; guarded by rt_ext_flist_lock */
typedef struct rt_ext_flist_s {
	rt_extent_tree_node_t *list;
	int cnt;
} rt_ext_flist_t;

static rt_ext_flist_t rt_ext_flist;

static avl64tree_desc_t	*rt_ext_tree_ptr;	/* dup extent tree for rt */

avltree_desc_t **extent_tree_ptrs;		/* array of extent tree ptrs */
						/* one per ag for dups */
static avltree_desc_t **extent_bno_ptrs;	/*
						 * array of extent tree ptrs
						 * one per ag for free extents
						 * sorted by starting block
						 * number
						 */
static avltree_desc_t **extent_bcnt_ptrs;	/*
						 * array of extent tree ptrs
						 * one per ag for free extents
						 * sorted by size
						 */

/*
 * list of allocated "blocks" for easy freeing later
 */
static struct list_head ba_list;
static struct list_head rt_ba_list;

/*
 * locks.
 */
static pthread_mutex_t ext_flist_lock;
static pthread_mutex_t rt_ext_tree_lock;
static pthread_mutex_t rt_ext_flist_lock;
101
102 /*
103 * extent tree stuff is avl trees of duplicate extents,
104 * sorted in order by block number. there is one tree per ag.
105 */
106
107 static extent_tree_node_t *
108 mk_extent_tree_nodes(xfs_agblock_t new_startblock,
109 xfs_extlen_t new_blockcount, extent_state_t new_state)
110 {
111 int i;
112 extent_tree_node_t *new;
113 extent_alloc_rec_t *rec;
114
115 pthread_mutex_lock(&ext_flist_lock);
116 if (ext_flist.cnt == 0) {
117 ASSERT(ext_flist.list == NULL);
118
119 if ((rec = malloc(sizeof(extent_alloc_rec_t))) == NULL)
120 do_error(
121 _("couldn't allocate new extent descriptors.\n"));
122
123 list_add(&rec->list, &ba_list);
124
125 new = &rec->extents[0];
126
127 for (i = 0; i < ALLOC_NUM_EXTS; i++) {
128 new->avl_node.avl_nextino = (avlnode_t *)
129 ext_flist.list;
130 ext_flist.list = new;
131 ext_flist.cnt++;
132 new++;
133 }
134 }
135
136 ASSERT(ext_flist.list != NULL);
137
138 new = ext_flist.list;
139 ext_flist.list = (extent_tree_node_t *) new->avl_node.avl_nextino;
140 ext_flist.cnt--;
141 new->avl_node.avl_nextino = NULL;
142 pthread_mutex_unlock(&ext_flist_lock);
143
144 /* initialize node */
145
146 new->ex_startblock = new_startblock;
147 new->ex_blockcount = new_blockcount;
148 new->ex_state = new_state;
149 new->next = NULL;
150 new->last = NULL;
151
152 return(new);
153 }
154
155 void
156 release_extent_tree_node(extent_tree_node_t *node)
157 {
158 pthread_mutex_lock(&ext_flist_lock);
159 node->avl_node.avl_nextino = (avlnode_t *) ext_flist.list;
160 ext_flist.list = node;
161 ext_flist.cnt++;
162 pthread_mutex_unlock(&ext_flist_lock);
163
164 return;
165 }
166
167 /*
168 * routines to recycle all nodes in a tree. it walks the tree
169 * and puts all nodes back on the free list so the nodes can be
170 * reused. the duplicate and bno/bcnt extent trees for each AG
171 * are recycled after they're no longer needed to save memory
172 */
173 void
174 release_extent_tree(avltree_desc_t *tree)
175 {
176 extent_tree_node_t *ext;
177 extent_tree_node_t *tmp;
178 extent_tree_node_t *lext;
179 extent_tree_node_t *ltmp;
180
181 if (tree->avl_firstino == NULL)
182 return;
183
184 ext = (extent_tree_node_t *) tree->avl_firstino;
185
186 while (ext != NULL) {
187 tmp = (extent_tree_node_t *) ext->avl_node.avl_nextino;
188
189 /*
190 * ext->next is guaranteed to be set only in bcnt trees
191 */
192 if (ext->next != NULL) {
193 lext = ext->next;
194 while (lext != NULL) {
195 ltmp = lext->next;
196 release_extent_tree_node(lext);
197 lext = ltmp;
198 }
199 }
200
201 release_extent_tree_node(ext);
202 ext = tmp;
203 }
204
205 tree->avl_root = tree->avl_firstino = NULL;
206
207 return;
208 }
209
210 /*
211 * top-level (visible) routines
212 */
213 void
214 release_dup_extent_tree(xfs_agnumber_t agno)
215 {
216 release_extent_tree(extent_tree_ptrs[agno]);
217
218 return;
219 }
220
221 void
222 release_agbno_extent_tree(xfs_agnumber_t agno)
223 {
224 release_extent_tree(extent_bno_ptrs[agno]);
225
226 return;
227 }
228
229 void
230 release_agbcnt_extent_tree(xfs_agnumber_t agno)
231 {
232 release_extent_tree(extent_bcnt_ptrs[agno]);
233
234 return;
235 }
236
237 /*
238 * the next 4 routines manage the trees of free extents -- 2 trees
239 * per AG. The first tree is sorted by block number. The second
240 * tree is sorted by extent size. This is the bno tree.
241 */
242 void
243 add_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
244 xfs_extlen_t blockcount)
245 {
246 extent_tree_node_t *ext;
247
248 ASSERT(extent_bno_ptrs != NULL);
249 ASSERT(extent_bno_ptrs[agno] != NULL);
250
251 ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_FREE);
252
253 if (avl_insert(extent_bno_ptrs[agno], (avlnode_t *) ext) == NULL) {
254 do_error(_("duplicate bno extent range\n"));
255 }
256 }
257
258 extent_tree_node_t *
259 findfirst_bno_extent(xfs_agnumber_t agno)
260 {
261 ASSERT(extent_bno_ptrs != NULL);
262 ASSERT(extent_bno_ptrs[agno] != NULL);
263
264 return((extent_tree_node_t *) extent_bno_ptrs[agno]->avl_firstino);
265 }
266
267 extent_tree_node_t *
268 find_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock)
269 {
270 ASSERT(extent_bno_ptrs != NULL);
271 ASSERT(extent_bno_ptrs[agno] != NULL);
272
273 return((extent_tree_node_t *) avl_find(extent_bno_ptrs[agno],
274 startblock));
275 }
276
277 /*
278 * delete a node that's in the tree (pointer obtained by a find routine)
279 */
280 void
281 get_bno_extent(xfs_agnumber_t agno, extent_tree_node_t *ext)
282 {
283 ASSERT(extent_bno_ptrs != NULL);
284 ASSERT(extent_bno_ptrs[agno] != NULL);
285
286 avl_delete(extent_bno_ptrs[agno], &ext->avl_node);
287
288 return;
289 }
290
291 /*
292 * normalizing constant for bcnt size -> address conversion (see avl ops)
293 * used by the AVL tree code to convert sizes and must be used when
294 * doing an AVL search in the tree (e.g. avl_findrange(s))
295 */
296 #define MAXBCNT 0xFFFFFFFF
297 #define BCNT_ADDR(cnt) ((unsigned int) MAXBCNT - (cnt))
298
/*
 * the next 4 routines manage the trees of free extents -- 2 trees
 * per AG.  The first tree is sorted by block number.  The second
 * tree is sorted by extent size.  This is the bcnt tree.
 */

/*
 * insert a free extent into agno's by-size (bcnt) tree.  the avl tree
 * cannot hold duplicate keys, so all extents sharing one blockcount
 * hang off a singly linked ->next list rooted at the single node that
 * is actually in the tree (the "anchor"); the anchor's ->last caches
 * the list tail so in-order appends are O(1).  the list is kept
 * sorted by ex_startblock.
 */
void
add_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount)
{
	extent_tree_node_t *ext, *prev, *current, *top;
	xfs_agblock_t tmp_startblock;
	xfs_extlen_t tmp_blockcount;
	extent_state_t tmp_state;

	ASSERT(extent_bcnt_ptrs != NULL);
	ASSERT(extent_bcnt_ptrs[agno] != NULL);

	ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_FREE);

	ASSERT(ext->next == NULL);

#ifdef XR_BCNT_TRACE
	fprintf(stderr, "adding bcnt: agno = %d, start = %u, count = %u\n",
			agno, startblock, blockcount);
#endif
	if ((current = (extent_tree_node_t *) avl_find(extent_bcnt_ptrs[agno],
			blockcount)) != NULL) {
		/*
		 * avl tree code doesn't handle dups so insert
		 * onto linked list in increasing startblock order
		 *
		 * when called from mk_incore_fstree,
		 * startblock is in increasing order.
		 * current is an "anchor" node.
		 * quick check if the new ext goes to the end.
		 * if so, append at the end, using the last field
		 * of the "anchor".
		 */
		ASSERT(current->last != NULL);
		if (startblock > current->last->ex_startblock) {
			current->last->next = ext;
			current->last = ext;
			return;
		}

		/*
		 * scan, to find the proper location for new entry.
		 * this scan is *very* expensive and gets worse with
		 * with increasing entries.
		 */
		top = prev = current;
		while (current != NULL &&
				startblock > current->ex_startblock) {
			prev = current;
			current = current->next;
		}

		if (top == current) {
			ASSERT(top == prev);
			/*
			 * new entry should be ahead of current.
			 * to keep the avl tree intact,
			 * swap the values of to-be-inserted element
			 * and the values of the head of the list.
			 * then insert as the 2nd element on the list.
			 *
			 * see the comment in get_bcnt_extent()
			 * as to why we have to do this.
			 */
			tmp_startblock = top->ex_startblock;
			tmp_blockcount = top->ex_blockcount;
			tmp_state = top->ex_state;

			top->ex_startblock = ext->ex_startblock;
			top->ex_blockcount = ext->ex_blockcount;
			top->ex_state = ext->ex_state;

			ext->ex_startblock = tmp_startblock;
			ext->ex_blockcount = tmp_blockcount;
			ext->ex_state = tmp_state;

			current = top->next;
			prev = top;
		}

		/* splice the new node in between prev and current */
		prev->next = ext;
		ext->next = current;

		return;
	}

	/* no extent of this size yet -- new node becomes the anchor */
	if (avl_insert(extent_bcnt_ptrs[agno], (avlnode_t *) ext) == NULL) {
		do_error(_(": duplicate bno extent range\n"));
	}

	ext->last = ext;	/* ext is an "anchor" node */

	return;
}
398
399 extent_tree_node_t *
400 findfirst_bcnt_extent(xfs_agnumber_t agno)
401 {
402 ASSERT(extent_bcnt_ptrs != NULL);
403 ASSERT(extent_bcnt_ptrs[agno] != NULL);
404
405 return((extent_tree_node_t *) extent_bcnt_ptrs[agno]->avl_firstino);
406 }
407
408 extent_tree_node_t *
409 findbiggest_bcnt_extent(xfs_agnumber_t agno)
410 {
411 extern avlnode_t *avl_lastino(avlnode_t *root);
412
413 ASSERT(extent_bcnt_ptrs != NULL);
414 ASSERT(extent_bcnt_ptrs[agno] != NULL);
415
416 return((extent_tree_node_t *) avl_lastino(extent_bcnt_ptrs[agno]->avl_root));
417 }
418
419 extent_tree_node_t *
420 findnext_bcnt_extent(xfs_agnumber_t agno, extent_tree_node_t *ext)
421 {
422 avlnode_t *nextino;
423
424 if (ext->next != NULL) {
425 ASSERT(ext->ex_blockcount == ext->next->ex_blockcount);
426 ASSERT(ext->ex_startblock < ext->next->ex_startblock);
427 return(ext->next);
428 } else {
429 /*
430 * have to look at the top of the list to get the
431 * correct avl_nextino pointer since that pointer
432 * is maintained and altered by the AVL code.
433 */
434 nextino = avl_find(extent_bcnt_ptrs[agno], ext->ex_blockcount);
435 ASSERT(nextino != NULL);
436 if (nextino->avl_nextino != NULL) {
437 ASSERT(ext->ex_blockcount < ((extent_tree_node_t *)
438 nextino->avl_nextino)->ex_blockcount);
439 }
440 return((extent_tree_node_t *) nextino->avl_nextino);
441 }
442 }
443
/*
 * this is meant to be called after you walk the bno tree to
 * determine exactly which extent you want (so you'll know the
 * desired value for startblock when you call this routine).
 *
 * removes the (startblock, blockcount) extent from agno's bcnt tree
 * and returns it.  returns NULL only when no extent of that size is
 * in the tree; if an extent of the right size exists, the requested
 * startblock must be on its duplicate list (the ASSERT below fires
 * otherwise).
 */
extent_tree_node_t *
get_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount)
{
	extent_tree_node_t *ext, *prev, *top;
	xfs_agblock_t tmp_startblock;
	xfs_extlen_t tmp_blockcount;
	extent_state_t tmp_state;

	prev = NULL;
	ASSERT(extent_bcnt_ptrs != NULL);
	ASSERT(extent_bcnt_ptrs[agno] != NULL);

	if ((ext = (extent_tree_node_t *) avl_find(extent_bcnt_ptrs[agno],
			blockcount)) == NULL)
		return(NULL);

	top = ext;

	if (ext->next != NULL) {
		/*
		 * pull it off the list
		 */
		while (ext != NULL && startblock != ext->ex_startblock) {
			prev = ext;
			ext = ext->next;
		}
		/* caller must have picked a startblock that's on the list */
		ASSERT(ext != NULL);
		if (ext == top) {
			/*
			 * this node is linked into the tree so we
			 * swap the core values so we can delete
			 * the next item on the list instead of
			 * the head of the list.  This is because
			 * the rest of the tree undoubtedly has
			 * pointers to the piece of memory that
			 * is the head of the list so pulling
			 * the item out of the list and hence
			 * the avl tree would be a bad idea.
			 *
			 * (cheaper than the alternative, a tree
			 * delete of this node followed by a tree
			 * insert of the next node on the list).
			 */
			tmp_startblock = ext->next->ex_startblock;
			tmp_blockcount = ext->next->ex_blockcount;
			tmp_state = ext->next->ex_state;

			ext->next->ex_startblock = ext->ex_startblock;
			ext->next->ex_blockcount = ext->ex_blockcount;
			ext->next->ex_state = ext->ex_state;

			ext->ex_startblock = tmp_startblock;
			ext->ex_blockcount = tmp_blockcount;
			ext->ex_state = tmp_state;

			ext = ext->next;
			prev = top;
		}
		/*
		 * now, a simple list deletion.  NOTE(review): the
		 * anchor's ->last tail cache is not updated here even
		 * when the removed node was the tail -- presumably
		 * safe for phase-5 usage patterns; verify before
		 * relying on ->last after deletions.
		 */
		prev->next = ext->next;
		ext->next = NULL;
	} else {
		/*
		 * no list, just one node.  simply delete
		 */
		avl_delete(extent_bcnt_ptrs[agno], &ext->avl_node);
	}

	ASSERT(ext->ex_startblock == startblock);
	ASSERT(ext->ex_blockcount == blockcount);
	return(ext);
}
524
/*
 * the next 2 routines manage the trees of duplicate extents -- 1 tree
 * per AG
 */

/*
 * record [startblock, startblock + blockcount) as a duplicate extent
 * in agno's dup tree, merging it with any overlapping or adjacent
 * extents already recorded there.
 */
void
add_dup_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount)
{
	extent_tree_node_t *first, *last, *ext, *next_ext;
	xfs_agblock_t new_startblock;
	xfs_extlen_t new_blockcount;

	ASSERT(agno < glob_agcount);

#ifdef XR_DUP_TRACE
	fprintf(stderr, "Adding dup extent - %d/%d %d\n", agno, startblock, blockcount);
#endif
	/* collect all tree nodes within one block of the new range */
	avl_findranges(extent_tree_ptrs[agno], startblock - 1,
		startblock + blockcount + 1,
		(avlnode_t **) &first, (avlnode_t **) &last);
	/*
	 * find adjacent and overlapping extent blocks
	 */
	if (first == NULL && last == NULL) {
		/* nothing, just make and insert new extent */

		ext = mk_extent_tree_nodes(startblock, blockcount, XR_E_MULT);

		if (avl_insert(extent_tree_ptrs[agno],
				(avlnode_t *) ext) == NULL) {
			do_error(_("duplicate extent range\n"));
		}

		return;
	}

	ASSERT(first != NULL && last != NULL);

	/*
	 * find the new composite range, delete old extent nodes
	 * as we go
	 */
	new_startblock = startblock;
	new_blockcount = blockcount;

	for (ext = first;
		ext != (extent_tree_node_t *) last->avl_node.avl_nextino;
		ext = next_ext) {
		/*
		 * preserve the next inorder node
		 */
		next_ext = (extent_tree_node_t *) ext->avl_node.avl_nextino;
		/*
		 * just bail if the new extent is contained within an old one.
		 * NOTE(review): nodes deleted on earlier iterations are not
		 * reinstated on this early return -- presumably containment
		 * can only hold on the first overlapping node; confirm.
		 */
		if (ext->ex_startblock <= startblock &&
				ext->ex_blockcount >= blockcount)
			return;
		/*
		 * now check for overlaps and adjacent extents.
		 * NOTE(review): this OR-test is a tautology (one disjunct
		 * always holds), so every node avl_findranges() returned is
		 * merged.  harmless only because findranges already limited
		 * the candidates to the adjacent range; the intent was
		 * presumably an && test -- confirm before tightening.
		 */
		if (ext->ex_startblock + ext->ex_blockcount >= startblock
			|| ext->ex_startblock <= startblock + blockcount) {

			if (ext->ex_startblock < new_startblock)
				new_startblock = ext->ex_startblock;

			if (ext->ex_startblock + ext->ex_blockcount >
					new_startblock + new_blockcount)
				new_blockcount = ext->ex_startblock +
							ext->ex_blockcount -
							new_startblock;

			avl_delete(extent_tree_ptrs[agno], (avlnode_t *) ext);
			continue;
		}
	}

	/* insert the merged composite extent */
	ext = mk_extent_tree_nodes(new_startblock, new_blockcount, XR_E_MULT);

	if (avl_insert(extent_tree_ptrs[agno], (avlnode_t *) ext) == NULL) {
		do_error(_("duplicate extent range\n"));
	}

	return;
}
611
612 static __psunsigned_t
613 avl_ext_start(avlnode_t *node)
614 {
615 return((__psunsigned_t)
616 ((extent_tree_node_t *) node)->ex_startblock);
617 }
618
619 static __psunsigned_t
620 avl_ext_end(avlnode_t *node)
621 {
622 return((__psunsigned_t) (
623 ((extent_tree_node_t *) node)->ex_startblock +
624 ((extent_tree_node_t *) node)->ex_blockcount));
625 }
626
627 /*
628 * convert size to an address for the AVL tree code -- the bigger the size,
629 * the lower the address so the biggest extent will be first in the tree
630 */
631 static __psunsigned_t
632 avl_ext_bcnt_start(avlnode_t *node)
633 {
634 /*
635 return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
636 node)->ex_blockcount)));
637 */
638 return((__psunsigned_t) ((extent_tree_node_t *)node)->ex_blockcount);
639 }
640
641 static __psunsigned_t
642 avl_ext_bcnt_end(avlnode_t *node)
643 {
644 /*
645 return((__psunsigned_t) (BCNT_ADDR(((extent_tree_node_t *)
646 node)->ex_blockcount)));
647 */
648 return((__psunsigned_t) ((extent_tree_node_t *)node)->ex_blockcount);
649 }
650
/* ops vector for the by-size (bcnt) free extent trees */
avlops_t avl_extent_bcnt_tree_ops = {
	avl_ext_bcnt_start,
	avl_ext_bcnt_end
};

/* ops vector for the by-start-block trees (dup and bno) */
avlops_t avl_extent_tree_ops = {
	avl_ext_start,
	avl_ext_end
};
660
661 /*
662 * for real-time extents -- have to dup code since realtime extent
663 * startblocks can be 64-bit values.
664 */
665 static rt_extent_tree_node_t *
666 mk_rt_extent_tree_nodes(xfs_drtbno_t new_startblock,
667 xfs_extlen_t new_blockcount, extent_state_t new_state)
668 {
669 int i;
670 rt_extent_tree_node_t *new;
671 rt_extent_alloc_rec_t *rec;
672
673 pthread_mutex_lock(&rt_ext_flist_lock);
674 if (rt_ext_flist.cnt == 0) {
675 ASSERT(rt_ext_flist.list == NULL);
676
677 if ((rec = malloc(sizeof(rt_extent_alloc_rec_t))) == NULL)
678 do_error(
679 _("couldn't allocate new extent descriptors.\n"));
680
681 list_add(&rec->list, &rt_ba_list);
682
683 new = &rec->extents[0];
684
685 for (i = 0; i < ALLOC_NUM_EXTS; i++) {
686 new->avl_node.avl_nextino = (avlnode_t *)
687 rt_ext_flist.list;
688 rt_ext_flist.list = new;
689 rt_ext_flist.cnt++;
690 new++;
691 }
692 }
693
694 ASSERT(rt_ext_flist.list != NULL);
695
696 new = rt_ext_flist.list;
697 rt_ext_flist.list = (rt_extent_tree_node_t *) new->avl_node.avl_nextino;
698 rt_ext_flist.cnt--;
699 new->avl_node.avl_nextino = NULL;
700 pthread_mutex_unlock(&rt_ext_flist_lock);
701
702 /* initialize node */
703
704 new->rt_startblock = new_startblock;
705 new->rt_blockcount = new_blockcount;
706 new->rt_state = new_state;
707
708 return(new);
709 }
710
#if 0
/*
 * compiled-out (unused) release helpers for the realtime dup extent
 * tree; kept for reference only.  NOTE(review): release_rt_extent_tree()
 * below declares its locals as extent_tree_node_t rather than
 * rt_extent_tree_node_t -- fix the types before re-enabling this code.
 */
void
release_rt_extent_tree_node(rt_extent_tree_node_t *node)
{
	/* push back onto the rt free list (no locking in this dead code) */
	node->avl_node.avl_nextino = (avlnode_t *) rt_ext_flist.list;
	rt_ext_flist.list = node;
	rt_ext_flist.cnt++;

	return;
}

void
release_rt_extent_tree()
{
	extent_tree_node_t *ext;
	extent_tree_node_t *tmp;
	extent_tree_node_t *lext;
	extent_tree_node_t *ltmp;
	avl64tree_desc_t *tree;

	tree = rt_extent_tree_ptr;

	if (tree->avl_firstino == NULL)
		return;

	ext = (extent_tree_node_t *) tree->avl_firstino;

	/* walk the inorder chain, recycling every node */
	while (ext != NULL) {
		tmp = (extent_tree_node_t *) ext->avl_node.avl_nextino;
		release_rt_extent_tree_node(ext);
		ext = tmp;
	}

	tree->avl_root = tree->avl_firstino = NULL;

	return;
}
#endif
749
750 /*
751 * don't need release functions for realtime tree teardown
752 * since we only have one tree, not one per AG
753 */
754 /* ARGSUSED */
755 void
756 free_rt_dup_extent_tree(xfs_mount_t *mp)
757 {
758 rt_extent_alloc_rec_t *cur, *tmp;
759
760 ASSERT(mp->m_sb.sb_rblocks != 0);
761
762 list_for_each_entry_safe(cur, tmp, &rt_ba_list, list)
763 free(cur);
764
765 free(rt_ext_tree_ptr);
766
767 rt_ext_tree_ptr = NULL;
768
769 return;
770 }
771
/*
 * add a duplicate real-time extent
 */
/*
 * record [startblock, startblock + blockcount) in the single realtime
 * dup tree, merging with any overlapping or adjacent extents already
 * there.  rt_ext_tree_lock is held for the whole operation; every
 * return path drops it.
 */
void
add_rt_dup_extent(xfs_drtbno_t startblock, xfs_extlen_t blockcount)
{
	rt_extent_tree_node_t *first, *last, *ext, *next_ext;
	xfs_drtbno_t new_startblock;
	xfs_extlen_t new_blockcount;

	pthread_mutex_lock(&rt_ext_tree_lock);
	/* collect all tree nodes within one block of the new range */
	avl64_findranges(rt_ext_tree_ptr, startblock - 1,
		startblock + blockcount + 1,
		(avl64node_t **) &first, (avl64node_t **) &last);
	/*
	 * find adjacent and overlapping extent blocks
	 */
	if (first == NULL && last == NULL) {
		/* nothing, just make and insert new extent */

		ext = mk_rt_extent_tree_nodes(startblock,
				blockcount, XR_E_MULT);

		if (avl64_insert(rt_ext_tree_ptr,
				(avl64node_t *) ext) == NULL) {
			do_error(_("duplicate extent range\n"));
		}

		pthread_mutex_unlock(&rt_ext_tree_lock);
		return;
	}

	ASSERT(first != NULL && last != NULL);

	/*
	 * find the new composite range, delete old extent nodes
	 * as we go
	 */
	new_startblock = startblock;
	new_blockcount = blockcount;

	for (ext = first;
		ext != (rt_extent_tree_node_t *) last->avl_node.avl_nextino;
		ext = next_ext) {
		/*
		 * preserve the next inorder node
		 */
		next_ext = (rt_extent_tree_node_t *) ext->avl_node.avl_nextino;
		/*
		 * just bail if the new extent is contained within an old one
		 */
		if (ext->rt_startblock <= startblock &&
				ext->rt_blockcount >= blockcount) {
			pthread_mutex_unlock(&rt_ext_tree_lock);
			return;
		}
		/*
		 * now check for overlaps and adjacent extents.
		 * NOTE(review): as in add_dup_extent(), this OR-test is
		 * always true, so every node avl64_findranges() returned
		 * is merged; harmless given the pre-filtered range, but
		 * presumably meant to be an && test -- confirm.
		 */
		if (ext->rt_startblock + ext->rt_blockcount >= startblock
			|| ext->rt_startblock <= startblock + blockcount) {

			if (ext->rt_startblock < new_startblock)
				new_startblock = ext->rt_startblock;

			if (ext->rt_startblock + ext->rt_blockcount >
					new_startblock + new_blockcount)
				new_blockcount = ext->rt_startblock +
							ext->rt_blockcount -
							new_startblock;

			avl64_delete(rt_ext_tree_ptr, (avl64node_t *) ext);
			continue;
		}
	}

	/* insert the merged composite extent */
	ext = mk_rt_extent_tree_nodes(new_startblock,
			new_blockcount, XR_E_MULT);

	if (avl64_insert(rt_ext_tree_ptr, (avl64node_t *) ext) == NULL) {
		do_error(_("duplicate extent range\n"));
	}

	pthread_mutex_unlock(&rt_ext_tree_lock);
	return;
}
858
859 /*
860 * returns 1 if block is a dup, 0 if not
861 */
862 /* ARGSUSED */
863 int
864 search_rt_dup_extent(xfs_mount_t *mp, xfs_drtbno_t bno)
865 {
866 int ret;
867
868 pthread_mutex_lock(&rt_ext_tree_lock);
869 if (avl64_findrange(rt_ext_tree_ptr, bno) != NULL)
870 ret = 1;
871 else
872 ret = 0;
873 pthread_mutex_unlock(&rt_ext_tree_lock);
874 return(ret);
875 }
876
877 static __uint64_t
878 avl64_rt_ext_start(avl64node_t *node)
879 {
880 return(((rt_extent_tree_node_t *) node)->rt_startblock);
881 }
882
883 static __uint64_t
884 avl64_ext_end(avl64node_t *node)
885 {
886 return(((rt_extent_tree_node_t *) node)->rt_startblock +
887 ((rt_extent_tree_node_t *) node)->rt_blockcount);
888 }
889
/* ops vector for the 64-bit realtime dup extent tree */
avl64ops_t avl64_extent_tree_ops = {
	avl64_rt_ext_start,
	avl64_ext_end
};
894
895 void
896 incore_ext_init(xfs_mount_t *mp)
897 {
898 int i;
899 xfs_agnumber_t agcount = mp->m_sb.sb_agcount;
900
901 list_head_init(&ba_list);
902 list_head_init(&rt_ba_list);
903 pthread_mutex_init(&ext_flist_lock, NULL);
904 pthread_mutex_init(&rt_ext_tree_lock, NULL);
905 pthread_mutex_init(&rt_ext_flist_lock, NULL);
906
907 if ((extent_tree_ptrs = malloc(agcount *
908 sizeof(avltree_desc_t *))) == NULL)
909 do_error(
910 _("couldn't malloc dup extent tree descriptor table\n"));
911
912 if ((extent_bno_ptrs = malloc(agcount *
913 sizeof(avltree_desc_t *))) == NULL)
914 do_error(
915 _("couldn't malloc free by-bno extent tree descriptor table\n"));
916
917 if ((extent_bcnt_ptrs = malloc(agcount *
918 sizeof(avltree_desc_t *))) == NULL)
919 do_error(
920 _("couldn't malloc free by-bcnt extent tree descriptor table\n"));
921
922 for (i = 0; i < agcount; i++) {
923 if ((extent_tree_ptrs[i] =
924 malloc(sizeof(avltree_desc_t))) == NULL)
925 do_error(
926 _("couldn't malloc dup extent tree descriptor\n"));
927 if ((extent_bno_ptrs[i] =
928 malloc(sizeof(avltree_desc_t))) == NULL)
929 do_error(
930 _("couldn't malloc bno extent tree descriptor\n"));
931 if ((extent_bcnt_ptrs[i] =
932 malloc(sizeof(avltree_desc_t))) == NULL)
933 do_error(
934 _("couldn't malloc bcnt extent tree descriptor\n"));
935 }
936
937 for (i = 0; i < agcount; i++) {
938 avl_init_tree(extent_tree_ptrs[i], &avl_extent_tree_ops);
939 avl_init_tree(extent_bno_ptrs[i], &avl_extent_tree_ops);
940 avl_init_tree(extent_bcnt_ptrs[i], &avl_extent_bcnt_tree_ops);
941 }
942
943 if ((rt_ext_tree_ptr = malloc(sizeof(avltree_desc_t))) == NULL)
944 do_error(_("couldn't malloc dup rt extent tree descriptor\n"));
945
946 avl64_init_tree(rt_ext_tree_ptr, &avl64_extent_tree_ops);
947
948 ext_flist.cnt = 0;
949 ext_flist.list = NULL;
950
951 return;
952 }
953
954 /*
955 * this routine actually frees all the memory used to track per-AG trees
956 */
957 void
958 incore_ext_teardown(xfs_mount_t *mp)
959 {
960 extent_alloc_rec_t *cur, *tmp;
961 xfs_agnumber_t i;
962
963 list_for_each_entry_safe(cur, tmp, &ba_list, list)
964 free(cur);
965
966 for (i = 0; i < mp->m_sb.sb_agcount; i++) {
967 free(extent_tree_ptrs[i]);
968 free(extent_bno_ptrs[i]);
969 free(extent_bcnt_ptrs[i]);
970 }
971
972 free(extent_bcnt_ptrs);
973 free(extent_bno_ptrs);
974 free(extent_tree_ptrs);
975
976 extent_bcnt_ptrs = extent_bno_ptrs = extent_tree_ptrs = NULL;
977
978 return;
979 }
980
981 int
982 count_extents(xfs_agnumber_t agno, avltree_desc_t *tree, int whichtree)
983 {
984 extent_tree_node_t *node;
985 int i = 0;
986
987 node = (extent_tree_node_t *) tree->avl_firstino;
988
989 while (node != NULL) {
990 i++;
991 if (whichtree)
992 node = findnext_bcnt_extent(agno, node);
993 else
994 node = findnext_bno_extent(node);
995 }
996
997 return(i);
998 }
999
1000 int
1001 count_bno_extents_blocks(xfs_agnumber_t agno, uint *numblocks)
1002 {
1003 __uint64_t nblocks;
1004 extent_tree_node_t *node;
1005 int i = 0;
1006
1007 ASSERT(agno < glob_agcount);
1008
1009 nblocks = 0;
1010
1011 node = (extent_tree_node_t *) extent_bno_ptrs[agno]->avl_firstino;
1012
1013 while (node != NULL) {
1014 nblocks += node->ex_blockcount;
1015 i++;
1016 node = findnext_bno_extent(node);
1017 }
1018
1019 *numblocks = nblocks;
1020 return(i);
1021 }
1022
1023 int
1024 count_bno_extents(xfs_agnumber_t agno)
1025 {
1026 ASSERT(agno < glob_agcount);
1027 return(count_extents(agno, extent_bno_ptrs[agno], 0));
1028 }
1029
1030 int
1031 count_bcnt_extents(xfs_agnumber_t agno)
1032 {
1033 ASSERT(agno < glob_agcount);
1034 return(count_extents(agno, extent_bcnt_ptrs[agno], 1));
1035 }