/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <libxfs.h>
#include "avl.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "err_protos.h"

extern avlnode_t *avl_firstino(avlnode_t *root);

/*
 * array of inode tree ptrs, one per ag
 */
static avltree_desc_t **inode_tree_ptrs;

/*
 * ditto for uncertain inodes
 */
static avltree_desc_t **inode_uncertain_tree_ptrs;

#define ALLOC_NUM_INOS 100

/* free lists -- inode nodes and extent nodes */

typedef struct ino_flist_s {
        ino_tree_node_t *list;
        ino_tree_node_t *last;
        long long       cnt;
} ino_flist_t;

static ino_flist_t ino_flist;   /* free list must be initialized before use */

/*
 * next is the uncertain inode list -- a list of inode records kept
 * sorted in ascending order by starting inode number.  There is
 * one list per ag.
 */

/*
 * common code for creating inode records for use by trees and lists.
 * called only from add_inode() and add_aginode_uncertain()
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
/* ARGSUSED */
static ino_tree_node_t *
mk_ino_tree_nodes(xfs_agino_t starting_ino)
{
        int i;
        ino_tree_node_t *new;
        avlnode_t *node;

        if (ino_flist.cnt == 0) {
                ASSERT(ino_flist.list == NULL);

                if ((new = malloc(sizeof(ino_tree_node_t[ALLOC_NUM_INOS])))
                                == NULL)
                        do_error(_("inode map malloc failed\n"));

                for (i = 0; i < ALLOC_NUM_INOS; i++) {
                        new->avl_node.avl_nextino =
                                (avlnode_t *) ino_flist.list;
                        ino_flist.list = new;
                        ino_flist.cnt++;
                        new++;
                }
        }

        ASSERT(ino_flist.list != NULL);

        new = ino_flist.list;
        ino_flist.list = (ino_tree_node_t *) new->avl_node.avl_nextino;
        ino_flist.cnt--;
        node = &new->avl_node;
        node->avl_nextino = node->avl_forw = node->avl_back = NULL;

        /* initialize node */

        new->ino_startnum = 0;
        new->ino_confirmed = 0;
        new->ino_isa_dir = 0;
        new->ir_free = (xfs_inofree_t) -1;
        new->ino_un.backptrs = NULL;

        return(new);
}
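
/*
 * Note that mk_ino_tree_nodes() carves records out of ALLOC_NUM_INOS-
 * sized slabs and threads the spares onto ino_flist through the
 * avl_node.avl_nextino pointer, so handing out a record is just a pop
 * off a singly-linked list.  Records returned via free_ino_tree_node()
 * below go back onto the same list and get recycled; whole slabs are
 * never handed back to the system.
 */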

/*
 * return inode record to the free list; it will be re-initialized
 * when it gets pulled back off the list
 */
static void
free_ino_tree_node(ino_tree_node_t *ino_rec)
{
        ino_rec->avl_node.avl_nextino = NULL;
        ino_rec->avl_node.avl_forw = NULL;
        ino_rec->avl_node.avl_back = NULL;

        if (ino_flist.list != NULL) {
                ASSERT(ino_flist.cnt > 0);
                ino_rec->avl_node.avl_nextino = (avlnode_t *) ino_flist.list;
        } else {
                ASSERT(ino_flist.cnt == 0);
                ino_rec->avl_node.avl_nextino = NULL;
        }

        ino_flist.list = ino_rec;
        ino_flist.cnt++;

        if (ino_rec->ino_un.backptrs != NULL) {
                if (full_backptrs && ino_rec->ino_un.backptrs->parents != NULL)
                        free(ino_rec->ino_un.backptrs->parents);
                if (ino_rec->ino_un.plist != NULL)
                        free(ino_rec->ino_un.plist);
        }

        return;
}

/*
 * last referenced cache for uncertain inodes
 */
static ino_tree_node_t **last_rec;

/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes).  Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
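/*
 * A quick illustration of that alignment (assuming the usual
 * XFS_INODES_PER_CHUNK of 64): an uncertain agino of 71 rounds
 * down to a record starting at s_ino 64, and the inode occupies
 * bit offset 71 - 64 = 7 within that record.
 */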
void
add_aginode_uncertain(xfs_agnumber_t agno, xfs_agino_t ino, int free)
{
        ino_tree_node_t *ino_rec;
        xfs_agino_t s_ino;
        int offset;

        ASSERT(agno < glob_agcount);
        ASSERT(last_rec != NULL);

        s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

        /*
         * check for a cache hit
         */
        if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
                offset = ino - s_ino;
                if (free)
                        set_inode_free(last_rec[agno], offset);
                else
                        set_inode_used(last_rec[agno], offset);

                return;
        }

        /*
         * check to see if record containing inode is already in the tree.
         * if not, add it
         */
        if ((ino_rec = (ino_tree_node_t *)
                        avl_findrange(inode_uncertain_tree_ptrs[agno],
                                s_ino)) == NULL) {
                ino_rec = mk_ino_tree_nodes(s_ino);
                ino_rec->ino_startnum = s_ino;

                if (avl_insert(inode_uncertain_tree_ptrs[agno],
                                (avlnode_t *) ino_rec) == NULL) {
                        do_error(_("add_aginode_uncertain - "
                                   "duplicate inode range\n"));
                }
        }

        if (free)
                set_inode_free(ino_rec, ino - s_ino);
        else
                set_inode_used(ino_rec, ino - s_ino);

        /*
         * set cache entry
         */
        last_rec[agno] = ino_rec;

        return;
}

/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion.
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
        add_aginode_uncertain(XFS_INO_TO_AGNO(mp, ino),
                                XFS_INO_TO_AGINO(mp, ino), free);
}
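
/*
 * For illustration: XFS_INO_TO_AGNO()/XFS_INO_TO_AGINO() just split a
 * filesystem-wide inode number into its AG number (the high bits) and
 * its inode number within that AG (the low bits).  How many bits go
 * to each half depends on the superblock geometry, which is why the
 * macros have to be handed the xfs_mount_t.
 */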

/*
 * pull the indicated inode record out of the uncertain inode tree
 */
void
get_uncertain_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        ASSERT(inode_uncertain_tree_ptrs != NULL);
        ASSERT(inode_uncertain_tree_ptrs[agno] != NULL);

        avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);

        ino_rec->avl_node.avl_nextino = NULL;
        ino_rec->avl_node.avl_forw = NULL;
        ino_rec->avl_node.avl_back = NULL;
}

ino_tree_node_t *
findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
{
        return((ino_tree_node_t *)
                inode_uncertain_tree_ptrs[agno]->avl_firstino);
}

ino_tree_node_t *
find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
        return((ino_tree_node_t *)
                avl_findrange(inode_uncertain_tree_ptrs[agno], ino));
}

void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
        last_rec[agno] = NULL;

        return;
}

/*
 * next comes the inode trees.  One per ag.  AVL trees
 * of inode records, each inode record tracking 64 inodes
 */
/*
 * set up an inode tree record for a group of inodes that will
 * include the requested inode.
 *
 * does NOT error-check for duplicate records.  Caller is
 * responsible for checking that.
 *
 * ino must be the start of an XFS_INODES_PER_CHUNK (64) inode chunk
 *
 * Each inode resides in a 64-inode chunk, and the fs allocates inodes
 * in units of MAX(64, inodes-per-block) -- that is, several chunks at
 * once when one block can hold more than one chunk (inodes per block
 * > 64).  Allocating in single-chunk pieces causes us problems when
 * it takes more than one fs block to contain an inode chunk because
 * the chunks can start on *any* block boundary.  So we assume that
 * the caller has a clue because at this level, we don't.
 */
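/*
 * Concretely (an illustration, not a requirement stated elsewhere in
 * this file): each record covers the half-open agino range
 * [ino_startnum, ino_startnum + 64), so a record added for agino 128
 * covers aginos 128-191, and a later attempt to insert a record
 * starting at, say, agino 160 would overlap it and make the
 * avl_insert() below fail.
 */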
static ino_tree_node_t *
add_inode(xfs_agnumber_t agno, xfs_agino_t ino)
{
        ino_tree_node_t *ino_rec;

        /* no record exists, make some and put them into the tree */

        ino_rec = mk_ino_tree_nodes(ino);
        ino_rec->ino_startnum = ino;

        if (avl_insert(inode_tree_ptrs[agno],
                        (avlnode_t *) ino_rec) == NULL) {
                do_warn(_("add_inode - duplicate inode range\n"));
        }

        return(ino_rec);
}

/*
 * pull the indicated inode record out of the inode tree
 */
void
get_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        ASSERT(inode_tree_ptrs != NULL);
        ASSERT(inode_tree_ptrs[agno] != NULL);

        avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);

        ino_rec->avl_node.avl_nextino = NULL;
        ino_rec->avl_node.avl_forw = NULL;
        ino_rec->avl_node.avl_back = NULL;
}

/*
 * free the designated inode record (return it to the free pool)
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        free_ino_tree_node(ino_rec);

        return;
}

/*
 * returns the inode record containing the requested inode, or
 * NULL if no such record exists.  The tree-based find routines
 * do NOT pull records out of the tree.
 */
ino_tree_node_t *
find_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
        return((ino_tree_node_t *)
                avl_findrange(inode_tree_ptrs[agno], ino));
}

void
find_inode_rec_range(xfs_agnumber_t agno, xfs_agino_t start_ino,
                        xfs_agino_t end_ino, ino_tree_node_t **first,
                        ino_tree_node_t **last)
{
        *first = *last = NULL;

        avl_findranges(inode_tree_ptrs[agno], start_ino,
                end_ino, (avlnode_t **) first, (avlnode_t **) last);
        return;
}

/*
 * if ino doesn't exist, it must be properly aligned -- on a
 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
 * whichever alignment is larger.
 */
ino_tree_node_t *
set_inode_used_alloc(xfs_agnumber_t agno, xfs_agino_t ino)
{
        ino_tree_node_t *ino_rec;

        /*
         * check alignment -- the only way to detect this
         * is to see if the chunk overlaps another chunk
         * already in the tree
         */
        ino_rec = add_inode(agno, ino);

        ASSERT(ino_rec != NULL);
        ASSERT(ino >= ino_rec->ino_startnum &&
                ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

        set_inode_used(ino_rec, ino - ino_rec->ino_startnum);

        return(ino_rec);
}

ino_tree_node_t *
set_inode_free_alloc(xfs_agnumber_t agno, xfs_agino_t ino)
{
        ino_tree_node_t *ino_rec;

        ino_rec = add_inode(agno, ino);

        ASSERT(ino_rec != NULL);
        ASSERT(ino >= ino_rec->ino_startnum &&
                ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

        set_inode_free(ino_rec, ino - ino_rec->ino_startnum);

        return(ino_rec);
}

ino_tree_node_t *
findfirst_inode_rec(xfs_agnumber_t agno)
{
        return((ino_tree_node_t *) inode_tree_ptrs[agno]->avl_firstino);
}

void
print_inode_list_int(xfs_agnumber_t agno, int uncertain)
{
        ino_tree_node_t *ino_rec;

        if (!uncertain) {
                fprintf(stderr, _("good inode list is --\n"));
                ino_rec = findfirst_inode_rec(agno);
        } else {
                fprintf(stderr, _("uncertain inode list is --\n"));
                ino_rec = findfirst_uncertain_inode_rec(agno);
        }

        if (ino_rec == NULL) {
                fprintf(stderr, _("agno %d -- no inodes\n"), agno);
                return;
        }

        fprintf(stderr, _("agno %d\n"), agno);

        while (ino_rec != NULL) {
                fprintf(stderr,
        _("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"),
                        (unsigned long)ino_rec,
                        ino_rec->ino_startnum,
                        (unsigned long long)ino_rec->ir_free,
                        (unsigned long long)ino_rec->ino_confirmed);
                ino_rec = next_ino_rec(ino_rec);
        }
}

void
print_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 0);
}

void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 1);
}

/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode whose
 * bit is the Nth set bit in the mask is stored in the Nth location
 * in the array, where N starts at 0.
 */
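/*
 * A small worked example (illustration only): with pmask = 0x29
 * (bits 0, 3 and 5 set), pentries holds three parents:
 *
 *      pentries[0] -> inode at chunk offset 0
 *      pentries[1] -> inode at chunk offset 3
 *      pentries[2] -> inode at chunk offset 5
 *
 * To look up offset 5, count the mask bits below bit 5 -- two are
 * set -- so the parent lives at pentries[2].  That bit-counting loop
 * is exactly what set_inode_parent() and get_inode_parent() do.
 */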
void
set_inode_parent(ino_tree_node_t *irec, int offset, xfs_ino_t parent)
{
        int i;
        int cnt;
        int target;
        __uint64_t bitmask;
        parent_entry_t *tmp;

        ASSERT(full_backptrs == 0);

        if (irec->ino_un.plist == NULL) {
                irec->ino_un.plist =
                        (parent_list_t *)malloc(sizeof(parent_list_t));
                if (!irec->ino_un.plist)
                        do_error(_("couldn't malloc parent list table\n"));

                /* unsigned shift -- offset can be as large as 63 */
                irec->ino_un.plist->pmask = 1ULL << offset;
                irec->ino_un.plist->pentries =
                        (xfs_ino_t *)memalign(sizeof(xfs_ino_t),
                                                sizeof(xfs_ino_t));
                if (!irec->ino_un.plist->pentries)
                        do_error(_("couldn't memalign pentries table\n"));
#ifdef DEBUG
                irec->ino_un.plist->cnt = 1;
#endif
                irec->ino_un.plist->pentries[0] = parent;

                return;
        }

        if (irec->ino_un.plist->pmask & (1ULL << offset)) {
                bitmask = 1ULL;
                target = 0;

                for (i = 0; i < offset; i++) {
                        if (irec->ino_un.plist->pmask & bitmask)
                                target++;
                        bitmask <<= 1;
                }
#ifdef DEBUG
                ASSERT(target < irec->ino_un.plist->cnt);
#endif
                irec->ino_un.plist->pentries[target] = parent;

                return;
        }

        bitmask = 1ULL;
        cnt = target = 0;

        for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
                if (irec->ino_un.plist->pmask & bitmask) {
                        cnt++;
                        if (i < offset)
                                target++;
                }

                bitmask <<= 1;
        }

#ifdef DEBUG
        ASSERT(cnt == irec->ino_un.plist->cnt);
#endif
        ASSERT(cnt >= target);

        tmp = (xfs_ino_t *)memalign(sizeof(xfs_ino_t),
                                (cnt + 1) * sizeof(xfs_ino_t));
        if (!tmp)
                do_error(_("couldn't memalign pentries table\n"));

        (void) bcopy(irec->ino_un.plist->pentries, tmp,
                        target * sizeof(parent_entry_t));

        if (cnt > target)
                (void) bcopy(irec->ino_un.plist->pentries + target,
                                tmp + target + 1,
                                (cnt - target) * sizeof(parent_entry_t));

        free(irec->ino_un.plist->pentries);

        irec->ino_un.plist->pentries = tmp;

#ifdef DEBUG
        irec->ino_un.plist->cnt++;
#endif
        irec->ino_un.plist->pentries[target] = parent;
        irec->ino_un.plist->pmask |= (1ULL << offset);

        return;
}

#if 0
/*
 * not needed for now since we don't set the parent info
 * until phase 4 -- at which point we know that the directory
 * inode won't be going away -- so we won't ever need to clear
 * directory parent data that we set.
 */
void
clear_inode_parent(ino_tree_node_t *irec, int offset)
{
        ASSERT(full_backptrs == 0);
        ASSERT(irec->ino_un.plist != NULL);

        return;
}
#endif

xfs_ino_t
get_inode_parent(ino_tree_node_t *irec, int offset)
{
        __uint64_t bitmask;
        parent_list_t *ptbl;
        int i;
        int target;

        if (full_backptrs)
                ptbl = irec->ino_un.backptrs->parents;
        else
                ptbl = irec->ino_un.plist;

        if (ptbl->pmask & (1ULL << offset)) {
                bitmask = 1ULL;
                target = 0;

                for (i = 0; i < offset; i++) {
                        if (ptbl->pmask & bitmask)
                                target++;
                        bitmask <<= 1;
                }
#ifdef DEBUG
                ASSERT(target < ptbl->cnt);
#endif
                return(ptbl->pentries[target]);
        }

        return(0LL);
}

/*
 * code that deals with the inode descriptor appendages -- the back
 * pointers, link counts and reached bits for phase 6 and phase 7.
 */

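/*
 * Roughly (an illustrative summary of the routines below): once
 * add_ino_backptrs() has run, each record's ino_un union holds a
 * pointer to a backptrs_t, with the old parent list tucked inside it.
 * Phases 6 and 7 then use nlinks[] as a per-inode reference count and
 * the reached bits to note inodes reachable from the root:
 *
 *      add_inode_ref(irec, off);       // found a link -> nlinks[off]++
 *      add_inode_reached(irec, off);   // count a link + mark reachable
 *      drop_inode_ref(irec, off);      // remove a link; clears the
 *                                      // reached bit when count hits 0
 */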
void
add_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
        ASSERT(ino_rec->ino_un.backptrs != NULL);

        ino_rec->ino_un.backptrs->nlinks[ino_offset]++;
        XFS_INO_RCHD_SET_RCHD(ino_rec, ino_offset);

        ASSERT(is_inode_reached(ino_rec, ino_offset));

        return;
}

int
is_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
        ASSERT(ino_rec->ino_un.backptrs != NULL);
        return(XFS_INO_RCHD_IS_RCHD(ino_rec, ino_offset));
}

void
add_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
        ASSERT(ino_rec->ino_un.backptrs != NULL);

        ino_rec->ino_un.backptrs->nlinks[ino_offset]++;

        return;
}

void
drop_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
        ASSERT(ino_rec->ino_un.backptrs != NULL);
        ASSERT(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);

        if (--ino_rec->ino_un.backptrs->nlinks[ino_offset] == 0)
                XFS_INO_RCHD_CLR_RCHD(ino_rec, ino_offset);

        return;
}

int
is_inode_referenced(ino_tree_node_t *ino_rec, int ino_offset)
{
        ASSERT(ino_rec->ino_un.backptrs != NULL);
        return(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);
}

__uint32_t
num_inode_references(ino_tree_node_t *ino_rec, int ino_offset)
{
        ASSERT(ino_rec->ino_un.backptrs != NULL);
        return(ino_rec->ino_un.backptrs->nlinks[ino_offset]);
}

#if 0
#define BPTR_ALLOC_NUM  1000

static backptrs_t *bptrs;
/* start at the slab limit so the first call allocates a fresh slab */
static int bptrs_index = BPTR_ALLOC_NUM;

backptrs_t *
get_backptr(void)
{
        backptrs_t *bptr;

        if (bptrs_index == BPTR_ALLOC_NUM) {
                ASSERT(bptrs == NULL);

                if ((bptrs = malloc(sizeof(backptrs_t[BPTR_ALLOC_NUM])))
                                == NULL) {
                        do_error(_("couldn't malloc ino rec backptrs.\n"));
                }

                bptrs_index = 0;
        }

        ASSERT(bptrs != NULL);

        bptr = &bptrs[bptrs_index];
        bptrs_index++;

        if (bptrs_index == BPTR_ALLOC_NUM)
                bptrs = NULL;

        bzero(bptr, sizeof(backptrs_t));

        return(bptr);
}
#endif

backptrs_t *
get_backptr(void)
{
        backptrs_t *ptr;

        if ((ptr = malloc(sizeof(backptrs_t))) == NULL)
                do_error(_("could not malloc back pointer table\n"));

        bzero(ptr, sizeof(backptrs_t));

        return(ptr);
}

void
add_ino_backptrs(xfs_mount_t *mp)
{
#ifdef XR_BCKPTR_DBG
        xfs_ino_t ino;
        int j, k;
#endif /* XR_BCKPTR_DBG */
        ino_tree_node_t *ino_rec;
        parent_list_t *tmp;
        xfs_agnumber_t i;

        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                ino_rec = findfirst_inode_rec(i);

                while (ino_rec != NULL) {
                        tmp = ino_rec->ino_un.plist;
                        ino_rec->ino_un.backptrs = get_backptr();
                        ino_rec->ino_un.backptrs->parents = tmp;

#ifdef XR_BCKPTR_DBG
                        if (tmp != NULL) {
                                k = 0;
                                for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
                                        ino = XFS_AGINO_TO_INO(mp, i,
                                                ino_rec->ino_startnum + j);
                                        if (ino == 25165846) {
                                                do_warn("THERE 1 !!!\n");
                                        }
                                        if (tmp->pentries[j] != 0) {
                                                k++;
                                                do_warn(
                                                "inode %llu - parent %llu\n",
                                                        ino,
                                                        tmp->pentries[j]);
                                                if (ino == 25165846) {
                                                        do_warn("THERE!!!\n");
                                                }
                                        }
                                }

                                if (k != tmp->cnt) {
                                        do_warn(
                                        "ERROR - count = %d, counted %d\n",
                                                tmp->cnt, k);
                                }
                        }
#endif /* XR_BCKPTR_DBG */
                        ino_rec = next_ino_rec(ino_rec);
                }
        }

        full_backptrs = 1;

        return;
}

static __psunsigned_t
avl_ino_start(avlnode_t *node)
{
        return((__psunsigned_t) ((ino_tree_node_t *) node)->ino_startnum);
}

static __psunsigned_t
avl_ino_end(avlnode_t *node)
{
        return((__psunsigned_t) (
                ((ino_tree_node_t *) node)->ino_startnum +
                XFS_INODES_PER_CHUNK));
}

avlops_t avl_ino_tree_ops = {
        avl_ino_start,
        avl_ino_end
};

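/*
 * The two ops above are what let avl_findrange() work here: they
 * report each record's key range as the half-open interval
 * [ino_startnum, ino_startnum + XFS_INODES_PER_CHUNK), so a lookup
 * for any agino inside a chunk lands on that chunk's record.
 */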
void
incore_ino_init(xfs_mount_t *mp)
{
        int i;
        int agcount = mp->m_sb.sb_agcount;

        if ((inode_tree_ptrs = malloc(agcount *
                        sizeof(avltree_desc_t *))) == NULL)
                do_error(_("couldn't malloc inode tree descriptor table\n"));
        if ((inode_uncertain_tree_ptrs = malloc(agcount *
                        sizeof(avltree_desc_t *))) == NULL)
                do_error(
                _("couldn't malloc uncertain ino tree descriptor table\n"));

        for (i = 0; i < agcount; i++) {
                if ((inode_tree_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(_("couldn't malloc inode tree descriptor\n"));
                if ((inode_uncertain_tree_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(
                        _("couldn't malloc uncertain ino tree descriptor\n"));
        }
        for (i = 0; i < agcount; i++) {
                avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
                avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
        }

        ino_flist.cnt = 0;
        ino_flist.list = NULL;

        if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
                do_error(_("couldn't malloc uncertain inode cache area\n"));

        bzero(last_rec, sizeof(ino_tree_node_t *) * agcount);

        full_backptrs = 0;

        return;
}

#ifdef XR_INO_REF_DEBUG
void
add_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
        XFS_INOPROC_SET_PROC((ino_rec), (ino_offset));

        ASSERT(is_inode_refchecked(ino, ino_rec, ino_offset));

        return;
}

int
is_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
        return(XFS_INOPROC_IS_PROC(ino_rec, ino_offset) == 0LL ? 0 : 1);
}
#endif /* XR_INO_REF_DEBUG */