/*
 * Copyright (c) 2000 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include <libxfs.h>
#include <malloc.h>
#include "avl.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "err_protos.h"

extern avlnode_t	*avl_firstino(avlnode_t *root);

/*
 * array of inode tree ptrs, one per ag
 */
static avltree_desc_t	**inode_tree_ptrs;

/*
 * ditto for uncertain inodes
 */
static avltree_desc_t	**inode_uncertain_tree_ptrs;

#define ALLOC_NUM_INOS	100

/* free lists -- inode nodes and extent nodes */

typedef struct ino_flist_s  {
	ino_tree_node_t		*list;
	ino_tree_node_t		*last;
	long long		cnt;
} ino_flist_t;

static ino_flist_t ino_flist;	/* free list must be initialized before use */
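/*
 * free records are chained through their avl_node.avl_nextino
 * pointers, so the free list needs no separate link field (see
 * mk_ino_tree_nodes() and free_ino_tree_node() below).
 */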

/*
 * next is the uncertain inode list -- a list of inode records
 * sorted in ascending order on the starting inode number.  There
 * is one list per ag.
 */

/*
 * common code for creating inode records for use by trees and lists.
 * called only from add_inodes and add_inodes_uncertain
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
/* ARGSUSED */
static ino_tree_node_t *
mk_ino_tree_nodes(xfs_agino_t starting_ino)
{
	int			i;
	ino_tree_node_t		*new;
	avlnode_t		*node;

	if (ino_flist.cnt == 0)  {
		ASSERT(ino_flist.list == NULL);

		if ((new = malloc(sizeof(ino_tree_node_t[ALLOC_NUM_INOS])))
					== NULL)
			do_error("inode map malloc failed\n");

		for (i = 0; i < ALLOC_NUM_INOS; i++)  {
			new->avl_node.avl_nextino =
				(avlnode_t *) ino_flist.list;
			ino_flist.list = new;
			ino_flist.cnt++;
			new++;
		}
	}

	ASSERT(ino_flist.list != NULL);

	new = ino_flist.list;
	ino_flist.list = (ino_tree_node_t *) new->avl_node.avl_nextino;
	ino_flist.cnt--;
	node = &new->avl_node;
	node->avl_nextino = node->avl_forw = node->avl_back = NULL;

	/* initialize node -- all inodes start off free and unconfirmed */

	new->ino_startnum = 0;
	new->ino_confirmed = 0;
	new->ino_isa_dir = 0;
	new->ir_free = (xfs_inofree_t) -1;
	new->ino_un.backptrs = NULL;

	return(new);
}
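/*
 * illustrative note: with ALLOC_NUM_INOS == 100, the first call
 * above mallocs a block of 100 records, threads them all onto
 * ino_flist and immediately pops one back off; the next 99 calls
 * are then malloc-free.
 */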

/*
 * return inode record to free list, will be initialized when
 * it gets pulled off list
 */
static void
free_ino_tree_node(ino_tree_node_t *ino_rec)
{
	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;

	if (ino_flist.list != NULL)  {
		ASSERT(ino_flist.cnt > 0);
		ino_rec->avl_node.avl_nextino = (avlnode_t *) ino_flist.list;
	} else  {
		ASSERT(ino_flist.cnt == 0);
		ino_rec->avl_node.avl_nextino = NULL;
	}

	ino_flist.list = ino_rec;
	ino_flist.cnt++;

	/*
	 * ino_un is a union -- plist and backptrs alias the same
	 * pointer.  before full_backptrs is set only the parent list
	 * exists; afterwards the backptrs descriptor (freed here via
	 * the aliased plist pointer) may also carry a parents table.
	 */
	if (ino_rec->ino_un.backptrs != NULL)  {
		if (full_backptrs && ino_rec->ino_un.backptrs->parents != NULL)
			free(ino_rec->ino_un.backptrs->parents);
		if (ino_rec->ino_un.plist != NULL)
			free(ino_rec->ino_un.plist);
	}

	return;
}

/*
 * last referenced cache for uncertain inodes
 */
static ino_tree_node_t **last_rec;

/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes).  Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
void
add_aginode_uncertain(xfs_agnumber_t agno, xfs_agino_t ino, int free)
{
	ino_tree_node_t		*ino_rec;
	xfs_agino_t		s_ino;
	int			offset;

	ASSERT(agno < glob_agcount);
	ASSERT(last_rec != NULL);

	s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

	/*
	 * check for a cache hit
	 */
	if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino)  {
		offset = ino - s_ino;
		if (free)
			set_inode_free(last_rec[agno], offset);
		else
			set_inode_used(last_rec[agno], offset);

		return;
	}

	/*
	 * check to see if record containing inode is already in the tree.
	 * if not, add it
	 */
	if ((ino_rec = (ino_tree_node_t *)
			avl_findrange(inode_uncertain_tree_ptrs[agno],
				s_ino)) == NULL)  {
		ino_rec = mk_ino_tree_nodes(s_ino);
		ino_rec->ino_startnum = s_ino;

		if (avl_insert(inode_uncertain_tree_ptrs[agno],
				(avlnode_t *) ino_rec) == NULL)  {
			do_error("xfs_repair: add_aginode_uncertain - "
				 "duplicate inode range\n");
		}
	}

	if (free)
		set_inode_free(ino_rec, ino - s_ino);
	else
		set_inode_used(ino_rec, ino - s_ino);

	/*
	 * set cache entry
	 */
	last_rec[agno] = ino_rec;

	return;
}
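/*
 * worked example: with XFS_INODES_PER_CHUNK == 64, agino 150 maps
 * to the record starting at s_ino 128 with offset 22; a following
 * call for agino 151 hits the last_rec cache and only flips a bit.
 */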

/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion.
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
	add_aginode_uncertain(XFS_INO_TO_AGNO(mp, ino),
				XFS_INO_TO_AGINO(mp, ino), free);
}

/*
 * pull the indicated inode record out of the uncertain inode tree
 */
void
get_uncertain_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	ASSERT(inode_uncertain_tree_ptrs != NULL);
	ASSERT(inode_uncertain_tree_ptrs[agno] != NULL);

	avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

ino_tree_node_t *
findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
{
	return((ino_tree_node_t *)
		inode_uncertain_tree_ptrs[agno]->avl_firstino);
}

ino_tree_node_t *
find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
	return((ino_tree_node_t *)
		avl_findrange(inode_uncertain_tree_ptrs[agno], ino));
}

void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
	last_rec[agno] = NULL;

	return;
}

/*
 * next comes the inode trees.  One per ag.  AVL trees
 * of inode records, each inode record tracking 64 inodes
 */
/*
 * set up an inode tree record for a group of inodes that will
 * include the requested inode.
 *
 * does NOT error-check for duplicate records.  Caller is
 * responsible for checking that.
 *
 * ino must be the start of an XFS_INODES_PER_CHUNK (64) inode chunk
 *
 * Each inode resides in a 64-inode chunk which can be part of
 * one or more larger allocation units (MAX(64, inodes-per-block)).
 * The fs allocates multiple chunks at once (rather than one chunk)
 * when a block can hold more than one chunk (inodes per block > 64).
 * Allocating in one-chunk pieces causes us problems when it takes
 * more than one fs block to contain an inode chunk because the
 * chunks can start on *any* block boundary.  So we assume that the
 * caller has a clue because at this level, we don't.
 */
static ino_tree_node_t *
add_inode(xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t	*ino_rec;

	/* no record exists, make some and put them into the tree */

	ino_rec = mk_ino_tree_nodes(ino);
	ino_rec->ino_startnum = ino;

	if (avl_insert(inode_tree_ptrs[agno],
			(avlnode_t *) ino_rec) == NULL)  {
		do_warn("xfs_repair: add_inode - duplicate inode range\n");
	}

	return(ino_rec);
}

/*
 * pull the indicated inode record out of the inode tree
 */
void
get_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	ASSERT(inode_tree_ptrs != NULL);
	ASSERT(inode_tree_ptrs[agno] != NULL);

	avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

/*
 * free the designated inode record (return it to the free pool)
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	free_ino_tree_node(ino_rec);

	return;
}

/*
 * returns the inode record containing the desired inode, or
 * NULL if the inode doesn't exist.  The tree-based find
 * routines do NOT pull records out of the tree.
 */
ino_tree_node_t *
find_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
	return((ino_tree_node_t *)
		avl_findrange(inode_tree_ptrs[agno], ino));
}

void
find_inode_rec_range(xfs_agnumber_t agno, xfs_agino_t start_ino,
			xfs_agino_t end_ino, ino_tree_node_t **first,
			ino_tree_node_t **last)
{
	*first = *last = NULL;

	avl_findranges(inode_tree_ptrs[agno], start_ino,
		end_ino, (avlnode_t **) first, (avlnode_t **) last);
	return;
}

/*
 * if ino doesn't exist, it must be properly aligned -- on a
 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
 * whichever alignment is larger.
 */
ino_tree_node_t *
set_inode_used_alloc(xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t *ino_rec;

	/*
	 * check alignment -- the only way to detect this
	 * is to see if the chunk overlaps another chunk
	 * already in the tree
	 */
	ino_rec = add_inode(agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_used(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

ino_tree_node_t *
set_inode_free_alloc(xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t *ino_rec;

	ino_rec = add_inode(agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_free(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

ino_tree_node_t *
findfirst_inode_rec(xfs_agnumber_t agno)
{
	return((ino_tree_node_t *) inode_tree_ptrs[agno]->avl_firstino);
}

void
print_inode_list_int(xfs_agnumber_t agno, int uncertain)
{
	ino_tree_node_t *ino_rec;

	if (!uncertain)  {
		fprintf(stderr, "good inode list is --\n");
		ino_rec = findfirst_inode_rec(agno);
	} else  {
		fprintf(stderr, "uncertain inode list is --\n");
		ino_rec = findfirst_uncertain_inode_rec(agno);
	}

	if (ino_rec == NULL)  {
		fprintf(stderr, "agno %d -- no inodes\n", agno);
		return;
	}

	fprintf(stderr, "agno %d\n", agno);

	while (ino_rec != NULL)  {
		fprintf(stderr,
	"\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n",
			(unsigned long)ino_rec,
			ino_rec->ino_startnum,
			(unsigned long long)ino_rec->ir_free,
			(unsigned long long)ino_rec->ino_confirmed);
		if (ino_rec->ino_startnum == 0)
			ino_rec = ino_rec;	/* no-op; breakpoint anchor */
		ino_rec = next_ino_rec(ino_rec);
	}
}

void
print_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 0);
}

void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 1);
}

/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode
 * whose bit is the Nth set bit in the mask has its entry stored
 * in the Nth location in the array, with N starting at 0.
 */
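/*
 * example: if parents have been recorded for offsets 1, 4 and 9,
 * pmask is 0x212 and pentries[] holds three entries; the entry for
 * offset 4 lives at pentries[1] because exactly one lower mask bit
 * (bit 1) is set.
 */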
void
set_inode_parent(ino_tree_node_t *irec, int offset, xfs_ino_t parent)
{
	int		i;
	int		cnt;
	int		target;
	__uint64_t	bitmask;
	parent_entry_t	*tmp;

	ASSERT(full_backptrs == 0);

	/*
	 * first parent for this record -- set up a one-entry table
	 */
	if (irec->ino_un.plist == NULL)  {
		irec->ino_un.plist =
			(parent_list_t*)malloc(sizeof(parent_list_t));
		if (!irec->ino_un.plist)
			do_error("couldn't malloc parent list table\n");

		irec->ino_un.plist->pmask = 1LL << offset;
		irec->ino_un.plist->pentries =
			(xfs_ino_t*)memalign(sizeof(xfs_ino_t),
						sizeof(xfs_ino_t));
		if (!irec->ino_un.plist->pentries)
			do_error("couldn't memalign pentries table\n");
#ifdef DEBUG
		irec->ino_un.plist->cnt = 1;
#endif
		irec->ino_un.plist->pentries[0] = parent;

		return;
	}

	/*
	 * entry already exists -- just overwrite it in place
	 */
	if (irec->ino_un.plist->pmask & (1LL << offset))  {
		bitmask = 1LL;
		target = 0;

		for (i = 0; i < offset; i++)  {
			if (irec->ino_un.plist->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < irec->ino_un.plist->cnt);
#endif
		irec->ino_un.plist->pentries[target] = parent;

		return;
	}

	/*
	 * new entry -- count the existing entries, find the
	 * insertion point and grow the packed array by one
	 */
	bitmask = 1LL;
	cnt = target = 0;

	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
		if (irec->ino_un.plist->pmask & bitmask)  {
			cnt++;
			if (i < offset)
				target++;
		}

		bitmask <<= 1;
	}

#ifdef DEBUG
	ASSERT(cnt == irec->ino_un.plist->cnt);
#endif
	ASSERT(cnt >= target);

	tmp = (xfs_ino_t*)memalign(sizeof(xfs_ino_t),
					(cnt + 1) * sizeof(xfs_ino_t));
	if (!tmp)
		do_error("couldn't memalign pentries table\n");

	(void) bcopy(irec->ino_un.plist->pentries, tmp,
			target * sizeof(parent_entry_t));

	if (cnt > target)
		(void) bcopy(irec->ino_un.plist->pentries + target,
				tmp + target + 1,
				(cnt - target) * sizeof(parent_entry_t));

	free(irec->ino_un.plist->pentries);

	irec->ino_un.plist->pentries = tmp;

#ifdef DEBUG
	irec->ino_un.plist->cnt++;
#endif
	irec->ino_un.plist->pentries[target] = parent;
	irec->ino_un.plist->pmask |= (1LL << offset);

	return;
}
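/*
 * note on cost: overwriting an existing entry above only walks the
 * mask bits below the offset, while adding a new entry scans all
 * XFS_INODES_PER_CHUNK mask bits and reallocates the packed array,
 * so memory is traded for O(n) insertion.
 */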

#if 0
/*
 * not needed for now since we don't set the parent info
 * until phase 4 -- at which point we know that the directory
 * inode won't be going away -- so we won't ever need to clear
 * directory parent data that we set.
 */
void
clear_inode_parent(ino_tree_node_t *irec, int offset)
{
	ASSERT(full_backptrs == 0);
	ASSERT(irec->ino_un.plist != NULL);

	return;
}
#endif

xfs_ino_t
get_inode_parent(ino_tree_node_t *irec, int offset)
{
	__uint64_t	bitmask;
	parent_list_t	*ptbl;
	int		i;
	int		target;

	if (full_backptrs)
		ptbl = irec->ino_un.backptrs->parents;
	else
		ptbl = irec->ino_un.plist;

	if (ptbl->pmask & (1LL << offset))  {
		bitmask = 1LL;
		target = 0;

		for (i = 0; i < offset; i++)  {
			if (ptbl->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < ptbl->cnt);
#endif
		return(ptbl->pentries[target]);
	}

	return(0LL);
}
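/*
 * a return of 0 from get_inode_parent() means no parent has been
 * recorded for that offset.
 */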

/*
 * code that deals with the inode descriptor appendages -- the back
 * pointers, link counts and reached bits for phase 6 and phase 7.
 */

void
add_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);

	ino_rec->ino_un.backptrs->nlinks[ino_offset]++;
	XFS_INO_RCHD_SET_RCHD(ino_rec, ino_offset);

	ASSERT(is_inode_reached(ino_rec, ino_offset));

	return;
}

int
is_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	return(XFS_INO_RCHD_IS_RCHD(ino_rec, ino_offset));
}

void
add_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);

	ino_rec->ino_un.backptrs->nlinks[ino_offset]++;

	return;
}

void
drop_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	ASSERT(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);

	if (--ino_rec->ino_un.backptrs->nlinks[ino_offset] == 0)
		XFS_INO_RCHD_CLR_RCHD(ino_rec, ino_offset);

	return;
}

int
is_inode_referenced(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	return(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);
}

__uint32_t
num_inode_references(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	return(ino_rec->ino_un.backptrs->nlinks[ino_offset]);
}

#if 0
static backptrs_t	*bptrs;
static int		bptrs_index;
#define BPTR_ALLOC_NUM	1000

backptrs_t *
get_backptr(void)
{
	backptrs_t *bptr;

	if (bptrs_index == BPTR_ALLOC_NUM)  {
		ASSERT(bptrs == NULL);

		if ((bptrs = malloc(sizeof(backptrs_t[BPTR_ALLOC_NUM])))
				== NULL)  {
			do_error("couldn't malloc ino rec backptrs.\n");
		}

		bptrs_index = 0;
	}

	ASSERT(bptrs != NULL);

	bptr = &bptrs[bptrs_index];
	bptrs_index++;

	if (bptrs_index == BPTR_ALLOC_NUM)
		bptrs = NULL;

	bzero(bptr, sizeof(backptrs_t));

	return(bptr);
}
#endif
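/*
 * simple per-record allocator (the batched allocator above is
 * disabled); the bzero guarantees nlinks[] and the reached bits
 * start out clear.
 */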
backptrs_t *
get_backptr(void)
{
	backptrs_t *ptr;

	if ((ptr = malloc(sizeof(backptrs_t))) == NULL)
		do_error("could not malloc back pointer table\n");

	bzero(ptr, sizeof(backptrs_t));

	return(ptr);
}
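/*
 * ino_un is a union, so each record's parent list pointer is saved
 * first and then re-attached as backptrs->parents; once
 * full_backptrs is set, readers such as get_inode_parent() go
 * through backptrs->parents instead of plist.
 */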
void
add_ino_backptrs(xfs_mount_t *mp)
{
#ifdef XR_BCKPTR_DBG
	xfs_ino_t ino;
	int j, k;
#endif /* XR_BCKPTR_DBG */
	ino_tree_node_t *ino_rec;
	parent_list_t *tmp;
	xfs_agnumber_t i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++)  {
		ino_rec = findfirst_inode_rec(i);

		while (ino_rec != NULL)  {
			tmp = ino_rec->ino_un.plist;
			ino_rec->ino_un.backptrs = get_backptr();
			ino_rec->ino_un.backptrs->parents = tmp;

#ifdef XR_BCKPTR_DBG
			if (tmp != NULL)  {
				k = 0;
				for (j = 0; j < XFS_INODES_PER_CHUNK; j++)  {
					ino = XFS_AGINO_TO_INO(mp, i,
						ino_rec->ino_startnum + j);
					/* hard-coded inode being traced */
					if (ino == 25165846)  {
						do_warn("THERE 1 !!!\n");
					}
					if (tmp->pentries[j] != 0)  {
						k++;
						do_warn(
						"inode %llu - parent %llu\n",
							ino,
							tmp->pentries[j]);
						if (ino == 25165846)  {
							do_warn("THERE!!!\n");
						}
					}
				}

				if (k != tmp->cnt)  {
					do_warn(
					"ERROR - count = %d, counted %d\n",
						tmp->cnt, k);
				}
			}
#endif /* XR_BCKPTR_DBG */
			ino_rec = next_ino_rec(ino_rec);
		}
	}

	full_backptrs = 1;

	return;
}

static __psunsigned_t
avl_ino_start(avlnode_t *node)
{
	return((__psunsigned_t) ((ino_tree_node_t *) node)->ino_startnum);
}

static __psunsigned_t
avl_ino_end(avlnode_t *node)
{
	return((__psunsigned_t) (
		((ino_tree_node_t *) node)->ino_startnum +
		XFS_INODES_PER_CHUNK));
}

avlops_t avl_ino_tree_ops = {
	avl_ino_start,
	avl_ino_end
};
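/*
 * both the good-inode and uncertain-inode trees use these ops:
 * records are keyed by starting inode number and each one spans
 * [ino_startnum, ino_startnum + XFS_INODES_PER_CHUNK).
 */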

void
incore_ino_init(xfs_mount_t *mp)
{
	int i;
	int agcount = mp->m_sb.sb_agcount;

	if ((inode_tree_ptrs = malloc(agcount *
					sizeof(avltree_desc_t *))) == NULL)
		do_error("couldn't malloc inode tree descriptor table\n");
	if ((inode_uncertain_tree_ptrs = malloc(agcount *
					sizeof(avltree_desc_t *))) == NULL)
		do_error("couldn't malloc uncertain ino tree descriptor table\n");

	for (i = 0; i < agcount; i++)  {
		if ((inode_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error("couldn't malloc inode tree descriptor\n");
		if ((inode_uncertain_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error(
			"couldn't malloc uncertain ino tree descriptor\n");
	}
	for (i = 0; i < agcount; i++)  {
		avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
		avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
	}

	ino_flist.cnt = 0;
	ino_flist.list = NULL;

	if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
		do_error("couldn't malloc uncertain inode cache area\n");

	bzero(last_rec, sizeof(ino_tree_node_t *) * agcount);

	full_backptrs = 0;

	return;
}
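/*
 * note: incore_ino_init() must run before any of the add/find
 * routines above -- it creates the per-ag trees, resets ino_flist
 * and zeroes the last_rec cache that add_aginode_uncertain()
 * asserts against.
 */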

#ifdef XR_INO_REF_DEBUG
void
add_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
	XFS_INOPROC_SET_PROC((ino_rec), (ino_offset));

	ASSERT(is_inode_refchecked(ino, ino_rec, ino_offset));

	return;
}

int
is_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
	return(XFS_INOPROC_IS_PROC(ino_rec, ino_offset) == 0LL ? 0 : 1);
}
#endif /* XR_INO_REF_DEBUG */