/*
 * repair/incore_ino.c -- xfsprogs (xfs_repair) incore inode tracking.
 * (web/blame scrape header residue removed)
 */
1/*
2 * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32
33#include <libxfs.h>
34#include <malloc.h>
35#include "avl.h"
36#include "globals.h"
37#include "incore.h"
38#include "agheader.h"
39#include "protos.h"
40#include "err_protos.h"
41
42extern avlnode_t *avl_firstino(avlnode_t *root);
43
44/*
45 * array of inode tree ptrs, one per ag
46 */
47static avltree_desc_t **inode_tree_ptrs;
48
49/*
50 * ditto for uncertain inodes
51 */
52static avltree_desc_t **inode_uncertain_tree_ptrs;
53
54#define ALLOC_NUM_INOS 100
55
/* free lists -- inode nodes and extent nodes */

/*
 * Simple singly-linked free pool of inode tree records.  Records are
 * carved out of ALLOC_NUM_INOS-sized malloc'ed slabs by
 * mk_ino_tree_nodes() and returned here by free_ino_tree_node().
 * Links are threaded through each record's avl_node.avl_nextino field.
 */
typedef struct ino_flist_s {
	ino_tree_node_t *list;	/* head of the free list */
	ino_tree_node_t *last;	/* NOTE(review): never updated in this file -- confirm use */
	long long cnt;		/* number of records currently on the list */
} ino_flist_t;

static ino_flist_t ino_flist;	/* free list must be initialized before use */
65
66/*
67 * next is the uncertain inode list -- a sorted (in ascending order)
68 * list of inode records sorted on the starting inode number. There
69 * is one list per ag.
70 */
71
/*
 * common code for creating inode records for use by trees and lists.
 * called only from add_inodes and add_inodes_uncertain
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
/* ARGSUSED */
static ino_tree_node_t *
mk_ino_tree_nodes(xfs_agino_t starting_ino)
{
	int i;
	ino_tree_node_t *new;
	avlnode_t *node;

	/*
	 * replenish the free pool in slabs of ALLOC_NUM_INOS records;
	 * slabs are never returned to the system (the pool only grows)
	 */
	if (ino_flist.cnt == 0) {
		ASSERT(ino_flist.list == NULL);

		if ((new = malloc(sizeof(ino_tree_node_t[ALLOC_NUM_INOS])))
		    == NULL)
			do_error("inode map malloc failed\n");

		/* push each record of the fresh slab onto the free list */
		for (i = 0; i < ALLOC_NUM_INOS; i++) {
			new->avl_node.avl_nextino =
				(avlnode_t *) ino_flist.list;
			ino_flist.list = new;
			ino_flist.cnt++;
			new++;
		}
	}

	ASSERT(ino_flist.list != NULL);

	/* pop the head record and detach it from the list */
	new = ino_flist.list;
	ino_flist.list = (ino_tree_node_t *) new->avl_node.avl_nextino;
	ino_flist.cnt--;
	node = &new->avl_node;
	node->avl_nextino = node->avl_forw = node->avl_back = NULL;

	/* initialize node -- every inode starts out free and unconfirmed */

	new->ino_startnum = 0;
	new->ino_confirmed = 0;
	new->ino_isa_dir = 0;
	new->ir_free = (xfs_inofree_t) - 1;	/* all-ones == all free */
	new->ino_un.backptrs = NULL;

	return(new);
}
121
122/*
123 * return inode record to free list, will be initialized when
124 * it gets pulled off list
125 */
126static void
127free_ino_tree_node(ino_tree_node_t *ino_rec)
128{
129 ino_rec->avl_node.avl_nextino = NULL;
130 ino_rec->avl_node.avl_forw = NULL;
131 ino_rec->avl_node.avl_back = NULL;
132
133 if (ino_flist.list != NULL) {
134 ASSERT(ino_flist.cnt > 0);
135 ino_rec->avl_node.avl_nextino = (avlnode_t *) ino_flist.list;
136 } else {
137 ASSERT(ino_flist.cnt == 0);
138 ino_rec->avl_node.avl_nextino = NULL;
139 }
140
141 ino_flist.list = ino_rec;
142 ino_flist.cnt++;
143
144 if (ino_rec->ino_un.backptrs != NULL) {
145 if (full_backptrs && ino_rec->ino_un.backptrs->parents != NULL)
146 free(ino_rec->ino_un.backptrs->parents);
147 if (ino_rec->ino_un.plist != NULL)
148 free(ino_rec->ino_un.plist);
149 }
150
151 return;
152}
153
154/*
155 * last referenced cache for uncertain inodes
156 */
157static ino_tree_node_t **last_rec;
158
/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes). Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
void
add_aginode_uncertain(xfs_agnumber_t agno, xfs_agino_t ino, int free)
{
	ino_tree_node_t *ino_rec;
	xfs_agino_t s_ino;
	int offset;

	ASSERT(agno < glob_agcount);
	ASSERT(last_rec != NULL);

	/* align the record's starting inode down to a chunk boundary */
	s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

	/*
	 * check for a cache hit -- last_rec[agno] remembers the record
	 * touched by the previous call for this ag, avoiding a tree search
	 */
	if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
		offset = ino - s_ino;
		if (free)
			set_inode_free(last_rec[agno], offset);
		else
			set_inode_used(last_rec[agno], offset);

		return;
	}

	/*
	 * check to see if record containing inode is already in the tree.
	 * if not, add it
	 */
	if ((ino_rec = (ino_tree_node_t *)
			avl_findrange(inode_uncertain_tree_ptrs[agno],
				s_ino)) == NULL) {
		ino_rec = mk_ino_tree_nodes(s_ino);
		ino_rec->ino_startnum = s_ino;

		if (avl_insert(inode_uncertain_tree_ptrs[agno],
				(avlnode_t *) ino_rec) == NULL) {
			do_error("xfs_repair: duplicate inode range\n");
		}
	}

	if (free)
		set_inode_free(ino_rec, ino - s_ino);
	else
		set_inode_used(ino_rec, ino - s_ino);

	/*
	 * set cache entry
	 */
	last_rec[agno] = ino_rec;

	return;
}
221
222/*
223 * like add_aginode_uncertain() only it needs an xfs_mount_t *
224 * to perform the inode number conversion.
225 */
226void
227add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
228{
229 add_aginode_uncertain(XFS_INO_TO_AGNO(mp, ino),
230 XFS_INO_TO_AGINO(mp, ino), free);
231}
232
233/*
234 * pull the indicated inode record out of the uncertain inode tree
235 */
236void
237get_uncertain_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
238{
239 ASSERT(inode_tree_ptrs != NULL);
240 ASSERT(inode_tree_ptrs[agno] != NULL);
241
242 avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);
243
244 ino_rec->avl_node.avl_nextino = NULL;
245 ino_rec->avl_node.avl_forw = NULL;
246 ino_rec->avl_node.avl_back = NULL;
247}
248
249ino_tree_node_t *
250findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
251{
252 return((ino_tree_node_t *)
253 inode_uncertain_tree_ptrs[agno]->avl_firstino);
254}
255
256void
257clear_uncertain_ino_cache(xfs_agnumber_t agno)
258{
259 last_rec[agno] = NULL;
260
261 return;
262}
263
264
265/*
266 * next comes the inode trees. One per ag. AVL trees
267 * of inode records, each inode record tracking 64 inodes
268 */
269/*
270 * set up an inode tree record for a group of inodes that will
271 * include the requested inode.
272 *
273 * does NOT error-check for duplicate records. Caller is
274 * responsible for checking that.
275 *
276 * ino must be the start of an XFS_INODES_PER_CHUNK (64) inode chunk
277 *
278 * Each inode resides in a 64-inode chunk which can be part
279 * one or more chunks (MAX(64, inodes-per-block). The fs allocates
280 * in chunks (as opposed to 1 chunk) when a block can hold more than
281 * one chunk (inodes per block > 64). Allocating in one chunk pieces
282 * causes us problems when it takes more than one fs block to contain
283 * an inode chunk because the chunks can start on *any* block boundary.
284 * So we assume that the caller has a clue because at this level, we
285 * don't.
286 */
287static ino_tree_node_t *
288add_inode(xfs_agnumber_t agno, xfs_agino_t ino)
289{
290 ino_tree_node_t *ino_rec;
291
292 /* no record exists, make some and put them into the tree */
293
294 ino_rec = mk_ino_tree_nodes(ino);
295 ino_rec->ino_startnum = ino;
296
297 if (avl_insert(inode_tree_ptrs[agno],
298 (avlnode_t *) ino_rec) == NULL) {
299 do_error("xfs_repair: duplicate inode range\n");
300 }
301
302 return(ino_rec);
303}
304
305/*
306 * pull the indicated inode record out of the inode tree
307 */
308void
309get_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
310{
311 ASSERT(inode_tree_ptrs != NULL);
312 ASSERT(inode_tree_ptrs[agno] != NULL);
313
314 avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);
315
316 ino_rec->avl_node.avl_nextino = NULL;
317 ino_rec->avl_node.avl_forw = NULL;
318 ino_rec->avl_node.avl_back = NULL;
319}
320
321/*
322 * free the designated inode record (return it to the free pool)
323 */
324/* ARGSUSED */
325void
326free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
327{
328 free_ino_tree_node(ino_rec);
329
330 return;
331}
332
333/*
334 * returns the inode record desired containing the inode
335 * returns NULL if inode doesn't exist. The tree-based find
336 * routines do NOT pull records out of the tree.
337 */
338ino_tree_node_t *
339find_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
340{
341 return((ino_tree_node_t *)
342 avl_findrange(inode_tree_ptrs[agno], ino));
343}
344
345void
346find_inode_rec_range(xfs_agnumber_t agno, xfs_agino_t start_ino,
347 xfs_agino_t end_ino, ino_tree_node_t **first,
348 ino_tree_node_t **last)
349{
350 *first = *last = NULL;
351
352 avl_findranges(inode_tree_ptrs[agno], start_ino,
353 end_ino, (avlnode_t **) first, (avlnode_t **) last);
354 return;
355}
356
357/*
358 * if ino doesn't exist, it must be properly aligned -- on a
359 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
360 * whichever alignment is larger.
361 */
362ino_tree_node_t *
363set_inode_used_alloc(xfs_agnumber_t agno, xfs_agino_t ino)
364{
365 ino_tree_node_t *ino_rec;
366
367 /*
368 * check alignment -- the only way to detect this
369 * is too see if the chunk overlaps another chunk
370 * already in the tree
371 */
372 ino_rec = add_inode(agno, ino);
373
374 ASSERT(ino_rec != NULL);
375 ASSERT(ino >= ino_rec->ino_startnum &&
376 ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);
377
378 set_inode_used(ino_rec, ino - ino_rec->ino_startnum);
379
380 return(ino_rec);
381}
382
383ino_tree_node_t *
384set_inode_free_alloc(xfs_agnumber_t agno, xfs_agino_t ino)
385{
386 ino_tree_node_t *ino_rec;
387
388 ino_rec = add_inode(agno, ino);
389
390 ASSERT(ino_rec != NULL);
391 ASSERT(ino >= ino_rec->ino_startnum &&
392 ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);
393
394 set_inode_free(ino_rec, ino - ino_rec->ino_startnum);
395
396 return(ino_rec);
397}
398
399ino_tree_node_t *
400findfirst_inode_rec(xfs_agnumber_t agno)
401{
402 return((ino_tree_node_t *) inode_tree_ptrs[agno]->avl_firstino);
403}
404
405void
406print_inode_list_int(xfs_agnumber_t agno, int uncertain)
407{
408 ino_tree_node_t *ino_rec;
409
410 if (!uncertain) {
411 fprintf(stderr, "good inode list is --\n");
412 ino_rec = findfirst_inode_rec(agno);
413 } else {
414 fprintf(stderr, "uncertain inode list is --\n");
415 ino_rec = findfirst_uncertain_inode_rec(agno);
416 }
417
418 if (ino_rec == NULL) {
419 fprintf(stderr, "agno %d -- no inodes\n", agno);
420 return;
421 }
422
423 printf("agno %d\n", agno);
424
425 while(ino_rec != NULL) {
426 fprintf(stderr,
427 "\tptr = %p, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n",
428 ino_rec,
429 ino_rec->ino_startnum,
430 ino_rec->ir_free,
431 ino_rec->ino_confirmed);
432 if (ino_rec->ino_startnum == 0)
433 ino_rec = ino_rec;
434 ino_rec = next_ino_rec(ino_rec);
435 }
436}
437
438void
439print_inode_list(xfs_agnumber_t agno)
440{
441 print_inode_list_int(agno, 0);
442}
443
444void
445print_uncertain_inode_list(xfs_agnumber_t agno)
446{
447 print_inode_list_int(agno, 1);
448}
449
/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode that
 * is the Nth set bit in the mask is stored in the Nth location in
 * the array where N starts at 0.
 */
void
set_inode_parent(ino_tree_node_t *irec, int offset, xfs_ino_t parent)
{
	int i;
	int cnt;
	int target;
	__uint64_t bitmask;
	parent_entry_t *tmp;

	/* only legal before phase 6 repurposes ino_un for backptrs */
	ASSERT(full_backptrs == 0);

	/*
	 * first parent recorded for this chunk -- start a 1-entry table
	 */
	if (irec->ino_un.plist == NULL) {
		irec->ino_un.plist =
			(parent_list_t*)malloc(sizeof(parent_list_t));
		if (!irec->ino_un.plist)
			do_error("couldn't malloc parent list table\n");

		irec->ino_un.plist->pmask = 1LL << offset;
		irec->ino_un.plist->pentries =
			(xfs_ino_t*)memalign(sizeof(xfs_ino_t), sizeof(xfs_ino_t));
		if (!irec->ino_un.plist->pentries)
			do_error("couldn't memalign pentries table\n");
#ifdef DEBUG
		irec->ino_un.plist->cnt = 1;
#endif
		irec->ino_un.plist->pentries[0] = parent;

		return;
	}

	/*
	 * an entry already exists for this inode -- count the set bits
	 * below ours to find its packed slot and overwrite in place
	 */
	if (irec->ino_un.plist->pmask & (1LL << offset)) {
		bitmask = 1LL;
		target = 0;

		for (i = 0; i < offset; i++) {
			if (irec->ino_un.plist->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < irec->ino_un.plist->cnt);
#endif
		irec->ino_un.plist->pentries[target] = parent;

		return;
	}

	/*
	 * no entry yet -- grow the table by one.  cnt counts current
	 * entries, target is the slot the new entry belongs in
	 */
	bitmask = 1LL;
	cnt = target = 0;

	for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
		if (irec->ino_un.plist->pmask & bitmask) {
			cnt++;
			if (i < offset)
				target++;
		}

		bitmask <<= 1;
	}

#ifdef DEBUG
	ASSERT(cnt == irec->ino_un.plist->cnt);
#endif
	ASSERT(cnt >= target);

	tmp = (xfs_ino_t*)memalign(sizeof(xfs_ino_t), (cnt + 1) * sizeof(xfs_ino_t));
	if (!tmp)
		do_error("couldn't memalign pentries table\n");

	/* copy the entries below the insertion point (bcopy: src, dst, len) */
	(void) bcopy(irec->ino_un.plist->pentries, tmp,
			target * sizeof(parent_entry_t));

	/* and the entries above it, shifted up one slot */
	if (cnt > target)
		(void) bcopy(irec->ino_un.plist->pentries + target,
				tmp + target + 1,
				(cnt - target) * sizeof(parent_entry_t));

	free(irec->ino_un.plist->pentries);

	irec->ino_un.plist->pentries = tmp;

#ifdef DEBUG
	irec->ino_un.plist->cnt++;
#endif
	irec->ino_un.plist->pentries[target] = parent;
	irec->ino_un.plist->pmask |= (1LL << offset);

	return;
}
545
#if 0
/*
 * not needed for now since we don't set the parent info
 * until phase 4 -- at which point we know that the directory
 * inode won't be going away -- so we won't ever need to clear
 * directory parent data that we set.
 *
 * NOTE(review): dead code -- body was never implemented beyond
 * the asserts; confirm intent before ever re-enabling.
 */
void
clear_inode_parent(ino_tree_node_t *irec, int offset)
{
	ASSERT(full_backptrs == 0);
	ASSERT(irec->ino_un.plist != NULL);

	return;
}
#endif
562
563xfs_ino_t
564get_inode_parent(ino_tree_node_t *irec, int offset)
565{
566 __uint64_t bitmask;
567 parent_list_t *ptbl;
568 int i;
569 int target;
570
571 if (full_backptrs)
572 ptbl = irec->ino_un.backptrs->parents;
573 else
574 ptbl = irec->ino_un.plist;
575
576 if (ptbl->pmask & (1LL << offset)) {
577 bitmask = 1LL;
578 target = 0;
579
580 for (i = 0; i < offset; i++) {
581 if (ptbl->pmask & bitmask)
582 target++;
583 bitmask <<= 1;
584 }
585#ifdef DEBUG
586 ASSERT(target < ptbl->cnt);
587#endif
588 return(ptbl->pentries[target]);
589 }
590
591 return(0LL);
592}
593
594/*
595 * code that deals with the inode descriptor appendages -- the back
596 * pointers, link counts and reached bits for phase 6 and phase 7.
597 */
598
599void
600add_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
601{
602 ASSERT(ino_rec->ino_un.backptrs != NULL);
603
604 ino_rec->ino_un.backptrs->nlinks[ino_offset]++;
605 XFS_INO_RCHD_SET_RCHD(ino_rec, ino_offset);
606
607 ASSERT(is_inode_reached(ino_rec, ino_offset));
608
609 return;
610}
611
612int
613is_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
614{
615 ASSERT(ino_rec->ino_un.backptrs != NULL);
616 return(XFS_INO_RCHD_IS_RCHD(ino_rec, ino_offset));
617}
618
619void
620add_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
621{
622 ASSERT(ino_rec->ino_un.backptrs != NULL);
623
624 ino_rec->ino_un.backptrs->nlinks[ino_offset]++;
625
626 return;
627}
628
629void
630drop_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
631{
632 ASSERT(ino_rec->ino_un.backptrs != NULL);
633 ASSERT(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);
634
635 if (--ino_rec->ino_un.backptrs->nlinks[ino_offset] == 0)
636 XFS_INO_RCHD_CLR_RCHD(ino_rec, ino_offset);
637
638 return;
639}
640
641int
642is_inode_referenced(ino_tree_node_t *ino_rec, int ino_offset)
643{
644 ASSERT(ino_rec->ino_un.backptrs != NULL);
645 return(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);
646}
647
648__uint32_t
649num_inode_references(ino_tree_node_t *ino_rec, int ino_offset)
650{
651 ASSERT(ino_rec->ino_un.backptrs != NULL);
652 return(ino_rec->ino_un.backptrs->nlinks[ino_offset]);
653}
654
#if 0
/*
 * disabled slab allocator for backptrs_t structures.
 * NOTE(review): as written the first call would index a NULL bptrs
 * array -- bptrs_index starts at 0, not BPTR_ALLOC_NUM, so the
 * replenish branch is skipped.  Verify before ever re-enabling.
 */
static backptrs_t *bptrs;
static int bptrs_index;
#define BPTR_ALLOC_NUM 1000

backptrs_t *
get_backptr(void)
{
	backptrs_t *bptr;

	/* carve a new slab once the previous one is exhausted */
	if (bptrs_index == BPTR_ALLOC_NUM) {
		ASSERT(bptrs == NULL);

		if ((bptrs = malloc(sizeof(backptrs_t[BPTR_ALLOC_NUM])))
		    == NULL) {
			do_error("couldn't malloc ino rec backptrs.\n");
		}

		bptrs_index = 0;
	}

	ASSERT(bptrs != NULL);

	bptr = &bptrs[bptrs_index];
	bptrs_index++;

	/* slab fully handed out -- forget it so a new one is malloc'ed */
	if (bptrs_index == BPTR_ALLOC_NUM)
		bptrs = NULL;

	bzero(bptr, sizeof(backptrs_t));

	return(bptr);
}
#endif
689
690backptrs_t *
691get_backptr(void)
692{
693 backptrs_t *ptr;
694
695 if ((ptr = malloc(sizeof(backptrs_t))) == NULL)
696 do_error("could not malloc back pointer table\n");
697
698 bzero(ptr, sizeof(backptrs_t));
699
700 return(ptr);
701}
702
/*
 * convert every inode record in every ag from "parent list" form to
 * the phase-6 backptrs form:  each record gets a zeroed backptrs_t
 * whose parents field takes over the old ino_un.plist pointer.
 * sets full_backptrs so later code knows which union member is live.
 */
void
add_ino_backptrs(xfs_mount_t *mp)
{
#ifdef XR_BCKPTR_DBG
	xfs_ino_t ino;
	int j, k;
#endif /* XR_BCKPTR_DBG */
	ino_tree_node_t *ino_rec;
	parent_list_t *tmp;
	xfs_agnumber_t i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		ino_rec = findfirst_inode_rec(i);

		while (ino_rec != NULL) {
			/*
			 * save the parent list, then switch the union
			 * over to the backptrs structure
			 */
			tmp = ino_rec->ino_un.plist;
			ino_rec->ino_un.backptrs = get_backptr();
			ino_rec->ino_un.backptrs->parents = tmp;

#ifdef XR_BCKPTR_DBG
			/* dump and sanity-check the saved parent list */
			if (tmp != NULL) {
				k = 0;
				for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
					ino = XFS_AGINO_TO_INO(mp, i,
						ino_rec->ino_startnum + j);
					if (ino == 25165846) {
						do_warn("THERE 1 !!!\n");
					}
					if (tmp->pentries[j] != 0) {
						k++;
						do_warn(
						"inode %llu - parent %llu\n",
							ino,
							tmp->pentries[j]);
						if (ino == 25165846) {
							do_warn("THERE!!!\n");
						}
					}
				}

				if (k != tmp->cnt) {
					do_warn(
					"ERROR - count = %d, counted %d\n",
						tmp->cnt, k);
				}
			}
#endif /* XR_BCKPTR_DBG */
			ino_rec = next_ino_rec(ino_rec);
		}
	}

	full_backptrs = 1;

	return;
}
758
759static __psunsigned_t
760avl_ino_start(avlnode_t *node)
761{
762 return((__psunsigned_t) ((ino_tree_node_t *) node)->ino_startnum);
763}
764
765static __psunsigned_t
766avl_ino_end(avlnode_t *node)
767{
768 return((__psunsigned_t) (
769 ((ino_tree_node_t *) node)->ino_startnum +
770 XFS_INODES_PER_CHUNK));
771}
772
/* key-extraction ops shared by the good and uncertain inode trees */
avlops_t avl_ino_tree_ops = {
	avl_ino_start,
	avl_ino_end
};
777
778void
779incore_ino_init(xfs_mount_t *mp)
780{
781 int i;
782 int agcount = mp->m_sb.sb_agcount;
783
784 if ((inode_tree_ptrs = malloc(agcount *
785 sizeof(avltree_desc_t *))) == NULL)
786 do_error("couldn't malloc inode tree descriptor table\n");
787 if ((inode_uncertain_tree_ptrs = malloc(agcount *
788 sizeof(avltree_desc_t *))) == NULL)
789 do_error("couldn't malloc uncertain ino tree descriptor table\n");
790
791 for (i = 0; i < agcount; i++) {
792 if ((inode_tree_ptrs[i] =
793 malloc(sizeof(avltree_desc_t))) == NULL)
794 do_error("couldn't malloc inode tree descriptor\n");
795 if ((inode_uncertain_tree_ptrs[i] =
796 malloc(sizeof(avltree_desc_t))) == NULL)
797 do_error(
798 "couldn't malloc uncertain ino tree descriptor\n");
799 }
800 for (i = 0; i < agcount; i++) {
801 avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
802 avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
803 }
804
805 ino_flist.cnt = 0;
806 ino_flist.list = NULL;
807
808 if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
809 do_error("couldn't malloc uncertain inode cache area\n");
810
811 bzero(last_rec, sizeof(ino_tree_node_t *) * agcount);
812
813 full_backptrs = 0;
814
815 return;
816}
817
#ifdef XR_INO_REF_DEBUG
/*
 * debug-only tracking of which inodes phase 7 has already
 * reference-checked, kept as a per-record "processed" bitmask.
 * the ino argument is unused here; kept for the caller's interface.
 */
void
add_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
	XFS_INOPROC_SET_PROC((ino_rec), (ino_offset));

	ASSERT(is_inode_refchecked(ino, ino_rec, ino_offset));

	return;
}

/* 1 if the inode's "processed" bit has been set, 0 otherwise */
int
is_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
	return(XFS_INOPROC_IS_PROC(ino_rec, ino_offset) == 0LL ? 0 : 1);
}
#endif /* XR_INO_REF_DEBUG */