/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
26 #include "err_protos.h"
28 extern avlnode_t
*avl_firstino(avlnode_t
*root
);
31 * array of inode tree ptrs, one per ag
33 avltree_desc_t
**inode_tree_ptrs
;
36 * ditto for uncertain inodes
38 static avltree_desc_t
**inode_uncertain_tree_ptrs
;
40 /* memory optimised nlink counting for all inodes */
42 static void nlink_grow_8_to_16(ino_tree_node_t
*irec
);
43 static void nlink_grow_16_to_32(ino_tree_node_t
*irec
);
46 disk_nlink_32_set(ino_tree_node_t
*irec
, int ino_offset
, __uint32_t nlinks
)
48 ((__uint32_t
*)irec
->disk_nlinks
)[ino_offset
] = nlinks
;
52 disk_nlink_32_get(ino_tree_node_t
*irec
, int ino_offset
)
54 return ((__uint32_t
*)irec
->disk_nlinks
)[ino_offset
];
58 counted_nlink_32_get(ino_tree_node_t
*irec
, int ino_offset
)
60 return ((__uint32_t
*)irec
->ino_un
.ex_data
->counted_nlinks
)[ino_offset
];
64 counted_nlink_32_inc(ino_tree_node_t
*irec
, int ino_offset
)
66 return ++(((__uint32_t
*)irec
->ino_un
.ex_data
->counted_nlinks
)[ino_offset
]);
70 counted_nlink_32_dec(ino_tree_node_t
*irec
, int ino_offset
)
72 __uint32_t
*nlinks
= (__uint32_t
*)irec
->ino_un
.ex_data
->counted_nlinks
;
74 ASSERT(nlinks
[ino_offset
] > 0);
75 return --(nlinks
[ino_offset
]);
80 disk_nlink_16_set(ino_tree_node_t
*irec
, int ino_offset
, __uint32_t nlinks
)
82 if (nlinks
>= 0x10000) {
83 nlink_grow_16_to_32(irec
);
84 disk_nlink_32_set(irec
, ino_offset
, nlinks
);
86 ((__uint16_t
*)irec
->disk_nlinks
)[ino_offset
] = nlinks
;
90 disk_nlink_16_get(ino_tree_node_t
*irec
, int ino_offset
)
92 return ((__uint16_t
*)irec
->disk_nlinks
)[ino_offset
];
96 counted_nlink_16_get(ino_tree_node_t
*irec
, int ino_offset
)
98 return ((__uint16_t
*)irec
->ino_un
.ex_data
->counted_nlinks
)[ino_offset
];
102 counted_nlink_16_inc(ino_tree_node_t
*irec
, int ino_offset
)
104 __uint16_t
*nlinks
= (__uint16_t
*)irec
->ino_un
.ex_data
->counted_nlinks
;
106 if (nlinks
[ino_offset
] == 0xffff) {
107 nlink_grow_16_to_32(irec
);
108 return counted_nlink_32_inc(irec
, ino_offset
);
110 return ++(nlinks
[ino_offset
]);
114 counted_nlink_16_dec(ino_tree_node_t
*irec
, int ino_offset
)
116 __uint16_t
*nlinks
= (__uint16_t
*)irec
->ino_un
.ex_data
->counted_nlinks
;
118 ASSERT(nlinks
[ino_offset
] > 0);
119 return --(nlinks
[ino_offset
]);
124 disk_nlink_8_set(ino_tree_node_t
*irec
, int ino_offset
, __uint32_t nlinks
)
126 if (nlinks
>= 0x100) {
127 nlink_grow_8_to_16(irec
);
128 disk_nlink_16_set(irec
, ino_offset
, nlinks
);
130 irec
->disk_nlinks
[ino_offset
] = nlinks
;
134 disk_nlink_8_get(ino_tree_node_t
*irec
, int ino_offset
)
136 return irec
->disk_nlinks
[ino_offset
];
140 counted_nlink_8_get(ino_tree_node_t
*irec
, int ino_offset
)
142 return irec
->ino_un
.ex_data
->counted_nlinks
[ino_offset
];
146 counted_nlink_8_inc(ino_tree_node_t
*irec
, int ino_offset
)
148 if (irec
->ino_un
.ex_data
->counted_nlinks
[ino_offset
] == 0xff) {
149 nlink_grow_8_to_16(irec
);
150 return counted_nlink_16_inc(irec
, ino_offset
);
152 return ++(irec
->ino_un
.ex_data
->counted_nlinks
[ino_offset
]);
156 counted_nlink_8_dec(ino_tree_node_t
*irec
, int ino_offset
)
158 ASSERT(irec
->ino_un
.ex_data
->counted_nlinks
[ino_offset
] > 0);
159 return --(irec
->ino_un
.ex_data
->counted_nlinks
[ino_offset
]);
163 static nlink_ops_t nlinkops
[] = {
164 {sizeof(__uint8_t
) * XFS_INODES_PER_CHUNK
,
165 disk_nlink_8_set
, disk_nlink_8_get
,
166 counted_nlink_8_get
, counted_nlink_8_inc
, counted_nlink_8_dec
},
167 {sizeof(__uint16_t
) * XFS_INODES_PER_CHUNK
,
168 disk_nlink_16_set
, disk_nlink_16_get
,
169 counted_nlink_16_get
, counted_nlink_16_inc
, counted_nlink_16_dec
},
170 {sizeof(__uint32_t
) * XFS_INODES_PER_CHUNK
,
171 disk_nlink_32_set
, disk_nlink_32_get
,
172 counted_nlink_32_get
, counted_nlink_32_inc
, counted_nlink_32_dec
},
176 nlink_grow_8_to_16(ino_tree_node_t
*irec
)
178 __uint16_t
*new_nlinks
;
181 new_nlinks
= malloc(sizeof(__uint16_t
) * XFS_INODES_PER_CHUNK
);
182 if (new_nlinks
== NULL
)
183 do_error(_("could not allocate expanded nlink array\n"));
184 for (i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++)
185 new_nlinks
[i
] = irec
->disk_nlinks
[i
];
186 free(irec
->disk_nlinks
);
187 irec
->disk_nlinks
= (__uint8_t
*)new_nlinks
;
189 if (full_ino_ex_data
) {
190 new_nlinks
= malloc(sizeof(__uint16_t
) * XFS_INODES_PER_CHUNK
);
191 if (new_nlinks
== NULL
)
192 do_error(_("could not allocate expanded nlink array\n"));
193 for (i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++)
194 new_nlinks
[i
] = irec
->ino_un
.ex_data
->counted_nlinks
[i
];
195 free(irec
->ino_un
.ex_data
->counted_nlinks
);
196 irec
->ino_un
.ex_data
->counted_nlinks
= (__uint8_t
*)new_nlinks
;
198 irec
->nlinkops
= &nlinkops
[1];
202 nlink_grow_16_to_32(ino_tree_node_t
*irec
)
204 __uint32_t
*new_nlinks
;
207 new_nlinks
= malloc(sizeof(__uint32_t
) * XFS_INODES_PER_CHUNK
);
208 if (new_nlinks
== NULL
)
209 do_error(_("could not allocate expanded nlink array\n"));
210 for (i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++)
211 new_nlinks
[i
] = ((__int16_t
*)&irec
->disk_nlinks
)[i
];
212 free(irec
->disk_nlinks
);
213 irec
->disk_nlinks
= (__uint8_t
*)new_nlinks
;
215 if (full_ino_ex_data
) {
216 new_nlinks
= malloc(sizeof(__uint32_t
) * XFS_INODES_PER_CHUNK
);
217 if (new_nlinks
== NULL
)
218 do_error(_("could not allocate expanded nlink array\n"));
219 for (i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++)
220 new_nlinks
[i
] = ((__int16_t
*)&irec
->ino_un
.ex_data
->counted_nlinks
)[i
];
221 free(irec
->ino_un
.ex_data
->counted_nlinks
);
222 irec
->ino_un
.ex_data
->counted_nlinks
= (__uint8_t
*)new_nlinks
;
224 irec
->nlinkops
= &nlinkops
[2];
228 * Next is the uncertain inode list -- a sorted (in ascending order)
229 * list of inode records sorted on the starting inode number. There
230 * is one list per ag.
234 * Common code for creating inode records for use by trees and lists.
235 * called only from add_inodes and add_inodes_uncertain
237 * IMPORTANT: all inodes (inode records) start off as free and
240 static struct ino_tree_node
*
242 xfs_agino_t starting_ino
)
244 struct ino_tree_node
*irec
;
246 irec
= malloc(sizeof(*irec
));
248 do_error(_("inode map malloc failed\n"));
250 irec
->avl_node
.avl_nextino
= NULL
;
251 irec
->avl_node
.avl_forw
= NULL
;
252 irec
->avl_node
.avl_back
= NULL
;
254 irec
->ino_startnum
= starting_ino
;
255 irec
->ino_confirmed
= 0;
256 irec
->ino_isa_dir
= 0;
257 irec
->ir_free
= (xfs_inofree_t
) - 1;
258 irec
->ino_un
.ex_data
= NULL
;
259 irec
->nlinkops
= &nlinkops
[0];
260 irec
->disk_nlinks
= calloc(1, nlinkops
[0].nlink_size
);
261 if (!irec
->disk_nlinks
)
262 do_error(_("could not allocate nlink array\n"));
268 struct ino_tree_node
*irec
)
270 irec
->avl_node
.avl_nextino
= NULL
;
271 irec
->avl_node
.avl_forw
= NULL
;
272 irec
->avl_node
.avl_back
= NULL
;
274 free(irec
->disk_nlinks
);
275 if (irec
->ino_un
.ex_data
!= NULL
) {
276 if (full_ino_ex_data
) {
277 free(irec
->ino_un
.ex_data
->parents
);
278 free(irec
->ino_un
.ex_data
->counted_nlinks
);
280 free(irec
->ino_un
.ex_data
);
288 * last referenced cache for uncertain inodes
290 static ino_tree_node_t
**last_rec
;
293 * ok, the uncertain inodes are a set of trees just like the
294 * good inodes but all starting inode records are (arbitrarily)
295 * aligned on XFS_CHUNK_PER_INODE boundaries to prevent overlaps.
296 * this means we may have partials records in the tree (e.g. records
297 * without 64 confirmed uncertain inodes). Tough.
299 * free is set to 1 if the inode is thought to be free, 0 if used
302 add_aginode_uncertain(xfs_agnumber_t agno
, xfs_agino_t ino
, int free
)
304 ino_tree_node_t
*ino_rec
;
308 ASSERT(agno
< glob_agcount
);
309 ASSERT(last_rec
!= NULL
);
311 s_ino
= rounddown(ino
, XFS_INODES_PER_CHUNK
);
314 * check for a cache hit
316 if (last_rec
[agno
] != NULL
&& last_rec
[agno
]->ino_startnum
== s_ino
) {
317 offset
= ino
- s_ino
;
319 set_inode_free(last_rec
[agno
], offset
);
321 set_inode_used(last_rec
[agno
], offset
);
327 * check to see if record containing inode is already in the tree.
330 ino_rec
= (ino_tree_node_t
*)
331 avl_findrange(inode_uncertain_tree_ptrs
[agno
], s_ino
);
333 ino_rec
= alloc_ino_node(s_ino
);
335 if (!avl_insert(inode_uncertain_tree_ptrs
[agno
],
338 _("add_aginode_uncertain - duplicate inode range\n"));
342 set_inode_free(ino_rec
, ino
- s_ino
);
344 set_inode_used(ino_rec
, ino
- s_ino
);
349 last_rec
[agno
] = ino_rec
;
353 * like add_aginode_uncertain() only it needs an xfs_mount_t *
354 * to perform the inode number conversion.
357 add_inode_uncertain(xfs_mount_t
*mp
, xfs_ino_t ino
, int free
)
359 add_aginode_uncertain(XFS_INO_TO_AGNO(mp
, ino
),
360 XFS_INO_TO_AGINO(mp
, ino
), free
);
364 * pull the indicated inode record out of the uncertain inode tree
367 get_uncertain_inode_rec(struct xfs_mount
*mp
, xfs_agnumber_t agno
,
368 ino_tree_node_t
*ino_rec
)
370 ASSERT(inode_tree_ptrs
!= NULL
);
371 ASSERT(agno
< mp
->m_sb
.sb_agcount
);
372 ASSERT(inode_tree_ptrs
[agno
] != NULL
);
374 avl_delete(inode_uncertain_tree_ptrs
[agno
], &ino_rec
->avl_node
);
376 ino_rec
->avl_node
.avl_nextino
= NULL
;
377 ino_rec
->avl_node
.avl_forw
= NULL
;
378 ino_rec
->avl_node
.avl_back
= NULL
;
382 findfirst_uncertain_inode_rec(xfs_agnumber_t agno
)
384 return((ino_tree_node_t
*)
385 inode_uncertain_tree_ptrs
[agno
]->avl_firstino
);
389 find_uncertain_inode_rec(xfs_agnumber_t agno
, xfs_agino_t ino
)
391 return((ino_tree_node_t
*)
392 avl_findrange(inode_uncertain_tree_ptrs
[agno
], ino
));
396 clear_uncertain_ino_cache(xfs_agnumber_t agno
)
398 last_rec
[agno
] = NULL
;
403 * Next comes the inode trees. One per AG, AVL trees of inode records, each
404 * inode record tracking 64 inodes
408 * Set up an inode tree record for a group of inodes that will include the
411 * This does NOT do error-check for duplicate records. The caller is
412 * responsible for checking that. Ino must be the start of an
413 * XFS_INODES_PER_CHUNK (64) inode chunk
415 * Each inode resides in a 64-inode chunk which can be part one or more chunks
416 * (MAX(64, inodes-per-block). The fs allocates in chunks (as opposed to 1
417 * chunk) when a block can hold more than one chunk (inodes per block > 64).
418 * Allocating in one chunk pieces causes us problems when it takes more than
419 * one fs block to contain an inode chunk because the chunks can start on
420 * *any* block boundary. So we assume that the caller has a clue because at
421 * this level, we don't.
423 static struct ino_tree_node
*
425 struct xfs_mount
*mp
,
429 struct ino_tree_node
*irec
;
431 irec
= alloc_ino_node(agino
);
432 if (!avl_insert(inode_tree_ptrs
[agno
], &irec
->avl_node
))
433 do_warn(_("add_inode - duplicate inode range\n"));
438 * pull the indicated inode record out of the inode tree
441 get_inode_rec(struct xfs_mount
*mp
, xfs_agnumber_t agno
, ino_tree_node_t
*ino_rec
)
443 ASSERT(inode_tree_ptrs
!= NULL
);
444 ASSERT(agno
< mp
->m_sb
.sb_agcount
);
445 ASSERT(inode_tree_ptrs
[agno
] != NULL
);
447 avl_delete(inode_tree_ptrs
[agno
], &ino_rec
->avl_node
);
449 ino_rec
->avl_node
.avl_nextino
= NULL
;
450 ino_rec
->avl_node
.avl_forw
= NULL
;
451 ino_rec
->avl_node
.avl_back
= NULL
;
455 * free the designated inode record (return it to the free pool)
459 free_inode_rec(xfs_agnumber_t agno
, ino_tree_node_t
*ino_rec
)
461 free_ino_tree_node(ino_rec
);
465 find_inode_rec_range(struct xfs_mount
*mp
, xfs_agnumber_t agno
,
466 xfs_agino_t start_ino
, xfs_agino_t end_ino
,
467 ino_tree_node_t
**first
, ino_tree_node_t
**last
)
469 *first
= *last
= NULL
;
472 * Is the AG inside the file system ?
474 if (agno
< mp
->m_sb
.sb_agcount
)
475 avl_findranges(inode_tree_ptrs
[agno
], start_ino
,
476 end_ino
, (avlnode_t
**) first
, (avlnode_t
**) last
);
480 * if ino doesn't exist, it must be properly aligned -- on a
481 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
482 * whichever alignment is larger.
485 set_inode_used_alloc(struct xfs_mount
*mp
, xfs_agnumber_t agno
, xfs_agino_t ino
)
487 ino_tree_node_t
*ino_rec
;
490 * check alignment -- the only way to detect this
491 * is too see if the chunk overlaps another chunk
492 * already in the tree
494 ino_rec
= add_inode(mp
, agno
, ino
);
496 ASSERT(ino_rec
!= NULL
);
497 ASSERT(ino
>= ino_rec
->ino_startnum
&&
498 ino
- ino_rec
->ino_startnum
< XFS_INODES_PER_CHUNK
);
500 set_inode_used(ino_rec
, ino
- ino_rec
->ino_startnum
);
506 set_inode_free_alloc(struct xfs_mount
*mp
, xfs_agnumber_t agno
, xfs_agino_t ino
)
508 ino_tree_node_t
*ino_rec
;
510 ino_rec
= add_inode(mp
, agno
, ino
);
512 ASSERT(ino_rec
!= NULL
);
513 ASSERT(ino
>= ino_rec
->ino_startnum
&&
514 ino
- ino_rec
->ino_startnum
< XFS_INODES_PER_CHUNK
);
516 set_inode_free(ino_rec
, ino
- ino_rec
->ino_startnum
);
522 print_inode_list_int(xfs_agnumber_t agno
, int uncertain
)
524 ino_tree_node_t
*ino_rec
;
527 fprintf(stderr
, _("good inode list is --\n"));
528 ino_rec
= findfirst_inode_rec(agno
);
530 fprintf(stderr
, _("uncertain inode list is --\n"));
531 ino_rec
= findfirst_uncertain_inode_rec(agno
);
534 if (ino_rec
== NULL
) {
535 fprintf(stderr
, _("agno %d -- no inodes\n"), agno
);
539 printf(_("agno %d\n"), agno
);
541 while(ino_rec
!= NULL
) {
543 _("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"),
544 (unsigned long)ino_rec
,
545 ino_rec
->ino_startnum
,
546 (unsigned long long)ino_rec
->ir_free
,
547 (unsigned long long)ino_rec
->ino_confirmed
);
548 if (ino_rec
->ino_startnum
== 0)
550 ino_rec
= next_ino_rec(ino_rec
);
555 print_inode_list(xfs_agnumber_t agno
)
557 print_inode_list_int(agno
, 0);
561 print_uncertain_inode_list(xfs_agnumber_t agno
)
563 print_inode_list_int(agno
, 1);
567 * set parent -- use a bitmask and a packed array. The bitmask
568 * indicate which inodes have an entry in the array. An inode that
569 * is the Nth bit set in the mask is stored in the Nth location in
570 * the array where N starts at 0.
575 ino_tree_node_t
*irec
,
586 if (full_ino_ex_data
)
587 ptbl
= irec
->ino_un
.ex_data
->parents
;
589 ptbl
= irec
->ino_un
.plist
;
592 ptbl
= (parent_list_t
*)malloc(sizeof(parent_list_t
));
594 do_error(_("couldn't malloc parent list table\n"));
596 if (full_ino_ex_data
)
597 irec
->ino_un
.ex_data
->parents
= ptbl
;
599 irec
->ino_un
.plist
= ptbl
;
601 ptbl
->pmask
= 1LL << offset
;
602 ptbl
->pentries
= (xfs_ino_t
*)memalign(sizeof(xfs_ino_t
),
605 do_error(_("couldn't memalign pentries table\n"));
609 ptbl
->pentries
[0] = parent
;
614 if (ptbl
->pmask
& (1LL << offset
)) {
618 for (i
= 0; i
< offset
; i
++) {
619 if (ptbl
->pmask
& bitmask
)
624 ASSERT(target
< ptbl
->cnt
);
626 ptbl
->pentries
[target
] = parent
;
634 for (i
= 0; i
< XFS_INODES_PER_CHUNK
; i
++) {
635 if (ptbl
->pmask
& bitmask
) {
645 ASSERT(cnt
== ptbl
->cnt
);
647 ASSERT(cnt
>= target
);
649 tmp
= (xfs_ino_t
*)memalign(sizeof(xfs_ino_t
), (cnt
+ 1) * sizeof(xfs_ino_t
));
651 do_error(_("couldn't memalign pentries table\n"));
653 memmove(tmp
, ptbl
->pentries
, target
* sizeof(parent_entry_t
));
656 memmove(tmp
+ target
+ 1, ptbl
->pentries
+ target
,
657 (cnt
- target
) * sizeof(parent_entry_t
));
659 free(ptbl
->pentries
);
661 ptbl
->pentries
= tmp
;
666 ptbl
->pentries
[target
] = parent
;
667 ptbl
->pmask
|= (1LL << offset
);
671 get_inode_parent(ino_tree_node_t
*irec
, int offset
)
678 if (full_ino_ex_data
)
679 ptbl
= irec
->ino_un
.ex_data
->parents
;
681 ptbl
= irec
->ino_un
.plist
;
683 if (ptbl
->pmask
& (1LL << offset
)) {
687 for (i
= 0; i
< offset
; i
++) {
688 if (ptbl
->pmask
& bitmask
)
693 ASSERT(target
< ptbl
->cnt
);
695 return(ptbl
->pentries
[target
]);
702 alloc_ex_data(ino_tree_node_t
*irec
)
706 ptbl
= irec
->ino_un
.plist
;
707 irec
->ino_un
.ex_data
= (ino_ex_data_t
*)calloc(1, sizeof(ino_ex_data_t
));
708 if (irec
->ino_un
.ex_data
== NULL
)
709 do_error(_("could not malloc inode extra data\n"));
711 irec
->ino_un
.ex_data
->parents
= ptbl
;
712 irec
->ino_un
.ex_data
->counted_nlinks
= calloc(1, irec
->nlinkops
->nlink_size
);
714 if (irec
->ino_un
.ex_data
->counted_nlinks
== NULL
)
715 do_error(_("could not malloc inode extra data\n"));
719 add_ino_ex_data(xfs_mount_t
*mp
)
721 ino_tree_node_t
*ino_rec
;
724 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
725 ino_rec
= findfirst_inode_rec(i
);
727 while (ino_rec
!= NULL
) {
728 alloc_ex_data(ino_rec
);
729 ino_rec
= next_ino_rec(ino_rec
);
732 full_ino_ex_data
= 1;
735 static __psunsigned_t
736 avl_ino_start(avlnode_t
*node
)
738 return((__psunsigned_t
) ((ino_tree_node_t
*) node
)->ino_startnum
);
741 static __psunsigned_t
742 avl_ino_end(avlnode_t
*node
)
744 return((__psunsigned_t
) (
745 ((ino_tree_node_t
*) node
)->ino_startnum
+
746 XFS_INODES_PER_CHUNK
));
749 avlops_t avl_ino_tree_ops
= {
755 incore_ino_init(xfs_mount_t
*mp
)
758 int agcount
= mp
->m_sb
.sb_agcount
;
760 if ((inode_tree_ptrs
= malloc(agcount
*
761 sizeof(avltree_desc_t
*))) == NULL
)
762 do_error(_("couldn't malloc inode tree descriptor table\n"));
763 if ((inode_uncertain_tree_ptrs
= malloc(agcount
*
764 sizeof(avltree_desc_t
*))) == NULL
)
766 _("couldn't malloc uncertain ino tree descriptor table\n"));
768 for (i
= 0; i
< agcount
; i
++) {
769 if ((inode_tree_ptrs
[i
] =
770 malloc(sizeof(avltree_desc_t
))) == NULL
)
771 do_error(_("couldn't malloc inode tree descriptor\n"));
772 if ((inode_uncertain_tree_ptrs
[i
] =
773 malloc(sizeof(avltree_desc_t
))) == NULL
)
775 _("couldn't malloc uncertain ino tree descriptor\n"));
777 for (i
= 0; i
< agcount
; i
++) {
778 avl_init_tree(inode_tree_ptrs
[i
], &avl_ino_tree_ops
);
779 avl_init_tree(inode_uncertain_tree_ptrs
[i
], &avl_ino_tree_ops
);
782 if ((last_rec
= malloc(sizeof(ino_tree_node_t
*) * agcount
)) == NULL
)
783 do_error(_("couldn't malloc uncertain inode cache area\n"));
785 memset(last_rec
, 0, sizeof(ino_tree_node_t
*) * agcount
);
787 full_ino_ex_data
= 0;
790 #ifdef XR_INO_REF_DEBUG
792 add_inode_refchecked(xfs_ino_t ino
, ino_tree_node_t
*ino_rec
, int ino_offset
)
794 XFS_INOPROC_SET_PROC((ino_rec
), (ino_offset
));
796 ASSERT(is_inode_refchecked(ino
, ino_rec
, ino_offset
));
800 is_inode_refchecked(xfs_ino_t ino
, ino_tree_node_t
*ino_rec
, int ino_offset
)
802 return(XFS_INOPROC_IS_PROC(ino_rec
, ino_offset
) == 0LL ? 0 : 1);
804 #endif /* XR_INO_REF_DEBUG */