/* repair/incore_ino.c */
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <libxfs.h>
#include "avl.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "threads.h"
#include "err_protos.h"

extern avlnode_t *avl_firstino(avlnode_t *root);

/*
 * array of inode tree ptrs, one per ag
 */
avltree_desc_t **inode_tree_ptrs;

/*
 * ditto for uncertain inodes
 */
static avltree_desc_t **inode_uncertain_tree_ptrs;

/* memory optimised nlink counting for all inodes */

static void nlink_grow_8_to_16(ino_tree_node_t *irec);
static void nlink_grow_16_to_32(ino_tree_node_t *irec);

static void
disk_nlink_32_set(ino_tree_node_t *irec, int ino_offset, __uint32_t nlinks)
{
        ((__uint32_t*)irec->disk_nlinks)[ino_offset] = nlinks;
}

static __uint32_t
disk_nlink_32_get(ino_tree_node_t *irec, int ino_offset)
{
        return ((__uint32_t*)irec->disk_nlinks)[ino_offset];
}

static __uint32_t
counted_nlink_32_get(ino_tree_node_t *irec, int ino_offset)
{
        return ((__uint32_t*)irec->ino_un.ex_data->counted_nlinks)[ino_offset];
}

static __uint32_t
counted_nlink_32_inc(ino_tree_node_t *irec, int ino_offset)
{
        return ++(((__uint32_t*)irec->ino_un.ex_data->counted_nlinks)[ino_offset]);
}

static __uint32_t
counted_nlink_32_dec(ino_tree_node_t *irec, int ino_offset)
{
        __uint32_t *nlinks = (__uint32_t*)irec->ino_un.ex_data->counted_nlinks;

        ASSERT(nlinks[ino_offset] > 0);
        return --(nlinks[ino_offset]);
}


static void
disk_nlink_16_set(ino_tree_node_t *irec, int ino_offset, __uint32_t nlinks)
{
        if (nlinks >= 0x10000) {
                nlink_grow_16_to_32(irec);
                disk_nlink_32_set(irec, ino_offset, nlinks);
        } else
                ((__uint16_t*)irec->disk_nlinks)[ino_offset] = nlinks;
}

static __uint32_t
disk_nlink_16_get(ino_tree_node_t *irec, int ino_offset)
{
        return ((__uint16_t*)irec->disk_nlinks)[ino_offset];
}

static __uint32_t
counted_nlink_16_get(ino_tree_node_t *irec, int ino_offset)
{
        return ((__uint16_t*)irec->ino_un.ex_data->counted_nlinks)[ino_offset];
}

static __uint32_t
counted_nlink_16_inc(ino_tree_node_t *irec, int ino_offset)
{
        __uint16_t *nlinks = (__uint16_t*)irec->ino_un.ex_data->counted_nlinks;

        if (nlinks[ino_offset] == 0xffff) {
                nlink_grow_16_to_32(irec);
                return counted_nlink_32_inc(irec, ino_offset);
        }
        return ++(nlinks[ino_offset]);
}

static __uint32_t
counted_nlink_16_dec(ino_tree_node_t *irec, int ino_offset)
{
        __uint16_t *nlinks = (__uint16_t*)irec->ino_un.ex_data->counted_nlinks;

        ASSERT(nlinks[ino_offset] > 0);
        return --(nlinks[ino_offset]);
}


static void
disk_nlink_8_set(ino_tree_node_t *irec, int ino_offset, __uint32_t nlinks)
{
        if (nlinks >= 0x100) {
                nlink_grow_8_to_16(irec);
                disk_nlink_16_set(irec, ino_offset, nlinks);
        } else
                irec->disk_nlinks[ino_offset] = nlinks;
}

static __uint32_t
disk_nlink_8_get(ino_tree_node_t *irec, int ino_offset)
{
        return irec->disk_nlinks[ino_offset];
}

static __uint32_t
counted_nlink_8_get(ino_tree_node_t *irec, int ino_offset)
{
        return irec->ino_un.ex_data->counted_nlinks[ino_offset];
}

static __uint32_t
counted_nlink_8_inc(ino_tree_node_t *irec, int ino_offset)
{
        if (irec->ino_un.ex_data->counted_nlinks[ino_offset] == 0xff) {
                nlink_grow_8_to_16(irec);
                return counted_nlink_16_inc(irec, ino_offset);
        }
        return ++(irec->ino_un.ex_data->counted_nlinks[ino_offset]);
}

static __uint32_t
counted_nlink_8_dec(ino_tree_node_t *irec, int ino_offset)
{
        ASSERT(irec->ino_un.ex_data->counted_nlinks[ino_offset] > 0);
        return --(irec->ino_un.ex_data->counted_nlinks[ino_offset]);
}


static nlink_ops_t nlinkops[] = {
        {sizeof(__uint8_t) * XFS_INODES_PER_CHUNK,
         disk_nlink_8_set, disk_nlink_8_get,
         counted_nlink_8_get, counted_nlink_8_inc, counted_nlink_8_dec},
        {sizeof(__uint16_t) * XFS_INODES_PER_CHUNK,
         disk_nlink_16_set, disk_nlink_16_get,
         counted_nlink_16_get, counted_nlink_16_inc, counted_nlink_16_dec},
        {sizeof(__uint32_t) * XFS_INODES_PER_CHUNK,
         disk_nlink_32_set, disk_nlink_32_get,
         counted_nlink_32_get, counted_nlink_32_inc, counted_nlink_32_dec},
};

static void
nlink_grow_8_to_16(ino_tree_node_t *irec)
{
        __uint16_t      *new_nlinks;
        int             i;

        new_nlinks = malloc(sizeof(__uint16_t) * XFS_INODES_PER_CHUNK);
        if (new_nlinks == NULL)
                do_error(_("could not allocate expanded nlink array\n"));
        for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
                new_nlinks[i] = irec->disk_nlinks[i];
        free(irec->disk_nlinks);
        irec->disk_nlinks = (__uint8_t*)new_nlinks;

        if (full_ino_ex_data) {
                new_nlinks = malloc(sizeof(__uint16_t) * XFS_INODES_PER_CHUNK);
                if (new_nlinks == NULL)
                        do_error(_("could not allocate expanded nlink array\n"));
                for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
                        new_nlinks[i] = irec->ino_un.ex_data->counted_nlinks[i];
                free(irec->ino_un.ex_data->counted_nlinks);
                irec->ino_un.ex_data->counted_nlinks = (__uint8_t*)new_nlinks;
        }
        irec->nlinkops = &nlinkops[1];
}

static void
nlink_grow_16_to_32(ino_tree_node_t *irec)
{
        __uint32_t      *new_nlinks;
        int             i;

        new_nlinks = malloc(sizeof(__uint32_t) * XFS_INODES_PER_CHUNK);
        if (new_nlinks == NULL)
                do_error(_("could not allocate expanded nlink array\n"));
        /* the old array holds 16-bit counts; widen each entry */
        for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
                new_nlinks[i] = ((__uint16_t*)irec->disk_nlinks)[i];
        free(irec->disk_nlinks);
        irec->disk_nlinks = (__uint8_t*)new_nlinks;

        if (full_ino_ex_data) {
                new_nlinks = malloc(sizeof(__uint32_t) * XFS_INODES_PER_CHUNK);
                if (new_nlinks == NULL)
                        do_error(_("could not allocate expanded nlink array\n"));
                for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
                        new_nlinks[i] = ((__uint16_t*)irec->ino_un.ex_data->counted_nlinks)[i];
                free(irec->ino_un.ex_data->counted_nlinks);
                irec->ino_un.ex_data->counted_nlinks = (__uint8_t*)new_nlinks;
        }
        irec->nlinkops = &nlinkops[2];
}
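
/*
 * Illustrative sketch, not part of the original source: how the tiered
 * counters above are meant to be driven.  Callers go through the ops
 * vector so the 8 -> 16 -> 32 bit growth stays transparent to them.
 * The function-pointer field names below are assumptions inferred from
 * the nlinkops[] initializer order (only nlink_size is referenced by
 * name elsewhere in this file), and `dino_nlink` stands for a link
 * count the caller has read from an on-disk inode:
 *
 *	int off = agino - irec->ino_startnum;
 *
 *	// remember the link count stored in the on-disk inode
 *	irec->nlinkops->disk_nlink_set(irec, off, dino_nlink);
 *
 *	// bump the observed count once per referencing dirent; hitting
 *	// 0xff / 0xffff makes the inc helpers widen the array first
 *	irec->nlinkops->counted_nlink_inc(irec, off);
 */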

/*
 * Next is the uncertain inode list -- a list of inode records kept
 * sorted in ascending order of starting inode number. There is one
 * list per ag.
 */

/*
 * Common code for creating inode records for use by trees and lists.
 * called only from add_inode() and add_aginode_uncertain().
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
static struct ino_tree_node *
alloc_ino_node(
        xfs_agino_t             starting_ino)
{
        struct ino_tree_node    *irec;

        irec = malloc(sizeof(*irec));
        if (!irec)
                do_error(_("inode map malloc failed\n"));

        irec->avl_node.avl_nextino = NULL;
        irec->avl_node.avl_forw = NULL;
        irec->avl_node.avl_back = NULL;

        irec->ino_startnum = starting_ino;
        irec->ino_confirmed = 0;
        irec->ino_isa_dir = 0;
        irec->ir_free = (xfs_inofree_t) -1;     /* all bits set: every inode starts free */
        irec->ino_un.ex_data = NULL;
        irec->nlinkops = &nlinkops[0];
        irec->disk_nlinks = calloc(1, nlinkops[0].nlink_size);
        if (!irec->disk_nlinks)
                do_error(_("could not allocate nlink array\n"));
        return irec;
}
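
/*
 * Example, not in the original source: because alloc_ino_node() sets
 * ir_free to all ones and ino_confirmed to zero, a fresh record reports
 * every one of its 64 inodes as free and unconfirmed until some phase
 * marks them otherwise, via the incore.h helpers used throughout this
 * file:
 *
 *	irec = alloc_ino_node(s_ino);
 *	set_inode_used(irec, agino - s_ino);	// claim one inode in the chunk
 */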

static void
free_ino_tree_node(
        struct ino_tree_node    *irec)
{
        irec->avl_node.avl_nextino = NULL;
        irec->avl_node.avl_forw = NULL;
        irec->avl_node.avl_back = NULL;

        free(irec->disk_nlinks);
        if (irec->ino_un.ex_data != NULL) {
                if (full_ino_ex_data) {
                        free(irec->ino_un.ex_data->parents);
                        free(irec->ino_un.ex_data->counted_nlinks);
                }
                free(irec->ino_un.ex_data);
        }

        free(irec);
}

/*
 * last referenced cache for uncertain inodes
 */
static ino_tree_node_t **last_rec;

/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes). Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
void
add_aginode_uncertain(xfs_agnumber_t agno, xfs_agino_t ino, int free)
{
        ino_tree_node_t         *ino_rec;
        xfs_agino_t             s_ino;
        int                     offset;

        ASSERT(agno < glob_agcount);
        ASSERT(last_rec != NULL);

        s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

        /*
         * check for a cache hit
         */
        if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
                offset = ino - s_ino;
                if (free)
                        set_inode_free(last_rec[agno], offset);
                else
                        set_inode_used(last_rec[agno], offset);

                return;
        }

        /*
         * check to see if record containing inode is already in the tree.
         * if not, add it
         */
        ino_rec = (ino_tree_node_t *)
                avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
        if (!ino_rec) {
                ino_rec = alloc_ino_node(s_ino);

                if (!avl_insert(inode_uncertain_tree_ptrs[agno],
                                &ino_rec->avl_node))
                        do_error(
        _("add_aginode_uncertain - duplicate inode range\n"));
        }

        if (free)
                set_inode_free(ino_rec, ino - s_ino);
        else
                set_inode_used(ino_rec, ino - s_ino);

        /*
         * set cache entry
         */
        last_rec[agno] = ino_rec;
}

/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion.
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
        add_aginode_uncertain(XFS_INO_TO_AGNO(mp, ino),
                                XFS_INO_TO_AGINO(mp, ino), free);
}
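
/*
 * Usage sketch (illustrative, not original source): a phase that finds
 * a plausible but unverified inode can record it either way; `mp` and
 * `ino` here stand for the caller's mount and global inode number:
 *
 *	// thought to be in use, but not yet trusted
 *	add_inode_uncertain(mp, ino, 0);
 *
 *	// equivalent, with the caller splitting the inode number itself
 *	add_aginode_uncertain(XFS_INO_TO_AGNO(mp, ino),
 *			      XFS_INO_TO_AGINO(mp, ino), 0);
 */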

/*
 * pull the indicated inode record out of the uncertain inode tree
 */
void
get_uncertain_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno,
                        ino_tree_node_t *ino_rec)
{
        ASSERT(inode_uncertain_tree_ptrs != NULL);
        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(inode_uncertain_tree_ptrs[agno] != NULL);

        avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);

        ino_rec->avl_node.avl_nextino = NULL;
        ino_rec->avl_node.avl_forw = NULL;
        ino_rec->avl_node.avl_back = NULL;
}

ino_tree_node_t *
findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
{
        return((ino_tree_node_t *)
                inode_uncertain_tree_ptrs[agno]->avl_firstino);
}

ino_tree_node_t *
find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
        return((ino_tree_node_t *)
                avl_findrange(inode_uncertain_tree_ptrs[agno], ino));
}

void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
        last_rec[agno] = NULL;
}


/*
 * Next comes the inode trees. One per AG, AVL trees of inode records, each
 * inode record tracking 64 inodes
 */

/*
 * Set up an inode tree record for a group of inodes that will include the
 * requested inode.
 *
 * This does NOT do error-check for duplicate records. The caller is
 * responsible for checking that. Ino must be the start of an
 * XFS_INODES_PER_CHUNK (64) inode chunk.
 *
 * Each inode resides in a 64-inode chunk; an allocation can cover one or
 * more chunks (MAX(64, inodes-per-block) inodes). The fs allocates
 * multiple chunks at once when a block can hold more than one chunk
 * (inodes per block > 64). Allocating in one-chunk pieces causes us
 * problems when it takes more than one fs block to contain an inode
 * chunk, because the chunks can start on *any* block boundary. So we
 * assume that the caller has a clue, because at this level we don't.
 */
static struct ino_tree_node *
add_inode(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_agino_t             agino)
{
        struct ino_tree_node    *irec;

        irec = alloc_ino_node(agino);
        if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node))
                do_warn(_("add_inode - duplicate inode range\n"));
        return irec;
}

/*
 * pull the indicated inode record out of the inode tree
 */
void
get_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        ASSERT(inode_tree_ptrs != NULL);
        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(inode_tree_ptrs[agno] != NULL);

        avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);

        ino_rec->avl_node.avl_nextino = NULL;
        ino_rec->avl_node.avl_forw = NULL;
        ino_rec->avl_node.avl_back = NULL;
}

/*
 * free the designated inode record (return it to the free pool)
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        free_ino_tree_node(ino_rec);
}

void
find_inode_rec_range(struct xfs_mount *mp, xfs_agnumber_t agno,
                xfs_agino_t start_ino, xfs_agino_t end_ino,
                ino_tree_node_t **first, ino_tree_node_t **last)
{
        *first = *last = NULL;

        /*
         * is the AG inside the file system?
         */
        if (agno < mp->m_sb.sb_agcount)
                avl_findranges(inode_tree_ptrs[agno], start_ino,
                        end_ino, (avlnode_t **) first, (avlnode_t **) last);
}
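
/*
 * Illustrative walk, not original source: the usual calling pattern is
 * to fetch the bounding records for an inode range and then step through
 * them with next_ino_rec(), which follows the avl_nextino chain:
 *
 *	ino_tree_node_t *first, *last, *irec;
 *
 *	find_inode_rec_range(mp, agno, start_ino, end_ino, &first, &last);
 *	for (irec = first; irec != NULL; irec = next_ino_rec(irec)) {
 *		// ... examine irec ...
 *		if (irec == last)
 *			break;
 *	}
 */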

/*
 * if ino doesn't exist, it must be properly aligned -- on a
 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
 * whichever alignment is larger.
 */
ino_tree_node_t *
set_inode_used_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
        ino_tree_node_t *ino_rec;

        /*
         * check alignment -- the only way to detect this
         * is to see if the chunk overlaps another chunk
         * already in the tree
         */
        ino_rec = add_inode(mp, agno, ino);

        ASSERT(ino_rec != NULL);
        ASSERT(ino >= ino_rec->ino_startnum &&
                ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

        set_inode_used(ino_rec, ino - ino_rec->ino_startnum);

        return(ino_rec);
}

ino_tree_node_t *
set_inode_free_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
        ino_tree_node_t *ino_rec;

        ino_rec = add_inode(mp, agno, ino);

        ASSERT(ino_rec != NULL);
        ASSERT(ino >= ino_rec->ino_startnum &&
                ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

        set_inode_free(ino_rec, ino - ino_rec->ino_startnum);

        return(ino_rec);
}

void
print_inode_list_int(xfs_agnumber_t agno, int uncertain)
{
        ino_tree_node_t *ino_rec;

        if (!uncertain) {
                fprintf(stderr, _("good inode list is --\n"));
                ino_rec = findfirst_inode_rec(agno);
        } else {
                fprintf(stderr, _("uncertain inode list is --\n"));
                ino_rec = findfirst_uncertain_inode_rec(agno);
        }

        if (ino_rec == NULL) {
                fprintf(stderr, _("agno %d -- no inodes\n"), agno);
                return;
        }

        printf(_("agno %d\n"), agno);

        while (ino_rec != NULL) {
                fprintf(stderr,
        _("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"),
                        (unsigned long)ino_rec,
                        ino_rec->ino_startnum,
                        (unsigned long long)ino_rec->ir_free,
                        (unsigned long long)ino_rec->ino_confirmed);
                if (ino_rec->ino_startnum == 0)
                        ino_rec = ino_rec;      /* deliberate no-op; a handy breakpoint spot */
                ino_rec = next_ino_rec(ino_rec);
        }
}

void
print_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 0);
}

void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 1);
}

/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode whose
 * bit is the Nth set bit in the mask has its parent stored in the Nth
 * slot of the array, where N starts at 0.
 */

void
set_inode_parent(
        ino_tree_node_t         *irec,
        int                     offset,
        xfs_ino_t               parent)
{
        parent_list_t           *ptbl;
        int                     i;
        int                     cnt;
        int                     target;
        __uint64_t              bitmask;
        parent_entry_t          *tmp;

        if (full_ino_ex_data)
                ptbl = irec->ino_un.ex_data->parents;
        else
                ptbl = irec->ino_un.plist;

        if (ptbl == NULL) {
                ptbl = (parent_list_t *)malloc(sizeof(parent_list_t));
                if (!ptbl)
                        do_error(_("couldn't malloc parent list table\n"));

                if (full_ino_ex_data)
                        irec->ino_un.ex_data->parents = ptbl;
                else
                        irec->ino_un.plist = ptbl;

                ptbl->pmask = 1LL << offset;
                ptbl->pentries = (xfs_ino_t*)memalign(sizeof(xfs_ino_t),
                                                        sizeof(xfs_ino_t));
                if (!ptbl->pentries)
                        do_error(_("couldn't memalign pentries table\n"));
#ifdef DEBUG
                ptbl->cnt = 1;
#endif
                ptbl->pentries[0] = parent;

                return;
        }

        if (ptbl->pmask & (1LL << offset)) {
                bitmask = 1LL;
                target = 0;

                for (i = 0; i < offset; i++) {
                        if (ptbl->pmask & bitmask)
                                target++;
                        bitmask <<= 1;
                }
#ifdef DEBUG
                ASSERT(target < ptbl->cnt);
#endif
                ptbl->pentries[target] = parent;

                return;
        }

        bitmask = 1LL;
        cnt = target = 0;

        for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
                if (ptbl->pmask & bitmask) {
                        cnt++;
                        if (i < offset)
                                target++;
                }

                bitmask <<= 1;
        }

#ifdef DEBUG
        ASSERT(cnt == ptbl->cnt);
#endif
        ASSERT(cnt >= target);

        tmp = (xfs_ino_t*)memalign(sizeof(xfs_ino_t), (cnt + 1) * sizeof(xfs_ino_t));
        if (!tmp)
                do_error(_("couldn't memalign pentries table\n"));

        memmove(tmp, ptbl->pentries, target * sizeof(parent_entry_t));

        if (cnt > target)
                memmove(tmp + target + 1, ptbl->pentries + target,
                                (cnt - target) * sizeof(parent_entry_t));

        free(ptbl->pentries);

        ptbl->pentries = tmp;

#ifdef DEBUG
        ptbl->cnt++;
#endif
        ptbl->pentries[target] = parent;
        ptbl->pmask |= (1LL << offset);
}
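
/*
 * Aside (not in the original source): the index searches above are just
 * a population count of the mask bits below `offset`.  With a GCC-style
 * builtin the same value could be computed as:
 *
 *	target = __builtin_popcountll(ptbl->pmask & ((1ULL << offset) - 1));
 *
 * The file keeps the portable loop instead; this sketch is only to make
 * the packed-array indexing scheme explicit.  ((1ULL << offset) - 1)
 * is an empty mask when offset == 0, giving target == 0 as required.
 */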

xfs_ino_t
get_inode_parent(ino_tree_node_t *irec, int offset)
{
        __uint64_t      bitmask;
        parent_list_t   *ptbl;
        int             i;
        int             target;

        if (full_ino_ex_data)
                ptbl = irec->ino_un.ex_data->parents;
        else
                ptbl = irec->ino_un.plist;

        if (ptbl->pmask & (1LL << offset)) {
                bitmask = 1LL;
                target = 0;

                for (i = 0; i < offset; i++) {
                        if (ptbl->pmask & bitmask)
                                target++;
                        bitmask <<= 1;
                }
#ifdef DEBUG
                ASSERT(target < ptbl->cnt);
#endif
                return(ptbl->pentries[target]);
        }

        return(0LL);
}

static void
alloc_ex_data(ino_tree_node_t *irec)
{
        parent_list_t   *ptbl;

        ptbl = irec->ino_un.plist;
        irec->ino_un.ex_data = (ino_ex_data_t *)calloc(1, sizeof(ino_ex_data_t));
        if (irec->ino_un.ex_data == NULL)
                do_error(_("could not malloc inode extra data\n"));

        irec->ino_un.ex_data->parents = ptbl;
        irec->ino_un.ex_data->counted_nlinks = calloc(1, irec->nlinkops->nlink_size);

        if (irec->ino_un.ex_data->counted_nlinks == NULL)
                do_error(_("could not malloc inode extra data\n"));
}

void
add_ino_ex_data(xfs_mount_t *mp)
{
        ino_tree_node_t *ino_rec;
        xfs_agnumber_t  i;

        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                ino_rec = findfirst_inode_rec(i);

                while (ino_rec != NULL) {
                        alloc_ex_data(ino_rec);
                        ino_rec = next_ino_rec(ino_rec);
                }
        }
        full_ino_ex_data = 1;
}
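
/*
 * Sketch of the union hand-off performed above (illustrative): before
 * add_ino_ex_data() runs, irec->ino_un.plist points at the parent list
 * (or is NULL); afterwards the same union slot holds an ino_ex_data_t
 * carrying that parent list plus a counted_nlinks array, and the
 * full_ino_ex_data flag tells readers which interpretation is live:
 *
 *	ptbl = full_ino_ex_data ? irec->ino_un.ex_data->parents
 *				: irec->ino_un.plist;
 */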

static __psunsigned_t
avl_ino_start(avlnode_t *node)
{
        return((__psunsigned_t) ((ino_tree_node_t *) node)->ino_startnum);
}

static __psunsigned_t
avl_ino_end(avlnode_t *node)
{
        return((__psunsigned_t) (
                ((ino_tree_node_t *) node)->ino_startnum +
                XFS_INODES_PER_CHUNK));
}

avlops_t avl_ino_tree_ops = {
        avl_ino_start,
        avl_ino_end
};

void
incore_ino_init(xfs_mount_t *mp)
{
        int i;
        int agcount = mp->m_sb.sb_agcount;

        if ((inode_tree_ptrs = malloc(agcount *
                                        sizeof(avltree_desc_t *))) == NULL)
                do_error(_("couldn't malloc inode tree descriptor table\n"));
        if ((inode_uncertain_tree_ptrs = malloc(agcount *
                                        sizeof(avltree_desc_t *))) == NULL)
                do_error(
                _("couldn't malloc uncertain ino tree descriptor table\n"));

        for (i = 0; i < agcount; i++) {
                if ((inode_tree_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(_("couldn't malloc inode tree descriptor\n"));
                if ((inode_uncertain_tree_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(
                        _("couldn't malloc uncertain ino tree descriptor\n"));
        }
        for (i = 0; i < agcount; i++) {
                avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
                avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
        }

        if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
                do_error(_("couldn't malloc uncertain inode cache area\n"));

        memset(last_rec, 0, sizeof(ino_tree_node_t *) * agcount);

        full_ino_ex_data = 0;
}

#ifdef XR_INO_REF_DEBUG
void
add_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
        XFS_INOPROC_SET_PROC((ino_rec), (ino_offset));

        ASSERT(is_inode_refchecked(ino, ino_rec, ino_offset));
}

int
is_inode_refchecked(xfs_ino_t ino, ino_tree_node_t *ino_rec, int ino_offset)
{
        return(XFS_INOPROC_IS_PROC(ino_rec, ino_offset) == 0LL ? 0 : 1);
}
#endif /* XR_INO_REF_DEBUG */