// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "threads.h"
#include "err_protos.h"

/*
 * array of inode tree ptrs, one per ag
 */
avltree_desc_t		**inode_tree_ptrs;

/*
 * ditto for uncertain inodes
 */
static avltree_desc_t	**inode_uncertain_tree_ptrs;

/* memory optimised nlink counting for all inodes */
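
/*
 * Link counts are kept per chunk as packed arrays of 64 entries.  They
 * start out as uint8_t counters and are promoted in place to uint16_t
 * and then uint32_t arrays the first time any counter in the chunk
 * would overflow.  A sketch of the promotion path for a counted link
 * count already at 0xff:
 *
 *	add_inode_ref(irec, offset);
 *		-> nlink_grow_8_to_16(irec);	(un8[] copied into un16[])
 *		-> counted_nlinks.un16[offset]++;
 */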

static void *
alloc_nlink_array(uint8_t nlink_size)
{
	void		*ptr;

	ptr = calloc(XFS_INODES_PER_CHUNK, nlink_size);
	if (!ptr)
		do_error(_("could not allocate nlink array\n"));
	return ptr;
}

static void
nlink_grow_8_to_16(ino_tree_node_t *irec)
{
	uint16_t	*new_nlinks;
	int		i;

	irec->nlink_size = sizeof(uint16_t);

	new_nlinks = alloc_nlink_array(irec->nlink_size);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
		new_nlinks[i] = irec->disk_nlinks.un8[i];
	free(irec->disk_nlinks.un8);
	irec->disk_nlinks.un16 = new_nlinks;

	if (full_ino_ex_data) {
		new_nlinks = alloc_nlink_array(irec->nlink_size);
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			new_nlinks[i] =
				irec->ino_un.ex_data->counted_nlinks.un8[i];
		}
		free(irec->ino_un.ex_data->counted_nlinks.un8);
		irec->ino_un.ex_data->counted_nlinks.un16 = new_nlinks;
	}
}

static void
nlink_grow_16_to_32(ino_tree_node_t *irec)
{
	uint32_t	*new_nlinks;
	int		i;

	irec->nlink_size = sizeof(uint32_t);

	new_nlinks = alloc_nlink_array(irec->nlink_size);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
		new_nlinks[i] = irec->disk_nlinks.un16[i];
	free(irec->disk_nlinks.un16);
	irec->disk_nlinks.un32 = new_nlinks;

	if (full_ino_ex_data) {
		new_nlinks = alloc_nlink_array(irec->nlink_size);

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			new_nlinks[i] =
				irec->ino_un.ex_data->counted_nlinks.un16[i];
		}
		free(irec->ino_un.ex_data->counted_nlinks.un16);
		irec->ino_un.ex_data->counted_nlinks.un32 = new_nlinks;
	}
}

void add_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
	ASSERT(irec->ino_un.ex_data != NULL);

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		if (irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] < 0xff) {
			irec->ino_un.ex_data->counted_nlinks.un8[ino_offset]++;
			break;
		}
		nlink_grow_8_to_16(irec);
		/*FALLTHRU*/
	case sizeof(uint16_t):
		if (irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] < 0xffff) {
			irec->ino_un.ex_data->counted_nlinks.un16[ino_offset]++;
			break;
		}
		nlink_grow_16_to_32(irec);
		/*FALLTHRU*/
	case sizeof(uint32_t):
		irec->ino_un.ex_data->counted_nlinks.un32[ino_offset]++;
		break;
	default:
		ASSERT(0);
	}
}

void drop_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
	uint32_t	refs = 0;

	ASSERT(irec->ino_un.ex_data != NULL);

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
		break;
	case sizeof(uint16_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
		break;
	case sizeof(uint32_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un32[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
		break;
	default:
		ASSERT(0);
	}

	if (refs == 0)
		irec->ino_un.ex_data->ino_reached &= ~IREC_MASK(ino_offset);
}

uint32_t num_inode_references(struct ino_tree_node *irec, int ino_offset)
{
	ASSERT(irec->ino_un.ex_data != NULL);

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		return irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
	case sizeof(uint16_t):
		return irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
	case sizeof(uint32_t):
		return irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
	default:
		ASSERT(0);
	}
	return 0;
}

void set_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset,
		uint32_t nlinks)
{
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		if (nlinks < 0xff) {
			irec->disk_nlinks.un8[ino_offset] = nlinks;
			break;
		}
		nlink_grow_8_to_16(irec);
		/*FALLTHRU*/
	case sizeof(uint16_t):
		if (nlinks < 0xffff) {
			irec->disk_nlinks.un16[ino_offset] = nlinks;
			break;
		}
		nlink_grow_16_to_32(irec);
		/*FALLTHRU*/
	case sizeof(uint32_t):
		irec->disk_nlinks.un32[ino_offset] = nlinks;
		break;
	default:
		ASSERT(0);
	}
}

uint32_t get_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset)
{
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		return irec->disk_nlinks.un8[ino_offset];
	case sizeof(uint16_t):
		return irec->disk_nlinks.un16[ino_offset];
	case sizeof(uint32_t):
		return irec->disk_nlinks.un32[ino_offset];
	default:
		ASSERT(0);
	}
	return 0;
}

static uint8_t *
alloc_ftypes_array(
	struct xfs_mount	*mp)
{
	uint8_t			*ptr;

	if (!xfs_sb_version_hasftype(&mp->m_sb))
		return NULL;

	ptr = calloc(XFS_INODES_PER_CHUNK, sizeof(*ptr));
	if (!ptr)
		do_error(_("could not allocate ftypes array\n"));
	return ptr;
}

/*
 * Next is the uncertain inode list -- one list per ag, of inode
 * records sorted in ascending order by starting inode number.
 */

/*
 * Common code for creating inode records for use by trees and lists.
 * called only from add_inodes and add_inodes_uncertain
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
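/*
 * Concretely, a fresh record from alloc_ino_node() below has
 * ir_free == (xfs_inofree_t)-1 (all 64 inodes free) and
 * ino_confirmed == 0 (none confirmed); callers then flip individual
 * bits via set_inode_used()/set_inode_free() as inodes are discovered.
 */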
static struct ino_tree_node *
alloc_ino_node(
	struct xfs_mount	*mp,
	xfs_agino_t		starting_ino)
{
	struct ino_tree_node	*irec;

	irec = malloc(sizeof(*irec));
	if (!irec)
		do_error(_("inode map malloc failed\n"));

	irec->avl_node.avl_nextino = NULL;
	irec->avl_node.avl_forw = NULL;
	irec->avl_node.avl_back = NULL;

	irec->ino_startnum = starting_ino;
	irec->ino_confirmed = 0;
	irec->ino_isa_dir = 0;
	irec->ino_was_rl = 0;
	irec->ino_is_rl = 0;
	irec->ir_free = (xfs_inofree_t) - 1;
	irec->ir_sparse = 0;
	irec->ino_un.ex_data = NULL;
	irec->nlink_size = sizeof(uint8_t);
	irec->disk_nlinks.un8 = alloc_nlink_array(irec->nlink_size);
	irec->ftypes = alloc_ftypes_array(mp);
	return irec;
}

static void
free_nlink_array(union ino_nlink nlinks, uint8_t nlink_size)
{
	switch (nlink_size) {
	case sizeof(uint8_t):
		free(nlinks.un8);
		break;
	case sizeof(uint16_t):
		free(nlinks.un16);
		break;
	case sizeof(uint32_t):
		free(nlinks.un32);
		break;
	default:
		ASSERT(0);
	}
}

static void
free_ino_tree_node(
	struct ino_tree_node	*irec)
{
	irec->avl_node.avl_nextino = NULL;
	irec->avl_node.avl_forw = NULL;
	irec->avl_node.avl_back = NULL;

	free_nlink_array(irec->disk_nlinks, irec->nlink_size);
	if (irec->ino_un.ex_data != NULL) {
		if (full_ino_ex_data) {
			free(irec->ino_un.ex_data->parents);
			free_nlink_array(irec->ino_un.ex_data->counted_nlinks,
					 irec->nlink_size);
		}
		free(irec->ino_un.ex_data);
	}

	free(irec->ftypes);
	free(irec);
}

/*
 * last referenced cache for uncertain inodes
 */
static ino_tree_node_t **last_rec;

/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes). Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
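/*
 * Worked example of the chunk rounding below: with
 * XFS_INODES_PER_CHUNK == 64, an uncertain agino of 1000 is filed
 * under the record starting at s_ino = rounddown(1000, 64) = 960,
 * at offset 1000 - 960 = 40 within that record.
 */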
void
add_aginode_uncertain(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		ino,
	int			free)
{
	ino_tree_node_t		*ino_rec;
	xfs_agino_t		s_ino;
	int			offset;

	ASSERT(agno < glob_agcount);
	ASSERT(last_rec != NULL);

	s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

	/*
	 * check for a cache hit
	 */
	if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
		offset = ino - s_ino;
		if (free)
			set_inode_free(last_rec[agno], offset);
		else
			set_inode_used(last_rec[agno], offset);

		return;
	}

	/*
	 * check to see if record containing inode is already in the tree.
	 * if not, add it
	 */
	ino_rec = (ino_tree_node_t *)
		avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
	if (!ino_rec) {
		ino_rec = alloc_ino_node(mp, s_ino);

		if (!avl_insert(inode_uncertain_tree_ptrs[agno],
				&ino_rec->avl_node))
			do_error(
	_("add_aginode_uncertain - duplicate inode range\n"));
	}

	if (free)
		set_inode_free(ino_rec, ino - s_ino);
	else
		set_inode_used(ino_rec, ino - s_ino);

	/*
	 * set cache entry
	 */
	last_rec[agno] = ino_rec;
}

/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion.
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
	add_aginode_uncertain(mp, XFS_INO_TO_AGNO(mp, ino),
				XFS_INO_TO_AGINO(mp, ino), free);
}

/*
 * pull the indicated inode record out of the uncertain inode tree
 */
void
get_uncertain_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno,
			ino_tree_node_t *ino_rec)
{
	ASSERT(inode_uncertain_tree_ptrs != NULL);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(inode_uncertain_tree_ptrs[agno] != NULL);

	avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

ino_tree_node_t *
findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
{
	return((ino_tree_node_t *)
		inode_uncertain_tree_ptrs[agno]->avl_firstino);
}

ino_tree_node_t *
find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
	return((ino_tree_node_t *)
		avl_findrange(inode_uncertain_tree_ptrs[agno], ino));
}

void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
	last_rec[agno] = NULL;
}


/*
 * Next comes the inode trees. One per AG, AVL trees of inode records, each
 * inode record tracking 64 inodes
 */

/*
 * Set up an inode tree record for a group of inodes that will include the
 * requested inode.
 *
 * This does NOT error-check for duplicate records. The caller is
 * responsible for checking that. Ino must be the start of an
 * XFS_INODES_PER_CHUNK (64) inode chunk.
 *
 * Each inode resides in a 64-inode chunk; depending on geometry a chunk
 * spans one or more fs blocks, or a single block holds several chunks,
 * so the allocation granularity is max(64, inodes-per-block) inodes.
 * The fs allocates in chunks (as opposed to 1 chunk) when a block can
 * hold more than one chunk (inodes per block > 64). Allocating in
 * one-chunk pieces causes us problems when it takes more than one fs
 * block to contain an inode chunk because the chunks can start on *any*
 * block boundary. So we assume that the caller has a clue because at
 * this level, we don't.
 */
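/*
 * For instance (a sketch of the geometry): 512-byte inodes in 4096-byte
 * blocks give 8 inodes per block, so one 64-inode chunk spans 8 fs
 * blocks; conversely, 256-byte inodes in 64k blocks give 256 inodes
 * per block, i.e. 4 chunks per block.
 */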
static struct ino_tree_node *
add_inode(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino)
{
	struct ino_tree_node	*irec;

	irec = alloc_ino_node(mp, agino);
	if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node))
		do_warn(_("add_inode - duplicate inode range\n"));
	return irec;
}

/*
 * pull the indicated inode record out of the inode tree
 */
void
get_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	ASSERT(inode_tree_ptrs != NULL);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(inode_tree_ptrs[agno] != NULL);

	avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

/*
 * free the designated inode record (return it to the free pool)
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	free_ino_tree_node(ino_rec);
}

void
find_inode_rec_range(struct xfs_mount *mp, xfs_agnumber_t agno,
		xfs_agino_t start_ino, xfs_agino_t end_ino,
		ino_tree_node_t **first, ino_tree_node_t **last)
{
	*first = *last = NULL;

	/*
	 * Is the AG inside the filesystem?
	 */
	if (agno < mp->m_sb.sb_agcount)
		avl_findranges(inode_tree_ptrs[agno], start_ino,
			end_ino, (avlnode_t **) first, (avlnode_t **) last);
}

/*
 * if ino doesn't exist, it must be properly aligned -- on a
 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
 * whichever alignment is larger.
 */
ino_tree_node_t *
set_inode_used_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t	*ino_rec;

	/*
	 * check alignment -- the only way to detect this
	 * is to see if the chunk overlaps another chunk
	 * already in the tree
	 */
	ino_rec = add_inode(mp, agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_used(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

ino_tree_node_t *
set_inode_free_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t	*ino_rec;

	ino_rec = add_inode(mp, agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_free(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

static void
print_inode_list_int(xfs_agnumber_t agno, int uncertain)
{
	ino_tree_node_t	*ino_rec;

	if (!uncertain) {
		fprintf(stderr, _("good inode list is --\n"));
		ino_rec = findfirst_inode_rec(agno);
	} else {
		fprintf(stderr, _("uncertain inode list is --\n"));
		ino_rec = findfirst_uncertain_inode_rec(agno);
	}

	if (ino_rec == NULL) {
		fprintf(stderr, _("agno %d -- no inodes\n"), agno);
		return;
	}

	printf(_("agno %d\n"), agno);

	while (ino_rec != NULL) {
		fprintf(stderr,
	_("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"),
			(unsigned long)ino_rec,
			ino_rec->ino_startnum,
			(unsigned long long)ino_rec->ir_free,
			(unsigned long long)ino_rec->ino_confirmed);
		ino_rec = next_ino_rec(ino_rec);
	}
}

void
print_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 0);
}

void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 1);
}

/*
 * set parent -- use a bitmask and a packed array. The bitmask
 * indicates which inodes have an entry in the array. An inode whose
 * bit is the Nth set bit in the mask is stored in the Nth location in
 * the array, where N starts at 0.
 */
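/*
 * For example: with pmask == 0x16 (binary 10110), offsets 1, 2 and 4
 * have parents recorded, stored at pentries[0], pentries[1] and
 * pentries[2] respectively -- the array index for offset N is the
 * number of mask bits set below bit N.
 */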

void
set_inode_parent(
	ino_tree_node_t		*irec,
	int			offset,
	xfs_ino_t		parent)
{
	parent_list_t		*ptbl;
	int			i;
	int			cnt;
	int			target;
	uint64_t		bitmask;
	parent_entry_t		*tmp;

	if (full_ino_ex_data)
		ptbl = irec->ino_un.ex_data->parents;
	else
		ptbl = irec->ino_un.plist;

	if (ptbl == NULL) {
		ptbl = (parent_list_t *)malloc(sizeof(parent_list_t));
		if (!ptbl)
			do_error(_("couldn't malloc parent list table\n"));

		if (full_ino_ex_data)
			irec->ino_un.ex_data->parents = ptbl;
		else
			irec->ino_un.plist = ptbl;

		ptbl->pmask = 1ULL << offset;
		ptbl->pentries = (xfs_ino_t *)memalign(sizeof(xfs_ino_t),
							sizeof(xfs_ino_t));
		if (!ptbl->pentries)
			do_error(_("couldn't memalign pentries table\n"));
#ifdef DEBUG
		ptbl->cnt = 1;
#endif
		ptbl->pentries[0] = parent;

		return;
	}

	if (ptbl->pmask & (1ULL << offset)) {
		bitmask = 1ULL;
		target = 0;

		for (i = 0; i < offset; i++) {
			if (ptbl->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < ptbl->cnt);
#endif
		ptbl->pentries[target] = parent;

		return;
	}

	bitmask = 1ULL;
	cnt = target = 0;

	for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
		if (ptbl->pmask & bitmask) {
			cnt++;
			if (i < offset)
				target++;
		}

		bitmask <<= 1;
	}

#ifdef DEBUG
	ASSERT(cnt == ptbl->cnt);
#endif
	ASSERT(cnt >= target);

	tmp = (xfs_ino_t *)memalign(sizeof(xfs_ino_t),
				(cnt + 1) * sizeof(xfs_ino_t));
	if (!tmp)
		do_error(_("couldn't memalign pentries table\n"));

	memmove(tmp, ptbl->pentries, target * sizeof(parent_entry_t));

	if (cnt > target)
		memmove(tmp + target + 1, ptbl->pentries + target,
			(cnt - target) * sizeof(parent_entry_t));

	free(ptbl->pentries);

	ptbl->pentries = tmp;

#ifdef DEBUG
	ptbl->cnt++;
#endif
	ptbl->pentries[target] = parent;
	ptbl->pmask |= (1ULL << offset);
}

xfs_ino_t
get_inode_parent(ino_tree_node_t *irec, int offset)
{
	uint64_t	bitmask;
	parent_list_t	*ptbl;
	int		i;
	int		target;

	if (full_ino_ex_data)
		ptbl = irec->ino_un.ex_data->parents;
	else
		ptbl = irec->ino_un.plist;

	if (ptbl->pmask & (1ULL << offset)) {
		bitmask = 1ULL;
		target = 0;

		for (i = 0; i < offset; i++) {
			if (ptbl->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < ptbl->cnt);
#endif
		return(ptbl->pentries[target]);
	}

	return(0LL);
}

void
alloc_ex_data(ino_tree_node_t *irec)
{
	parent_list_t	*ptbl;

	ptbl = irec->ino_un.plist;
	irec->ino_un.ex_data = (ino_ex_data_t *)calloc(1, sizeof(ino_ex_data_t));
	if (irec->ino_un.ex_data == NULL)
		do_error(_("could not malloc inode extra data\n"));

	irec->ino_un.ex_data->parents = ptbl;

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		irec->ino_un.ex_data->counted_nlinks.un8 =
			alloc_nlink_array(irec->nlink_size);
		break;
	case sizeof(uint16_t):
		irec->ino_un.ex_data->counted_nlinks.un16 =
			alloc_nlink_array(irec->nlink_size);
		break;
	case sizeof(uint32_t):
		irec->ino_un.ex_data->counted_nlinks.un32 =
			alloc_nlink_array(irec->nlink_size);
		break;
	default:
		ASSERT(0);
	}
}
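
/*
 * Note the ino_un union handoff in alloc_ex_data() above: before the
 * extra data phase, ino_un.plist points at the parent list directly;
 * alloc_ex_data() saves that pointer and re-attaches it as
 * ex_data->parents, so the same parent storage stays live once
 * full_ino_ex_data is set by add_ino_ex_data() below.
 */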

void
add_ino_ex_data(xfs_mount_t *mp)
{
	ino_tree_node_t	*ino_rec;
	xfs_agnumber_t	i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		ino_rec = findfirst_inode_rec(i);

		while (ino_rec != NULL) {
			alloc_ex_data(ino_rec);
			ino_rec = next_ino_rec(ino_rec);
		}
	}
	full_ino_ex_data = 1;
}

static uintptr_t
avl_ino_start(avlnode_t *node)
{
	return((uintptr_t) ((ino_tree_node_t *) node)->ino_startnum);
}

static uintptr_t
avl_ino_end(avlnode_t *node)
{
	return((uintptr_t) (
		((ino_tree_node_t *) node)->ino_startnum +
		XFS_INODES_PER_CHUNK));
}

static avlops_t avl_ino_tree_ops = {
	avl_ino_start,
	avl_ino_end
};
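
/*
 * These ops make both trees index each record by the half-open agino
 * range [ino_startnum, ino_startnum + XFS_INODES_PER_CHUNK), which is
 * why avl_findrange() above maps any agino to the record covering its
 * chunk.
 */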

void
incore_ino_init(xfs_mount_t *mp)
{
	int		i;
	int		agcount = mp->m_sb.sb_agcount;

	if ((inode_tree_ptrs = malloc(agcount *
			sizeof(avltree_desc_t *))) == NULL)
		do_error(_("couldn't malloc inode tree descriptor table\n"));
	if ((inode_uncertain_tree_ptrs = malloc(agcount *
			sizeof(avltree_desc_t *))) == NULL)
		do_error(
		_("couldn't malloc uncertain ino tree descriptor table\n"));

	for (i = 0; i < agcount; i++) {
		if ((inode_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error(_("couldn't malloc inode tree descriptor\n"));
		if ((inode_uncertain_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error(
			_("couldn't malloc uncertain ino tree descriptor\n"));
	}
	for (i = 0; i < agcount; i++) {
		avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
		avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
	}

	if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
		do_error(_("couldn't malloc uncertain inode cache area\n"));

	memset(last_rec, 0, sizeof(ino_tree_node_t *) * agcount);

	full_ino_ex_data = 0;
}