/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "threads.h"
#include "err_protos.h"

/*
 * array of inode tree ptrs, one per ag
 */
avltree_desc_t **inode_tree_ptrs;

/*
 * ditto for uncertain inodes
 */
static avltree_desc_t **inode_uncertain_tree_ptrs;

/* memory optimised nlink counting for all inodes */

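/*
 * Per-chunk link counts start out as 8-bit values and are promoted to
 * 16 and then 32 bits only on demand, so a 64-inode chunk normally
 * costs just 64 bytes per counter array.  irec->nlink_size records the
 * element width currently in use and selects the un8/un16/un32 arm of
 * the ino_nlink union; e.g. once any count in a chunk would exceed
 * 0xff, nlink_grow_8_to_16() below reallocates the whole chunk's
 * array at 16 bits per entry.
 */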
static void *
alloc_nlink_array(__uint8_t nlink_size)
{
        void *ptr;

        ptr = calloc(XFS_INODES_PER_CHUNK, nlink_size);
        if (!ptr)
                do_error(_("could not allocate nlink array\n"));
        return ptr;
}

static void
nlink_grow_8_to_16(ino_tree_node_t *irec)
{
        __uint16_t *new_nlinks;
        int i;

        irec->nlink_size = sizeof(__uint16_t);

        new_nlinks = alloc_nlink_array(irec->nlink_size);
        for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
                new_nlinks[i] = irec->disk_nlinks.un8[i];
        free(irec->disk_nlinks.un8);
        irec->disk_nlinks.un16 = new_nlinks;

        if (full_ino_ex_data) {
                new_nlinks = alloc_nlink_array(irec->nlink_size);
                for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
                        new_nlinks[i] =
                                irec->ino_un.ex_data->counted_nlinks.un8[i];
                }
                free(irec->ino_un.ex_data->counted_nlinks.un8);
                irec->ino_un.ex_data->counted_nlinks.un16 = new_nlinks;
        }
}

static void
nlink_grow_16_to_32(ino_tree_node_t *irec)
{
        __uint32_t *new_nlinks;
        int i;

        irec->nlink_size = sizeof(__uint32_t);

        new_nlinks = alloc_nlink_array(irec->nlink_size);
        for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
                new_nlinks[i] = irec->disk_nlinks.un16[i];
        free(irec->disk_nlinks.un16);
        irec->disk_nlinks.un32 = new_nlinks;

        if (full_ino_ex_data) {
                new_nlinks = alloc_nlink_array(irec->nlink_size);

                for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
                        new_nlinks[i] =
                                irec->ino_un.ex_data->counted_nlinks.un16[i];
                }
                free(irec->ino_un.ex_data->counted_nlinks.un16);
                irec->ino_un.ex_data->counted_nlinks.un32 = new_nlinks;
        }
}

void add_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
        ASSERT(irec->ino_un.ex_data != NULL);

        switch (irec->nlink_size) {
        case sizeof(__uint8_t):
                if (irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] < 0xff) {
                        irec->ino_un.ex_data->counted_nlinks.un8[ino_offset]++;
                        break;
                }
                nlink_grow_8_to_16(irec);
                /*FALLTHRU*/
        case sizeof(__uint16_t):
                if (irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] < 0xffff) {
                        irec->ino_un.ex_data->counted_nlinks.un16[ino_offset]++;
                        break;
                }
                nlink_grow_16_to_32(irec);
                /*FALLTHRU*/
        case sizeof(__uint32_t):
                irec->ino_un.ex_data->counted_nlinks.un32[ino_offset]++;
                break;
        default:
                ASSERT(0);
        }
}

void drop_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
        __uint32_t refs = 0;

        ASSERT(irec->ino_un.ex_data != NULL);

        switch (irec->nlink_size) {
        case sizeof(__uint8_t):
                ASSERT(irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] > 0);
                refs = --irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
                break;
        case sizeof(__uint16_t):
                ASSERT(irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] > 0);
                refs = --irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
                break;
        case sizeof(__uint32_t):
                ASSERT(irec->ino_un.ex_data->counted_nlinks.un32[ino_offset] > 0);
                refs = --irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
                break;
        default:
                ASSERT(0);
        }

        if (refs == 0)
                irec->ino_un.ex_data->ino_reached &= ~IREC_MASK(ino_offset);
}

__uint32_t num_inode_references(struct ino_tree_node *irec, int ino_offset)
{
        ASSERT(irec->ino_un.ex_data != NULL);

        switch (irec->nlink_size) {
        case sizeof(__uint8_t):
                return irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
        case sizeof(__uint16_t):
                return irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
        case sizeof(__uint32_t):
                return irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
        default:
                ASSERT(0);
        }
        return 0;
}

void set_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset,
                __uint32_t nlinks)
{
        switch (irec->nlink_size) {
        case sizeof(__uint8_t):
                if (nlinks < 0xff) {
                        irec->disk_nlinks.un8[ino_offset] = nlinks;
                        break;
                }
                nlink_grow_8_to_16(irec);
                /*FALLTHRU*/
        case sizeof(__uint16_t):
                if (nlinks < 0xffff) {
                        irec->disk_nlinks.un16[ino_offset] = nlinks;
                        break;
                }
                nlink_grow_16_to_32(irec);
                /*FALLTHRU*/
        case sizeof(__uint32_t):
                irec->disk_nlinks.un32[ino_offset] = nlinks;
                break;
        default:
                ASSERT(0);
        }
}

__uint32_t get_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset)
{
        switch (irec->nlink_size) {
        case sizeof(__uint8_t):
                return irec->disk_nlinks.un8[ino_offset];
        case sizeof(__uint16_t):
                return irec->disk_nlinks.un16[ino_offset];
        case sizeof(__uint32_t):
                return irec->disk_nlinks.un32[ino_offset];
        default:
                ASSERT(0);
        }
        return 0;
}

static __uint8_t *
alloc_ftypes_array(
        struct xfs_mount        *mp)
{
        __uint8_t *ptr;

        if (!xfs_sb_version_hasftype(&mp->m_sb))
                return NULL;

        ptr = calloc(XFS_INODES_PER_CHUNK, sizeof(*ptr));
        if (!ptr)
                do_error(_("could not allocate ftypes array\n"));
        return ptr;
}

/*
 * Next is the uncertain inode list -- a list of inode records
 * sorted in ascending order on the starting inode number.  There
 * is one list per ag.
 */

/*
 * Common code for creating inode records for use by trees and lists.
 * Called only from add_inode() and add_aginode_uncertain().
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
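/*
 * "All free" is represented below by setting ir_free to all ones:
 * each of the 64 bits in the xfs_inofree_t covers one inode in the
 * chunk.
 */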
static struct ino_tree_node *
alloc_ino_node(
        struct xfs_mount        *mp,
        xfs_agino_t             starting_ino)
{
        struct ino_tree_node    *irec;

        irec = malloc(sizeof(*irec));
        if (!irec)
                do_error(_("inode map malloc failed\n"));

        irec->avl_node.avl_nextino = NULL;
        irec->avl_node.avl_forw = NULL;
        irec->avl_node.avl_back = NULL;

        irec->ino_startnum = starting_ino;
        irec->ino_confirmed = 0;
        irec->ino_isa_dir = 0;
        irec->ir_free = (xfs_inofree_t) -1;
        irec->ir_sparse = 0;
        irec->ino_un.ex_data = NULL;
        irec->nlink_size = sizeof(__uint8_t);
        irec->disk_nlinks.un8 = alloc_nlink_array(irec->nlink_size);
        irec->ftypes = alloc_ftypes_array(mp);
        return irec;
}

static void
free_nlink_array(union ino_nlink nlinks, __uint8_t nlink_size)
{
        switch (nlink_size) {
        case sizeof(__uint8_t):
                free(nlinks.un8);
                break;
        case sizeof(__uint16_t):
                free(nlinks.un16);
                break;
        case sizeof(__uint32_t):
                free(nlinks.un32);
                break;
        default:
                ASSERT(0);
        }
}

static void
free_ino_tree_node(
        struct ino_tree_node    *irec)
{
        irec->avl_node.avl_nextino = NULL;
        irec->avl_node.avl_forw = NULL;
        irec->avl_node.avl_back = NULL;

        free_nlink_array(irec->disk_nlinks, irec->nlink_size);
        if (irec->ino_un.ex_data != NULL) {
                if (full_ino_ex_data) {
                        free(irec->ino_un.ex_data->parents);
                        free_nlink_array(irec->ino_un.ex_data->counted_nlinks,
                                         irec->nlink_size);
                }
                free(irec->ino_un.ex_data);
        }

        free(irec->ftypes);
        free(irec);
}

/*
 * last referenced cache for uncertain inodes
 */
static ino_tree_node_t **last_rec;

/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes). Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
void
add_aginode_uncertain(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_agino_t             ino,
        int                     free)
{
        ino_tree_node_t         *ino_rec;
        xfs_agino_t             s_ino;
        int                     offset;

        ASSERT(agno < glob_agcount);
        ASSERT(last_rec != NULL);

        s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

        /*
         * check for a cache hit
         */
        if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
                offset = ino - s_ino;
                if (free)
                        set_inode_free(last_rec[agno], offset);
                else
                        set_inode_used(last_rec[agno], offset);

                return;
        }

        /*
         * check to see if record containing inode is already in the tree.
         * if not, add it
         */
        ino_rec = (ino_tree_node_t *)
                avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
        if (!ino_rec) {
                ino_rec = alloc_ino_node(mp, s_ino);

                if (!avl_insert(inode_uncertain_tree_ptrs[agno],
                                &ino_rec->avl_node))
                        do_error(
                _("add_aginode_uncertain - duplicate inode range\n"));
        }

        if (free)
                set_inode_free(ino_rec, ino - s_ino);
        else
                set_inode_used(ino_rec, ino - s_ino);

        /*
         * set cache entry
         */
        last_rec[agno] = ino_rec;
}

/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion.
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
        add_aginode_uncertain(mp, XFS_INO_TO_AGNO(mp, ino),
                                XFS_INO_TO_AGINO(mp, ino), free);
}

/*
 * pull the indicated inode record out of the uncertain inode tree
 */
void
get_uncertain_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno,
                        ino_tree_node_t *ino_rec)
{
        ASSERT(inode_uncertain_tree_ptrs != NULL);
        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(inode_uncertain_tree_ptrs[agno] != NULL);

        avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);

        ino_rec->avl_node.avl_nextino = NULL;
        ino_rec->avl_node.avl_forw = NULL;
        ino_rec->avl_node.avl_back = NULL;
}

ino_tree_node_t *
findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
{
        return((ino_tree_node_t *)
                inode_uncertain_tree_ptrs[agno]->avl_firstino);
}

ino_tree_node_t *
find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
        return((ino_tree_node_t *)
                avl_findrange(inode_uncertain_tree_ptrs[agno], ino));
}

void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
        last_rec[agno] = NULL;
}


/*
 * Next comes the inode trees. One per AG, AVL trees of inode records, each
 * inode record tracking 64 inodes
 */

/*
 * Set up an inode tree record for a group of inodes that will include the
 * requested inode.
 *
 * This does NOT do error-check for duplicate records.  The caller is
 * responsible for checking that.  ino must be the start of an
 * XFS_INODES_PER_CHUNK (64) inode chunk.
 *
 * Each inode resides in a 64-inode chunk which can be part of a larger
 * allocation of one or more chunks (MAX(64, inodes-per-block) inodes).
 * The fs allocates in multiples of chunks (as opposed to a single chunk)
 * when a block can hold more than one chunk (inodes per block > 64).
 * Allocating in one-chunk pieces causes us problems when it takes more
 * than one fs block to contain an inode chunk because the chunks can
 * start on *any* block boundary.  So we assume that the caller has a
 * clue, because at this level we don't.
 */
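/*
 * Example geometry: with 512-byte inodes in 4096-byte blocks a block
 * holds 8 inodes, so a 64-inode chunk spans 8 fs blocks; with 256-byte
 * inodes it holds 16 and the chunk spans 4 blocks.  Either way the
 * chunk may begin on any block boundary, hence the alignment burden
 * on the caller.
 */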
static struct ino_tree_node *
add_inode(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_agino_t             agino)
{
        struct ino_tree_node    *irec;

        irec = alloc_ino_node(mp, agino);
        if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node))
                do_warn(_("add_inode - duplicate inode range\n"));
        return irec;
}

/*
 * pull the indicated inode record out of the inode tree
 */
void
get_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        ASSERT(inode_tree_ptrs != NULL);
        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(inode_tree_ptrs[agno] != NULL);

        avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);

        ino_rec->avl_node.avl_nextino = NULL;
        ino_rec->avl_node.avl_forw = NULL;
        ino_rec->avl_node.avl_back = NULL;
}

/*
 * free the designated inode record (return it to the free pool)
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
        free_ino_tree_node(ino_rec);
}

void
find_inode_rec_range(struct xfs_mount *mp, xfs_agnumber_t agno,
                xfs_agino_t start_ino, xfs_agino_t end_ino,
                ino_tree_node_t **first, ino_tree_node_t **last)
{
        *first = *last = NULL;

        /*
         * Is the AG inside the file system ?
         */
        if (agno < mp->m_sb.sb_agcount)
                avl_findranges(inode_tree_ptrs[agno], start_ino,
                        end_ino, (avlnode_t **) first, (avlnode_t **) last);
}

/*
 * if ino doesn't exist, it must be properly aligned -- on a
 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
 * whichever alignment is larger.
 */
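/*
 * E.g. when blocks hold at most 64 inodes, the only valid chunk start
 * numbers are 0, 64, 128, ...; a misaligned chunk will overlap a
 * record already in the tree and make the avl_insert() in add_inode()
 * fail.
 */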
ino_tree_node_t *
set_inode_used_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
        ino_tree_node_t *ino_rec;

        /*
         * check alignment -- the only way to detect this
         * is to see if the chunk overlaps another chunk
         * already in the tree
         */
        ino_rec = add_inode(mp, agno, ino);

        ASSERT(ino_rec != NULL);
        ASSERT(ino >= ino_rec->ino_startnum &&
               ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

        set_inode_used(ino_rec, ino - ino_rec->ino_startnum);

        return(ino_rec);
}

ino_tree_node_t *
set_inode_free_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
        ino_tree_node_t *ino_rec;

        ino_rec = add_inode(mp, agno, ino);

        ASSERT(ino_rec != NULL);
        ASSERT(ino >= ino_rec->ino_startnum &&
               ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

        set_inode_free(ino_rec, ino - ino_rec->ino_startnum);

        return(ino_rec);
}

void
print_inode_list_int(xfs_agnumber_t agno, int uncertain)
{
        ino_tree_node_t *ino_rec;

        if (!uncertain) {
                fprintf(stderr, _("good inode list is --\n"));
                ino_rec = findfirst_inode_rec(agno);
        } else {
                fprintf(stderr, _("uncertain inode list is --\n"));
                ino_rec = findfirst_uncertain_inode_rec(agno);
        }

        if (ino_rec == NULL) {
                fprintf(stderr, _("agno %d -- no inodes\n"), agno);
                return;
        }

        fprintf(stderr, _("agno %d\n"), agno);

        while (ino_rec != NULL) {
                fprintf(stderr,
        _("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"),
                        (unsigned long)ino_rec,
                        ino_rec->ino_startnum,
                        (unsigned long long)ino_rec->ir_free,
                        (unsigned long long)ino_rec->ino_confirmed);
                ino_rec = next_ino_rec(ino_rec);
        }
}

void
print_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 0);
}

void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
        print_inode_list_int(agno, 1);
}

/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode whose
 * bit is the Nth one set in the mask is stored in the Nth location
 * in the array, where N starts at 0.
 */
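/*
 * Example: pmask 0x29 has bits 0, 3 and 5 set, so three parents are
 * packed as pentries[0..2].  The slot for offset 5 is found by
 * counting the mask bits below bit 5: two are set, so its parent
 * lives in pentries[2].
 */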

void
set_inode_parent(
        ino_tree_node_t         *irec,
        int                     offset,
        xfs_ino_t               parent)
{
        parent_list_t           *ptbl;
        int                     i;
        int                     cnt;
        int                     target;
        __uint64_t              bitmask;
        parent_entry_t          *tmp;

        if (full_ino_ex_data)
                ptbl = irec->ino_un.ex_data->parents;
        else
                ptbl = irec->ino_un.plist;

        if (ptbl == NULL) {
                ptbl = (parent_list_t *)malloc(sizeof(parent_list_t));
                if (!ptbl)
                        do_error(_("couldn't malloc parent list table\n"));

                if (full_ino_ex_data)
                        irec->ino_un.ex_data->parents = ptbl;
                else
                        irec->ino_un.plist = ptbl;

                ptbl->pmask = 1LL << offset;
                ptbl->pentries = (xfs_ino_t*)memalign(sizeof(xfs_ino_t),
                                                        sizeof(xfs_ino_t));
                if (!ptbl->pentries)
                        do_error(_("couldn't memalign pentries table\n"));
#ifdef DEBUG
                ptbl->cnt = 1;
#endif
                ptbl->pentries[0] = parent;

                return;
        }

        if (ptbl->pmask & (1LL << offset)) {
                bitmask = 1LL;
                target = 0;

                for (i = 0; i < offset; i++) {
                        if (ptbl->pmask & bitmask)
                                target++;
                        bitmask <<= 1;
                }
#ifdef DEBUG
                ASSERT(target < ptbl->cnt);
#endif
                ptbl->pentries[target] = parent;

                return;
        }

        bitmask = 1LL;
        cnt = target = 0;

        for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
                if (ptbl->pmask & bitmask) {
                        cnt++;
                        if (i < offset)
                                target++;
                }

                bitmask <<= 1;
        }

#ifdef DEBUG
        ASSERT(cnt == ptbl->cnt);
#endif
        ASSERT(cnt >= target);

        tmp = (xfs_ino_t*)memalign(sizeof(xfs_ino_t), (cnt + 1) * sizeof(xfs_ino_t));
        if (!tmp)
                do_error(_("couldn't memalign pentries table\n"));

        memmove(tmp, ptbl->pentries, target * sizeof(parent_entry_t));

        if (cnt > target)
                memmove(tmp + target + 1, ptbl->pentries + target,
                                (cnt - target) * sizeof(parent_entry_t));

        free(ptbl->pentries);

        ptbl->pentries = tmp;

#ifdef DEBUG
        ptbl->cnt++;
#endif
        ptbl->pentries[target] = parent;
        ptbl->pmask |= (1LL << offset);
}
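/*
 * A minimal usage sketch (hypothetical values): record a parent for
 * the inode at offset 2 of a chunk record and read it back later:
 *
 *      set_inode_parent(irec, 2, parent_ino);
 *      ...
 *      ASSERT(get_inode_parent(irec, 2) == parent_ino);
 *
 * get_inode_parent() below returns 0 for an offset whose parent was
 * never set.
 */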

xfs_ino_t
get_inode_parent(ino_tree_node_t *irec, int offset)
{
        __uint64_t      bitmask;
        parent_list_t   *ptbl;
        int             i;
        int             target;

        if (full_ino_ex_data)
                ptbl = irec->ino_un.ex_data->parents;
        else
                ptbl = irec->ino_un.plist;

        if (ptbl->pmask & (1LL << offset)) {
                bitmask = 1LL;
                target = 0;

                for (i = 0; i < offset; i++) {
                        if (ptbl->pmask & bitmask)
                                target++;
                        bitmask <<= 1;
                }
#ifdef DEBUG
                ASSERT(target < ptbl->cnt);
#endif
                return(ptbl->pentries[target]);
        }

        return(0LL);
}

void
alloc_ex_data(ino_tree_node_t *irec)
{
        parent_list_t   *ptbl;

        ptbl = irec->ino_un.plist;
        irec->ino_un.ex_data = (ino_ex_data_t *)calloc(1, sizeof(ino_ex_data_t));
        if (irec->ino_un.ex_data == NULL)
                do_error(_("could not malloc inode extra data\n"));

        irec->ino_un.ex_data->parents = ptbl;

        switch (irec->nlink_size) {
        case sizeof(__uint8_t):
                irec->ino_un.ex_data->counted_nlinks.un8 =
                        alloc_nlink_array(irec->nlink_size);
                break;
        case sizeof(__uint16_t):
                irec->ino_un.ex_data->counted_nlinks.un16 =
                        alloc_nlink_array(irec->nlink_size);
                break;
        case sizeof(__uint32_t):
                irec->ino_un.ex_data->counted_nlinks.un32 =
                        alloc_nlink_array(irec->nlink_size);
                break;
        default:
                ASSERT(0);
        }
}

void
add_ino_ex_data(xfs_mount_t *mp)
{
        ino_tree_node_t *ino_rec;
        xfs_agnumber_t  i;

        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                ino_rec = findfirst_inode_rec(i);

                while (ino_rec != NULL) {
                        alloc_ex_data(ino_rec);
                        ino_rec = next_ino_rec(ino_rec);
                }
        }
        full_ino_ex_data = 1;
}

static uintptr_t
avl_ino_start(avlnode_t *node)
{
        return((uintptr_t) ((ino_tree_node_t *) node)->ino_startnum);
}

static uintptr_t
avl_ino_end(avlnode_t *node)
{
        return((uintptr_t) (
                ((ino_tree_node_t *) node)->ino_startnum +
                XFS_INODES_PER_CHUNK));
}

avlops_t avl_ino_tree_ops = {
        avl_ino_start,
        avl_ino_end
};
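/*
 * With these callbacks each tree node covers the half-open key range
 * [ino_startnum, ino_startnum + XFS_INODES_PER_CHUNK), which is what
 * lets avl_findrange() map any agino to its containing chunk record.
 */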

void
incore_ino_init(xfs_mount_t *mp)
{
        int i;
        int agcount = mp->m_sb.sb_agcount;

        if ((inode_tree_ptrs = malloc(agcount *
                                        sizeof(avltree_desc_t *))) == NULL)
                do_error(_("couldn't malloc inode tree descriptor table\n"));
        if ((inode_uncertain_tree_ptrs = malloc(agcount *
                                        sizeof(avltree_desc_t *))) == NULL)
                do_error(
                _("couldn't malloc uncertain ino tree descriptor table\n"));

        for (i = 0; i < agcount; i++) {
                if ((inode_tree_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(_("couldn't malloc inode tree descriptor\n"));
                if ((inode_uncertain_tree_ptrs[i] =
                                malloc(sizeof(avltree_desc_t))) == NULL)
                        do_error(
                        _("couldn't malloc uncertain ino tree descriptor\n"));
        }
        for (i = 0; i < agcount; i++) {
                avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
                avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
        }

        if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
                do_error(_("couldn't malloc uncertain inode cache area\n"));

        memset(last_rec, 0, sizeof(ino_tree_node_t *) * agcount);

        full_ino_ex_data = 0;
}