repair/incore_ino.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "threads.h"
#include "err_protos.h"

/*
 * array of inode tree ptrs, one per ag
 */
avltree_desc_t **inode_tree_ptrs;

/*
 * ditto for uncertain inodes
 */
static avltree_desc_t **inode_uncertain_tree_ptrs;

/* memory optimised nlink counting for all inodes */
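/*
 * Link counts are kept per 64-inode chunk in a union of uint8_t,
 * uint16_t and uint32_t arrays.  Every chunk starts with 8-bit
 * counters; when a counter would overflow, the whole array for that
 * chunk is promoted to the next wider type (8 -> 16 -> 32 bits), so
 * the common case costs one byte per inode.
 */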
static void *
alloc_nlink_array(uint8_t nlink_size)
{
	void	*ptr;

	ptr = calloc(XFS_INODES_PER_CHUNK, nlink_size);
	if (!ptr)
		do_error(_("could not allocate nlink array\n"));
	return ptr;
}

static void
nlink_grow_8_to_16(ino_tree_node_t *irec)
{
	uint16_t	*new_nlinks;
	int		i;

	irec->nlink_size = sizeof(uint16_t);

	new_nlinks = alloc_nlink_array(irec->nlink_size);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
		new_nlinks[i] = irec->disk_nlinks.un8[i];
	free(irec->disk_nlinks.un8);
	irec->disk_nlinks.un16 = new_nlinks;

	if (full_ino_ex_data) {
		new_nlinks = alloc_nlink_array(irec->nlink_size);
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			new_nlinks[i] =
				irec->ino_un.ex_data->counted_nlinks.un8[i];
		}
		free(irec->ino_un.ex_data->counted_nlinks.un8);
		irec->ino_un.ex_data->counted_nlinks.un16 = new_nlinks;
	}
}

static void
nlink_grow_16_to_32(ino_tree_node_t *irec)
{
	uint32_t	*new_nlinks;
	int		i;

	irec->nlink_size = sizeof(uint32_t);

	new_nlinks = alloc_nlink_array(irec->nlink_size);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
		new_nlinks[i] = irec->disk_nlinks.un16[i];
	free(irec->disk_nlinks.un16);
	irec->disk_nlinks.un32 = new_nlinks;

	if (full_ino_ex_data) {
		new_nlinks = alloc_nlink_array(irec->nlink_size);

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			new_nlinks[i] =
				irec->ino_un.ex_data->counted_nlinks.un16[i];
		}
		free(irec->ino_un.ex_data->counted_nlinks.un16);
		irec->ino_un.ex_data->counted_nlinks.un32 = new_nlinks;
	}
}

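/*
 * Bump the counted link count for an inode.  If the counter would
 * overflow at its current size, the array is widened first; the
 * switch cases deliberately fall through after a grow so the
 * increment lands at the new size.
 */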
void add_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
	ASSERT(irec->ino_un.ex_data != NULL);

	pthread_mutex_lock(&irec->lock);
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		if (irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] < 0xff) {
			irec->ino_un.ex_data->counted_nlinks.un8[ino_offset]++;
			break;
		}
		nlink_grow_8_to_16(irec);
		/*FALLTHRU*/
	case sizeof(uint16_t):
		if (irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] < 0xffff) {
			irec->ino_un.ex_data->counted_nlinks.un16[ino_offset]++;
			break;
		}
		nlink_grow_16_to_32(irec);
		/*FALLTHRU*/
	case sizeof(uint32_t):
		irec->ino_un.ex_data->counted_nlinks.un32[ino_offset]++;
		break;
	default:
		ASSERT(0);
	}
	pthread_mutex_unlock(&irec->lock);
}

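/*
 * Drop one counted reference from an inode; once the count hits zero
 * the inode is no longer reachable, so clear its ino_reached bit.
 */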
void drop_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
	uint32_t	refs = 0;

	ASSERT(irec->ino_un.ex_data != NULL);

	pthread_mutex_lock(&irec->lock);
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
		break;
	case sizeof(uint16_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
		break;
	case sizeof(uint32_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un32[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
		break;
	default:
		ASSERT(0);
	}

	if (refs == 0)
		irec->ino_un.ex_data->ino_reached &= ~IREC_MASK(ino_offset);
	pthread_mutex_unlock(&irec->lock);
}

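/* Return the counted (observed) link count for an inode. */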
uint32_t num_inode_references(struct ino_tree_node *irec, int ino_offset)
{
	ASSERT(irec->ino_un.ex_data != NULL);

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		return irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
	case sizeof(uint16_t):
		return irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
	case sizeof(uint32_t):
		return irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
	default:
		ASSERT(0);
	}
	return 0;
}

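/*
 * Record the link count found in the on-disk inode, widening the
 * counter array (as in add_inode_ref()) if the value doesn't fit at
 * the current size.
 */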
void set_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset,
		uint32_t nlinks)
{
	pthread_mutex_lock(&irec->lock);
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		if (nlinks < 0xff) {
			irec->disk_nlinks.un8[ino_offset] = nlinks;
			break;
		}
		nlink_grow_8_to_16(irec);
		/*FALLTHRU*/
	case sizeof(uint16_t):
		if (nlinks < 0xffff) {
			irec->disk_nlinks.un16[ino_offset] = nlinks;
			break;
		}
		nlink_grow_16_to_32(irec);
		/*FALLTHRU*/
	case sizeof(uint32_t):
		irec->disk_nlinks.un32[ino_offset] = nlinks;
		break;
	default:
		ASSERT(0);
	}
	pthread_mutex_unlock(&irec->lock);
}

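/* Return the on-disk link count recorded for an inode. */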
uint32_t get_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset)
{
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		return irec->disk_nlinks.un8[ino_offset];
	case sizeof(uint16_t):
		return irec->disk_nlinks.un16[ino_offset];
	case sizeof(uint32_t):
		return irec->disk_nlinks.un32[ino_offset];
	default:
		ASSERT(0);
	}
	return 0;
}

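/*
 * Per-chunk array of directory entry file types.  Only needed when
 * the filesystem has the ftype feature, i.e. directory entries also
 * record the type of the inode they point to.
 */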
static uint8_t *
alloc_ftypes_array(
	struct xfs_mount	*mp)
{
	uint8_t			*ptr;

	if (!xfs_has_ftype(mp))
		return NULL;

	ptr = calloc(XFS_INODES_PER_CHUNK, sizeof(*ptr));
	if (!ptr)
		do_error(_("could not allocate ftypes array\n"));
	return ptr;
}

/*
 * Next is the uncertain inode list -- one list per ag, containing
 * inode records kept in ascending order of starting inode number.
 */

/*
 * Common code for creating inode records for use by trees and lists.
 * Called only from add_inode() and add_aginode_uncertain().
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
static struct ino_tree_node *
alloc_ino_node(
	struct xfs_mount	*mp,
	xfs_agino_t		starting_ino)
{
	struct ino_tree_node	*irec;

	irec = malloc(sizeof(*irec));
	if (!irec)
		do_error(_("inode map malloc failed\n"));

	irec->avl_node.avl_nextino = NULL;
	irec->avl_node.avl_forw = NULL;
	irec->avl_node.avl_back = NULL;

	irec->ino_startnum = starting_ino;
	irec->ino_confirmed = 0;
	irec->ino_isa_dir = 0;
	irec->ino_was_rl = 0;
	irec->ino_is_rl = 0;
	irec->ir_free = (xfs_inofree_t) -1;
	irec->ir_sparse = 0;
	irec->ino_un.ex_data = NULL;
	irec->nlink_size = sizeof(uint8_t);
	irec->disk_nlinks.un8 = alloc_nlink_array(irec->nlink_size);
	irec->ftypes = alloc_ftypes_array(mp);
	pthread_mutex_init(&irec->lock, NULL);
	return irec;
}

static void
free_nlink_array(union ino_nlink nlinks, uint8_t nlink_size)
{
	switch (nlink_size) {
	case sizeof(uint8_t):
		free(nlinks.un8);
		break;
	case sizeof(uint16_t):
		free(nlinks.un16);
		break;
	case sizeof(uint32_t):
		free(nlinks.un32);
		break;
	default:
		ASSERT(0);
	}
}

static void
free_ino_tree_node(
	struct ino_tree_node	*irec)
{
	irec->avl_node.avl_nextino = NULL;
	irec->avl_node.avl_forw = NULL;
	irec->avl_node.avl_back = NULL;

	free_nlink_array(irec->disk_nlinks, irec->nlink_size);
	if (irec->ino_un.ex_data != NULL) {
		if (full_ino_ex_data) {
			free(irec->ino_un.ex_data->parents);
			free_nlink_array(irec->ino_un.ex_data->counted_nlinks,
					 irec->nlink_size);
		}
		free(irec->ino_un.ex_data);
	}

	free(irec->ftypes);
	pthread_mutex_destroy(&irec->lock);
	free(irec);
}

/*
 * last referenced cache for uncertain inodes
 */
static ino_tree_node_t **last_rec;

/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes).  Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
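/*
 * Illustrative example (hypothetical numbers): with
 * XFS_INODES_PER_CHUNK == 64, agino 100 rounds down to the record
 * starting at s_ino == 64 and occupies offset 100 - 64 == 36 within
 * that record.
 */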
void
add_aginode_uncertain(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		ino,
	int			free)
{
	ino_tree_node_t		*ino_rec;
	xfs_agino_t		s_ino;
	int			offset;

	ASSERT(agno < glob_agcount);
	ASSERT(last_rec != NULL);

	s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

	/*
	 * check for a cache hit
	 */
	if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
		offset = ino - s_ino;
		if (free)
			set_inode_free(last_rec[agno], offset);
		else
			set_inode_used(last_rec[agno], offset);

		return;
	}

	/*
	 * check to see if record containing inode is already in the tree.
	 * if not, add it
	 */
	ino_rec = (ino_tree_node_t *)
		avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
	if (!ino_rec) {
		ino_rec = alloc_ino_node(mp, s_ino);

		if (!avl_insert(inode_uncertain_tree_ptrs[agno],
				&ino_rec->avl_node))
			do_error(
	_("add_aginode_uncertain - duplicate inode range\n"));
	}

	if (free)
		set_inode_free(ino_rec, ino - s_ino);
	else
		set_inode_used(ino_rec, ino - s_ino);

	/*
	 * set cache entry
	 */
	last_rec[agno] = ino_rec;
}

/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion.
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
	add_aginode_uncertain(mp, XFS_INO_TO_AGNO(mp, ino),
				XFS_INO_TO_AGINO(mp, ino), free);
}

/*
 * pull the indicated inode record out of the uncertain inode tree
 */
void
get_uncertain_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno,
			ino_tree_node_t *ino_rec)
{
	ASSERT(inode_uncertain_tree_ptrs != NULL);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(inode_uncertain_tree_ptrs[agno] != NULL);

	avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

ino_tree_node_t *
findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
{
	return((ino_tree_node_t *)
		inode_uncertain_tree_ptrs[agno]->avl_firstino);
}

ino_tree_node_t *
find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
	return((ino_tree_node_t *)
		avl_findrange(inode_uncertain_tree_ptrs[agno], ino));
}

void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
	last_rec[agno] = NULL;
}

/*
 * Next comes the inode trees.  One per AG, AVL trees of inode records, each
 * inode record tracking 64 inodes
 */

/*
 * Set up an inode tree record for a group of inodes that will include the
 * requested inode.
 *
 * This does NOT error-check for duplicate records.  The caller is
 * responsible for checking that.  ino must be the start of an
 * XFS_INODES_PER_CHUNK (64) inode chunk.
 *
 * Each inode resides in a 64-inode chunk, and the fs allocates
 * MAX(64, inodes-per-block) inodes at a time: when a single block can
 * hold more than one chunk (inodes per block > 64), several chunks
 * are allocated at once.  Allocating in single-chunk pieces causes us
 * problems when it takes more than one fs block to contain an inode
 * chunk, because the chunks can then start on *any* block boundary.
 * So we assume that the caller has a clue, because at this level we
 * don't.
 */
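/*
 * Example (hypothetical geometry): 512-byte inodes in 4096-byte
 * blocks give 8 inodes per block, so a 64-inode chunk spans 8 fs
 * blocks; 256-byte inodes in 32768-byte blocks give 128 inodes per
 * block, so one block holds two chunks.
 */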
static struct ino_tree_node *
add_inode(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino)
{
	struct ino_tree_node	*irec;

	irec = alloc_ino_node(mp, agino);
	if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node))
		do_warn(_("add_inode - duplicate inode range\n"));
	return irec;
}

/*
 * pull the indicated inode record out of the inode tree
 */
void
get_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	ASSERT(inode_tree_ptrs != NULL);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(inode_tree_ptrs[agno] != NULL);

	avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

/*
 * free the designated inode record (return it to the free pool)
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	free_ino_tree_node(ino_rec);
}

void
find_inode_rec_range(struct xfs_mount *mp, xfs_agnumber_t agno,
		xfs_agino_t start_ino, xfs_agino_t end_ino,
		ino_tree_node_t **first, ino_tree_node_t **last)
{
	*first = *last = NULL;

	/*
	 * is the AG inside the filesystem?
	 */
	if (agno < mp->m_sb.sb_agcount)
		avl_findranges(inode_tree_ptrs[agno], start_ino,
			end_ino, (avlnode_t **) first, (avlnode_t **) last);
}

/*
 * if ino doesn't exist, it must be properly aligned -- on a
 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
 * whichever alignment is larger.
 */
ino_tree_node_t *
set_inode_used_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t *ino_rec;

	/*
	 * check alignment -- the only way to detect this
	 * is to see if the chunk overlaps another chunk
	 * already in the tree
	 */
	ino_rec = add_inode(mp, agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_used(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

ino_tree_node_t *
set_inode_free_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t *ino_rec;

	ino_rec = add_inode(mp, agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_free(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

static void
print_inode_list_int(xfs_agnumber_t agno, int uncertain)
{
	ino_tree_node_t *ino_rec;

	if (!uncertain) {
		fprintf(stderr, _("good inode list is --\n"));
		ino_rec = findfirst_inode_rec(agno);
	} else {
		fprintf(stderr, _("uncertain inode list is --\n"));
		ino_rec = findfirst_uncertain_inode_rec(agno);
	}

	if (ino_rec == NULL) {
		fprintf(stderr, _("agno %d -- no inodes\n"), agno);
		return;
	}

	fprintf(stderr, _("agno %d\n"), agno);

	while (ino_rec != NULL) {
		fprintf(stderr,
	_("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"),
			(unsigned long)ino_rec,
			ino_rec->ino_startnum,
			(unsigned long long)ino_rec->ir_free,
			(unsigned long long)ino_rec->ino_confirmed);
		/* no-op; a convenient line for a debugger breakpoint */
		if (ino_rec->ino_startnum == 0)
			ino_rec = ino_rec;
		ino_rec = next_ino_rec(ino_rec);
	}
}

void
print_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 0);
}

void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 1);
}

/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode whose
 * bit is the Nth set bit in the mask has its parent stored in the Nth
 * location in the array, where N starts at 0.
 */
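/*
 * Worked example (hypothetical values): if pmask == 0b10110, the
 * inodes at offsets 1, 2 and 4 have parents recorded, stored in
 * pentries[0], pentries[1] and pentries[2] respectively.  Recording a
 * parent for offset 3 shifts the offset-4 entry up one slot, stores
 * the new parent in pentries[2], and sets bit 3 in pmask.
 */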

void
set_inode_parent(
	ino_tree_node_t		*irec,
	int			offset,
	xfs_ino_t		parent)
{
	parent_list_t		*ptbl;
	int			i;
	int			cnt;
	int			target;
	uint64_t		bitmask;
	parent_entry_t		*tmp;

	pthread_mutex_lock(&irec->lock);
	if (full_ino_ex_data)
		ptbl = irec->ino_un.ex_data->parents;
	else
		ptbl = irec->ino_un.plist;

	if (ptbl == NULL) {
		ptbl = (parent_list_t *)malloc(sizeof(parent_list_t));
		if (!ptbl)
			do_error(_("couldn't malloc parent list table\n"));

		if (full_ino_ex_data)
			irec->ino_un.ex_data->parents = ptbl;
		else
			irec->ino_un.plist = ptbl;

		ptbl->pmask = 1ULL << offset;
		ptbl->pentries = (xfs_ino_t*)memalign(sizeof(xfs_ino_t),
							sizeof(xfs_ino_t));
		if (!ptbl->pentries)
			do_error(_("couldn't memalign pentries table\n"));
#ifdef DEBUG
		ptbl->cnt = 1;
#endif
		ptbl->pentries[0] = parent;

		pthread_mutex_unlock(&irec->lock);
		return;
	}

	if (ptbl->pmask & (1ULL << offset)) {
		bitmask = 1ULL;
		target = 0;

		for (i = 0; i < offset; i++) {
			if (ptbl->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < ptbl->cnt);
#endif
		ptbl->pentries[target] = parent;

		pthread_mutex_unlock(&irec->lock);
		return;
	}

	bitmask = 1ULL;
	cnt = target = 0;

	for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
		if (ptbl->pmask & bitmask) {
			cnt++;
			if (i < offset)
				target++;
		}

		bitmask <<= 1;
	}

#ifdef DEBUG
	ASSERT(cnt == ptbl->cnt);
#endif
	ASSERT(cnt >= target);

	tmp = (xfs_ino_t*)memalign(sizeof(xfs_ino_t), (cnt + 1) * sizeof(xfs_ino_t));
	if (!tmp)
		do_error(_("couldn't memalign pentries table\n"));

	memmove(tmp, ptbl->pentries, target * sizeof(parent_entry_t));

	if (cnt > target)
		memmove(tmp + target + 1, ptbl->pentries + target,
				(cnt - target) * sizeof(parent_entry_t));

	free(ptbl->pentries);

	ptbl->pentries = tmp;

#ifdef DEBUG
	ptbl->cnt++;
#endif
	ptbl->pentries[target] = parent;
	ptbl->pmask |= (1ULL << offset);
	pthread_mutex_unlock(&irec->lock);
}

xfs_ino_t
get_inode_parent(ino_tree_node_t *irec, int offset)
{
	uint64_t	bitmask;
	parent_list_t	*ptbl;
	int		i;
	int		target;

	pthread_mutex_lock(&irec->lock);
	if (full_ino_ex_data)
		ptbl = irec->ino_un.ex_data->parents;
	else
		ptbl = irec->ino_un.plist;

	if (ptbl->pmask & (1ULL << offset)) {
		bitmask = 1ULL;
		target = 0;

		for (i = 0; i < offset; i++) {
			if (ptbl->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < ptbl->cnt);
#endif
		pthread_mutex_unlock(&irec->lock);
		return(ptbl->pentries[target]);
	}

	pthread_mutex_unlock(&irec->lock);
	return(0LL);
}

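/*
 * Promote an inode record from carrying just a parent list to
 * carrying full extra data (parent list plus counted nlinks).  The
 * existing parent list pointer is preserved across the union switch.
 */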
void
alloc_ex_data(ino_tree_node_t *irec)
{
	parent_list_t	*ptbl;

	ptbl = irec->ino_un.plist;
	irec->ino_un.ex_data = (ino_ex_data_t *)calloc(1, sizeof(ino_ex_data_t));
	if (irec->ino_un.ex_data == NULL)
		do_error(_("could not malloc inode extra data\n"));

	irec->ino_un.ex_data->parents = ptbl;

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		irec->ino_un.ex_data->counted_nlinks.un8 =
			alloc_nlink_array(irec->nlink_size);
		break;
	case sizeof(uint16_t):
		irec->ino_un.ex_data->counted_nlinks.un16 =
			alloc_nlink_array(irec->nlink_size);
		break;
	case sizeof(uint32_t):
		irec->ino_un.ex_data->counted_nlinks.un32 =
			alloc_nlink_array(irec->nlink_size);
		break;
	default:
		ASSERT(0);
	}
}

void
add_ino_ex_data(xfs_mount_t *mp)
{
	ino_tree_node_t	*ino_rec;
	xfs_agnumber_t	i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		ino_rec = findfirst_inode_rec(i);

		while (ino_rec != NULL) {
			alloc_ex_data(ino_rec);
			ino_rec = next_ino_rec(ino_rec);
		}
	}
	full_ino_ex_data = 1;
}

static uintptr_t
avl_ino_start(avlnode_t *node)
{
	return((uintptr_t) ((ino_tree_node_t *) node)->ino_startnum);
}

static uintptr_t
avl_ino_end(avlnode_t *node)
{
	return((uintptr_t) (
		((ino_tree_node_t *) node)->ino_startnum +
		XFS_INODES_PER_CHUNK));
}

static avlops_t avl_ino_tree_ops = {
	avl_ino_start,
	avl_ino_end
};

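/*
 * Allocate and initialise the per-AG good and uncertain inode trees
 * and the last-referenced record cache.
 */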
void
incore_ino_init(xfs_mount_t *mp)
{
	int i;
	int agcount = mp->m_sb.sb_agcount;

	if ((inode_tree_ptrs = malloc(agcount *
					sizeof(avltree_desc_t *))) == NULL)
		do_error(_("couldn't malloc inode tree descriptor table\n"));
	if ((inode_uncertain_tree_ptrs = malloc(agcount *
					sizeof(avltree_desc_t *))) == NULL)
		do_error(
		_("couldn't malloc uncertain ino tree descriptor table\n"));

	for (i = 0; i < agcount; i++) {
		if ((inode_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error(_("couldn't malloc inode tree descriptor\n"));
		if ((inode_uncertain_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error(
			_("couldn't malloc uncertain ino tree descriptor\n"));
	}
	for (i = 0; i < agcount; i++) {
		avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
		avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
	}

	if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
		do_error(_("couldn't malloc uncertain inode cache area\n"));

	memset(last_rec, 0, sizeof(ino_tree_node_t *) * agcount);

	full_ino_ex_data = 0;
}
833 }