// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "incore.h"
#include "agheader.h"
#include "protos.h"
#include "threads.h"
#include "err_protos.h"

/*
 * array of inode tree ptrs, one per ag
 */
avltree_desc_t	**inode_tree_ptrs;

/*
 * ditto for uncertain inodes
 */
static avltree_desc_t	**inode_uncertain_tree_ptrs;

/* memory optimised nlink counting for all inodes */

static void *
alloc_nlink_array(uint8_t nlink_size)
{
	void	*ptr;

	ptr = calloc(XFS_INODES_PER_CHUNK, nlink_size);
	if (!ptr)
		do_error(_("could not allocate nlink array\n"));
	return ptr;
}

static void
nlink_grow_8_to_16(ino_tree_node_t *irec)
{
	uint16_t	*new_nlinks;
	int		i;

	irec->nlink_size = sizeof(uint16_t);

	new_nlinks = alloc_nlink_array(irec->nlink_size);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
		new_nlinks[i] = irec->disk_nlinks.un8[i];
	free(irec->disk_nlinks.un8);
	irec->disk_nlinks.un16 = new_nlinks;

	if (full_ino_ex_data) {
		new_nlinks = alloc_nlink_array(irec->nlink_size);
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			new_nlinks[i] =
				irec->ino_un.ex_data->counted_nlinks.un8[i];
		}
		free(irec->ino_un.ex_data->counted_nlinks.un8);
		irec->ino_un.ex_data->counted_nlinks.un16 = new_nlinks;
	}
}

static void
nlink_grow_16_to_32(ino_tree_node_t *irec)
{
	uint32_t	*new_nlinks;
	int		i;

	irec->nlink_size = sizeof(uint32_t);

	new_nlinks = alloc_nlink_array(irec->nlink_size);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
		new_nlinks[i] = irec->disk_nlinks.un16[i];
	free(irec->disk_nlinks.un16);
	irec->disk_nlinks.un32 = new_nlinks;

	if (full_ino_ex_data) {
		new_nlinks = alloc_nlink_array(irec->nlink_size);

		for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
			new_nlinks[i] =
				irec->ino_un.ex_data->counted_nlinks.un16[i];
		}
		free(irec->ino_un.ex_data->counted_nlinks.un16);
		irec->ino_un.ex_data->counted_nlinks.un32 = new_nlinks;
	}
}

void add_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
	ASSERT(irec->ino_un.ex_data != NULL);

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		if (irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] < 0xff) {
			irec->ino_un.ex_data->counted_nlinks.un8[ino_offset]++;
			break;
		}
		nlink_grow_8_to_16(irec);
		/*FALLTHRU*/
	case sizeof(uint16_t):
		if (irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] < 0xffff) {
			irec->ino_un.ex_data->counted_nlinks.un16[ino_offset]++;
			break;
		}
		nlink_grow_16_to_32(irec);
		/*FALLTHRU*/
	case sizeof(uint32_t):
		irec->ino_un.ex_data->counted_nlinks.un32[ino_offset]++;
		break;
	default:
		ASSERT(0);
	}
}
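
/*
 * Illustrative sketch, not repair code: the counted_nlinks width is an
 * implementation detail.  A caller only bumps the count and reads it back;
 * promotion from un8 to un16 to un32 happens inside add_inode_ref() the
 * moment a counter would overflow.  The helper name below is hypothetical,
 * and it assumes ex_data has already been attached via alloc_ex_data().
 */
static inline void
example_bump_and_check(struct ino_tree_node *irec, int ino_offset)
{
	add_inode_ref(irec, ino_offset);	/* may widen the chunk's array */
	ASSERT(num_inode_references(irec, ino_offset) > 0);
}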

void drop_inode_ref(struct ino_tree_node *irec, int ino_offset)
{
	uint32_t	refs = 0;

	ASSERT(irec->ino_un.ex_data != NULL);

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un8[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
		break;
	case sizeof(uint16_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un16[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
		break;
	case sizeof(uint32_t):
		ASSERT(irec->ino_un.ex_data->counted_nlinks.un32[ino_offset] > 0);
		refs = --irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
		break;
	default:
		ASSERT(0);
	}

	if (refs == 0)
		irec->ino_un.ex_data->ino_reached &= ~IREC_MASK(ino_offset);
}

uint32_t num_inode_references(struct ino_tree_node *irec, int ino_offset)
{
	ASSERT(irec->ino_un.ex_data != NULL);

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		return irec->ino_un.ex_data->counted_nlinks.un8[ino_offset];
	case sizeof(uint16_t):
		return irec->ino_un.ex_data->counted_nlinks.un16[ino_offset];
	case sizeof(uint32_t):
		return irec->ino_un.ex_data->counted_nlinks.un32[ino_offset];
	default:
		ASSERT(0);
	}
	return 0;
}

void set_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset,
		uint32_t nlinks)
{
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		if (nlinks < 0xff) {
			irec->disk_nlinks.un8[ino_offset] = nlinks;
			break;
		}
		nlink_grow_8_to_16(irec);
		/*FALLTHRU*/
	case sizeof(uint16_t):
		if (nlinks < 0xffff) {
			irec->disk_nlinks.un16[ino_offset] = nlinks;
			break;
		}
		nlink_grow_16_to_32(irec);
		/*FALLTHRU*/
	case sizeof(uint32_t):
		irec->disk_nlinks.un32[ino_offset] = nlinks;
		break;
	default:
		ASSERT(0);
	}
}

uint32_t get_inode_disk_nlinks(struct ino_tree_node *irec, int ino_offset)
{
	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		return irec->disk_nlinks.un8[ino_offset];
	case sizeof(uint16_t):
		return irec->disk_nlinks.un16[ino_offset];
	case sizeof(uint32_t):
		return irec->disk_nlinks.un32[ino_offset];
	default:
		ASSERT(0);
	}
	return 0;
}
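
/*
 * Usage sketch (illustrative values): the two accessors above round-trip
 * regardless of which width the chunk currently uses, e.g.
 *
 *	set_inode_disk_nlinks(irec, off, 300);	forces un8 -> un16
 *	ASSERT(get_inode_disk_nlinks(irec, off) == 300);
 */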

static uint8_t *
alloc_ftypes_array(
	struct xfs_mount	*mp)
{
	uint8_t			*ptr;

	if (!xfs_sb_version_hasftype(&mp->m_sb))
		return NULL;

	ptr = calloc(XFS_INODES_PER_CHUNK, sizeof(*ptr));
	if (!ptr)
		do_error(_("could not allocate ftypes array\n"));
	return ptr;
}

/*
 * Next is the uncertain inode list -- a list of inode records sorted
 * (in ascending order) on the starting inode number.  There is one
 * list per ag.
 */

/*
 * Common code for creating inode records for use by trees and lists.
 * Called only from add_inodes and add_inodes_uncertain.
 *
 * IMPORTANT: all inodes (inode records) start off as free and
 * unconfirmed.
 */
static struct ino_tree_node *
alloc_ino_node(
	struct xfs_mount	*mp,
	xfs_agino_t		starting_ino)
{
	struct ino_tree_node	*irec;

	irec = malloc(sizeof(*irec));
	if (!irec)
		do_error(_("inode map malloc failed\n"));

	irec->avl_node.avl_nextino = NULL;
	irec->avl_node.avl_forw = NULL;
	irec->avl_node.avl_back = NULL;

	irec->ino_startnum = starting_ino;
	irec->ino_confirmed = 0;
	irec->ino_isa_dir = 0;
	irec->ino_was_rl = 0;
	irec->ino_is_rl = 0;
	irec->ir_free = (xfs_inofree_t) - 1;
	irec->ir_sparse = 0;
	irec->ino_un.ex_data = NULL;
	irec->nlink_size = sizeof(uint8_t);
	irec->disk_nlinks.un8 = alloc_nlink_array(irec->nlink_size);
	irec->ftypes = alloc_ftypes_array(mp);
	return irec;
}
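
/*
 * A minimal sketch, not called by repair: a freshly allocated record
 * reports every inode as free and unconfirmed, matching the IMPORTANT
 * note above.  Assumes the is_inode_free()/is_inode_confirmed() helpers
 * from incore.h.
 */
static void
example_fresh_record_state(struct ino_tree_node *irec)
{
	int	i;

	for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
		ASSERT(is_inode_free(irec, i));
		ASSERT(!is_inode_confirmed(irec, i));
	}
}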

static void
free_nlink_array(union ino_nlink nlinks, uint8_t nlink_size)
{
	switch (nlink_size) {
	case sizeof(uint8_t):
		free(nlinks.un8);
		break;
	case sizeof(uint16_t):
		free(nlinks.un16);
		break;
	case sizeof(uint32_t):
		free(nlinks.un32);
		break;
	default:
		ASSERT(0);
	}
}

static void
free_ino_tree_node(
	struct ino_tree_node	*irec)
{
	irec->avl_node.avl_nextino = NULL;
	irec->avl_node.avl_forw = NULL;
	irec->avl_node.avl_back = NULL;

	free_nlink_array(irec->disk_nlinks, irec->nlink_size);
	if (irec->ino_un.ex_data != NULL) {
		if (full_ino_ex_data) {
			free(irec->ino_un.ex_data->parents);
			free_nlink_array(irec->ino_un.ex_data->counted_nlinks,
					 irec->nlink_size);
		}
		free(irec->ino_un.ex_data);
	}

	free(irec->ftypes);
	free(irec);
}

/*
 * last referenced cache for uncertain inodes
 */
static ino_tree_node_t	**last_rec;

/*
 * ok, the uncertain inodes are a set of trees just like the
 * good inodes but all starting inode records are (arbitrarily)
 * aligned on XFS_INODES_PER_CHUNK boundaries to prevent overlaps.
 * this means we may have partial records in the tree (e.g. records
 * without 64 confirmed uncertain inodes).  Tough.
 *
 * free is set to 1 if the inode is thought to be free, 0 if used
 */
void
add_aginode_uncertain(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		ino,
	int			free)
{
	ino_tree_node_t		*ino_rec;
	xfs_agino_t		s_ino;
	int			offset;

	ASSERT(agno < glob_agcount);
	ASSERT(last_rec != NULL);

	s_ino = rounddown(ino, XFS_INODES_PER_CHUNK);

	/*
	 * check for a cache hit
	 */
	if (last_rec[agno] != NULL && last_rec[agno]->ino_startnum == s_ino) {
		offset = ino - s_ino;
		if (free)
			set_inode_free(last_rec[agno], offset);
		else
			set_inode_used(last_rec[agno], offset);

		return;
	}

	/*
	 * check to see if record containing inode is already in the tree.
	 * if not, add it
	 */
	ino_rec = (ino_tree_node_t *)
		avl_findrange(inode_uncertain_tree_ptrs[agno], s_ino);
	if (!ino_rec) {
		ino_rec = alloc_ino_node(mp, s_ino);

		if (!avl_insert(inode_uncertain_tree_ptrs[agno],
				&ino_rec->avl_node))
			do_error(
	_("add_aginode_uncertain - duplicate inode range\n"));
	}

	if (free)
		set_inode_free(ino_rec, ino - s_ino);
	else
		set_inode_used(ino_rec, ino - s_ino);

	/*
	 * set cache entry
	 */
	last_rec[agno] = ino_rec;
}
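
/*
 * Worked example (illustrative numbers): for agino 70,
 * s_ino = rounddown(70, 64) = 64, so the record starts at 64 and offset 6
 * (70 - 64) is marked free or used.  An immediately following call for
 * agino 71 finds last_rec[agno]->ino_startnum == 64 and takes the
 * cache-hit path without touching the tree.
 */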

/*
 * like add_aginode_uncertain() only it needs an xfs_mount_t *
 * to perform the inode number conversion.
 */
void
add_inode_uncertain(xfs_mount_t *mp, xfs_ino_t ino, int free)
{
	add_aginode_uncertain(mp, XFS_INO_TO_AGNO(mp, ino),
				XFS_INO_TO_AGINO(mp, ino), free);
}

/*
 * pull the indicated inode record out of the uncertain inode tree
 */
void
get_uncertain_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno,
			ino_tree_node_t *ino_rec)
{
	ASSERT(inode_tree_ptrs != NULL);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(inode_tree_ptrs[agno] != NULL);

	avl_delete(inode_uncertain_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

ino_tree_node_t *
findfirst_uncertain_inode_rec(xfs_agnumber_t agno)
{
	return((ino_tree_node_t *)
		inode_uncertain_tree_ptrs[agno]->avl_firstino);
}

ino_tree_node_t *
find_uncertain_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
	return((ino_tree_node_t *)
		avl_findrange(inode_uncertain_tree_ptrs[agno], ino));
}

void
clear_uncertain_ino_cache(xfs_agnumber_t agno)
{
	last_rec[agno] = NULL;
}


/*
 * Next come the inode trees.  One per AG, AVL trees of inode records,
 * each inode record tracking 64 inodes.
 */

/*
 * Set up an inode tree record for a group of inodes that will include the
 * requested inode.
 *
 * This does NOT error-check for duplicate records.  The caller is
 * responsible for checking that.  Ino must be the start of an
 * XFS_INODES_PER_CHUNK (64) inode chunk.
 *
 * Each inode resides in a 64-inode chunk which can be part of an
 * allocation of one or more chunks (MAX(64, inodes-per-block) inodes).
 * The fs allocates multiple chunks at once when a block can hold more
 * than one chunk (inodes per block > 64).  Allocating in single-chunk
 * pieces causes us problems when it takes more than one fs block to
 * contain an inode chunk, because the chunks can start on *any* block
 * boundary.  So we assume that the caller has a clue, because at this
 * level we don't.
 */
static struct ino_tree_node *
add_inode(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino)
{
	struct ino_tree_node	*irec;

	irec = alloc_ino_node(mp, agino);
	if (!avl_insert(inode_tree_ptrs[agno], &irec->avl_node))
		do_warn(_("add_inode - duplicate inode range\n"));
	return irec;
}

/*
 * pull the indicated inode record out of the inode tree
 */
void
get_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	ASSERT(inode_tree_ptrs != NULL);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(inode_tree_ptrs[agno] != NULL);

	avl_delete(inode_tree_ptrs[agno], &ino_rec->avl_node);

	ino_rec->avl_node.avl_nextino = NULL;
	ino_rec->avl_node.avl_forw = NULL;
	ino_rec->avl_node.avl_back = NULL;
}

/*
 * free the designated inode record (return it to the free pool)
 */
/* ARGSUSED */
void
free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec)
{
	free_ino_tree_node(ino_rec);
}

void
find_inode_rec_range(struct xfs_mount *mp, xfs_agnumber_t agno,
		xfs_agino_t start_ino, xfs_agino_t end_ino,
		ino_tree_node_t **first, ino_tree_node_t **last)
{
	*first = *last = NULL;

	/*
	 * Is the AG inside the file system ?
	 */
	if (agno < mp->m_sb.sb_agcount)
		avl_findranges(inode_tree_ptrs[agno], start_ino,
			end_ino, (avlnode_t **) first, (avlnode_t **) last);
}

/*
 * if ino doesn't exist, it must be properly aligned -- on a
 * filesystem block boundary or XFS_INODES_PER_CHUNK boundary,
 * whichever alignment is larger.
 */
ino_tree_node_t *
set_inode_used_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t *ino_rec;

	/*
	 * check alignment -- the only way to detect this
	 * is to see if the chunk overlaps another chunk
	 * already in the tree
	 */
	ino_rec = add_inode(mp, agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_used(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

ino_tree_node_t *
set_inode_free_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino)
{
	ino_tree_node_t *ino_rec;

	ino_rec = add_inode(mp, agno, ino);

	ASSERT(ino_rec != NULL);
	ASSERT(ino >= ino_rec->ino_startnum &&
		ino - ino_rec->ino_startnum < XFS_INODES_PER_CHUNK);

	set_inode_free(ino_rec, ino - ino_rec->ino_startnum);

	return(ino_rec);
}

static void
print_inode_list_int(xfs_agnumber_t agno, int uncertain)
{
	ino_tree_node_t *ino_rec;

	if (!uncertain) {
		fprintf(stderr, _("good inode list is --\n"));
		ino_rec = findfirst_inode_rec(agno);
	} else {
		fprintf(stderr, _("uncertain inode list is --\n"));
		ino_rec = findfirst_uncertain_inode_rec(agno);
	}

	if (ino_rec == NULL) {
		fprintf(stderr, _("agno %d -- no inodes\n"), agno);
		return;
	}

	printf(_("agno %d\n"), agno);

	while (ino_rec != NULL) {
		fprintf(stderr,
	_("\tptr = %lx, start = 0x%x, free = 0x%llx, confirmed = 0x%llx\n"),
			(unsigned long)ino_rec,
			ino_rec->ino_startnum,
			(unsigned long long)ino_rec->ir_free,
			(unsigned long long)ino_rec->ino_confirmed);
		ino_rec = next_ino_rec(ino_rec);
	}
}

void
print_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 0);
}

void
print_uncertain_inode_list(xfs_agnumber_t agno)
{
	print_inode_list_int(agno, 1);
}

/*
 * set parent -- use a bitmask and a packed array.  The bitmask
 * indicates which inodes have an entry in the array.  An inode whose
 * bit is the Nth set bit in the mask is stored in the Nth location in
 * the array, where N starts at 0.
 */

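/*
 * Worked example (illustrative): if pmask has bits 3, 17 and 40 set,
 * pentries[] holds three entries; the parent for offset 17 lives at
 * pentries[1] because exactly one lower bit (bit 3) is set.
 */
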
void
set_inode_parent(
	ino_tree_node_t		*irec,
	int			offset,
	xfs_ino_t		parent)
{
	parent_list_t		*ptbl;
	int			i;
	int			cnt;
	int			target;
	uint64_t		bitmask;
	parent_entry_t		*tmp;

	if (full_ino_ex_data)
		ptbl = irec->ino_un.ex_data->parents;
	else
		ptbl = irec->ino_un.plist;

	if (ptbl == NULL) {
		ptbl = (parent_list_t *)malloc(sizeof(parent_list_t));
		if (!ptbl)
			do_error(_("couldn't malloc parent list table\n"));

		if (full_ino_ex_data)
			irec->ino_un.ex_data->parents = ptbl;
		else
			irec->ino_un.plist = ptbl;

		ptbl->pmask = 1ULL << offset;
		ptbl->pentries = (xfs_ino_t*)memalign(sizeof(xfs_ino_t),
							sizeof(xfs_ino_t));
		if (!ptbl->pentries)
			do_error(_("couldn't memalign pentries table\n"));
#ifdef DEBUG
		ptbl->cnt = 1;
#endif
		ptbl->pentries[0] = parent;

		return;
	}

	if (ptbl->pmask & (1ULL << offset)) {
		bitmask = 1ULL;
		target = 0;

		for (i = 0; i < offset; i++) {
			if (ptbl->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < ptbl->cnt);
#endif
		ptbl->pentries[target] = parent;

		return;
	}

	bitmask = 1ULL;
	cnt = target = 0;

	for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
		if (ptbl->pmask & bitmask) {
			cnt++;
			if (i < offset)
				target++;
		}

		bitmask <<= 1;
	}

#ifdef DEBUG
	ASSERT(cnt == ptbl->cnt);
#endif
	ASSERT(cnt >= target);

	tmp = (xfs_ino_t*)memalign(sizeof(xfs_ino_t), (cnt + 1) * sizeof(xfs_ino_t));
	if (!tmp)
		do_error(_("couldn't memalign pentries table\n"));

	memmove(tmp, ptbl->pentries, target * sizeof(parent_entry_t));

	if (cnt > target)
		memmove(tmp + target + 1, ptbl->pentries + target,
				(cnt - target) * sizeof(parent_entry_t));

	free(ptbl->pentries);

	ptbl->pentries = tmp;

#ifdef DEBUG
	ptbl->cnt++;
#endif
	ptbl->pentries[target] = parent;
	ptbl->pmask |= (1ULL << offset);
}

xfs_ino_t
get_inode_parent(ino_tree_node_t *irec, int offset)
{
	uint64_t	bitmask;
	parent_list_t	*ptbl;
	int		i;
	int		target;

	if (full_ino_ex_data)
		ptbl = irec->ino_un.ex_data->parents;
	else
		ptbl = irec->ino_un.plist;

	if (ptbl->pmask & (1ULL << offset)) {
		bitmask = 1ULL;
		target = 0;

		for (i = 0; i < offset; i++) {
			if (ptbl->pmask & bitmask)
				target++;
			bitmask <<= 1;
		}
#ifdef DEBUG
		ASSERT(target < ptbl->cnt);
#endif
		return(ptbl->pentries[target]);
	}

	return(0LL);
}
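
/*
 * A minimal sketch, not used by repair: the linear scans above that
 * derive 'target' count the mask bits set below 'offset', which is
 * equivalent to a population count.  Assumes a GCC-style
 * __builtin_popcountll(); the helper name is hypothetical.
 */
static inline int
example_parent_slot(uint64_t pmask, int offset)
{
	/* entries stored for inodes at lower offsets in this chunk */
	return __builtin_popcountll(pmask & ((1ULL << offset) - 1));
}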

void
alloc_ex_data(ino_tree_node_t *irec)
{
	parent_list_t	*ptbl;

	ptbl = irec->ino_un.plist;
	irec->ino_un.ex_data = (ino_ex_data_t *)calloc(1, sizeof(ino_ex_data_t));
	if (irec->ino_un.ex_data == NULL)
		do_error(_("could not malloc inode extra data\n"));

	irec->ino_un.ex_data->parents = ptbl;

	switch (irec->nlink_size) {
	case sizeof(uint8_t):
		irec->ino_un.ex_data->counted_nlinks.un8 =
			alloc_nlink_array(irec->nlink_size);
		break;
	case sizeof(uint16_t):
		irec->ino_un.ex_data->counted_nlinks.un16 =
			alloc_nlink_array(irec->nlink_size);
		break;
	case sizeof(uint32_t):
		irec->ino_un.ex_data->counted_nlinks.un32 =
			alloc_nlink_array(irec->nlink_size);
		break;
	default:
		ASSERT(0);
	}
}

void
add_ino_ex_data(xfs_mount_t *mp)
{
	ino_tree_node_t	*ino_rec;
	xfs_agnumber_t	i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		ino_rec = findfirst_inode_rec(i);

		while (ino_rec != NULL) {
			alloc_ex_data(ino_rec);
			ino_rec = next_ino_rec(ino_rec);
		}
	}
	full_ino_ex_data = 1;
}

static uintptr_t
avl_ino_start(avlnode_t *node)
{
	return((uintptr_t) ((ino_tree_node_t *) node)->ino_startnum);
}

static uintptr_t
avl_ino_end(avlnode_t *node)
{
	return((uintptr_t) (
		((ino_tree_node_t *) node)->ino_startnum +
		XFS_INODES_PER_CHUNK));
}

static avlops_t avl_ino_tree_ops = {
	avl_ino_start,
	avl_ino_end
};
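
/*
 * Descriptive note: these two callbacks make avl_findrange() treat each
 * record as covering the half-open key range
 * [ino_startnum, ino_startnum + XFS_INODES_PER_CHUNK), so a lookup by any
 * agino lands on the record for its 64-inode chunk.
 */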

void
incore_ino_init(xfs_mount_t *mp)
{
	int i;
	int agcount = mp->m_sb.sb_agcount;

	if ((inode_tree_ptrs = malloc(agcount *
			sizeof(avltree_desc_t *))) == NULL)
		do_error(_("couldn't malloc inode tree descriptor table\n"));
	if ((inode_uncertain_tree_ptrs = malloc(agcount *
			sizeof(avltree_desc_t *))) == NULL)
		do_error(
		_("couldn't malloc uncertain ino tree descriptor table\n"));

	for (i = 0; i < agcount; i++) {
		if ((inode_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error(_("couldn't malloc inode tree descriptor\n"));
		if ((inode_uncertain_tree_ptrs[i] =
				malloc(sizeof(avltree_desc_t))) == NULL)
			do_error(
		_("couldn't malloc uncertain ino tree descriptor\n"));
	}
	for (i = 0; i < agcount; i++) {
		avl_init_tree(inode_tree_ptrs[i], &avl_ino_tree_ops);
		avl_init_tree(inode_uncertain_tree_ptrs[i], &avl_ino_tree_ops);
	}

	if ((last_rec = malloc(sizeof(ino_tree_node_t *) * agcount)) == NULL)
		do_error(_("couldn't malloc uncertain inode cache area\n"));

	memset(last_rec, 0, sizeof(ino_tree_node_t *) * agcount);

	full_ino_ex_data = 0;
}
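
/*
 * Usage sketch (assumed ordering, not actual phase code): a caller is
 * expected to initialise the trees before populating them, and to attach
 * the extra per-inode data only once parent/link counting begins:
 *
 *	incore_ino_init(mp);
 *	...set_inode_used_alloc()/set_inode_free_alloc() per discovered ino...
 *	add_ino_ex_data(mp);
 */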