/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

/*
 * contains definition information.  implementation (code)
 * is spread out in separate files.
 */

/*
 * block allocation lists
 */
typedef struct ba_rec  {
	void		*addr;
	struct ba_rec	*next;
} ba_rec_t;

void		record_allocation(ba_rec_t *addr, ba_rec_t *list);
void		free_allocations(ba_rec_t *list);

/*
 * block bit map defs -- track state of each filesystem block.
 * ba_bmap is an array of bitstrings declared in the globals.h file.
 * the bitstrings are broken up into 64-bit chunks.  one bitstring per AG.
 */
#define BA_BMAP_SIZE(x)		(howmany(x, 4))

void		set_bmap_rt(xfs_drfsbno_t numblocks);
void		set_bmap_log(xfs_mount_t *mp);
void		set_bmap_fs(xfs_mount_t *mp);
void		teardown_bmap(xfs_mount_t *mp);

void		teardown_rt_bmap(xfs_mount_t *mp);
void		teardown_ag_bmap(xfs_mount_t *mp, xfs_agnumber_t agno);
void		teardown_bmap_finish(xfs_mount_t *mp);

/* blocks are numbered from zero */

/* block records fit into __uint64_t's units */

#define XR_BB_UNIT	64			/* number of bits/unit */
#define XR_BB		4			/* bits per block record */
#define XR_BB_NUM	(XR_BB_UNIT/XR_BB)	/* number of records per unit */
#define XR_BB_MASK	0xF			/* block record mask */

/*
 * bitstring ops -- set/get block states, either in filesystem
 * bno's or in agbno's.  turns out that fsbno addressing is
 * more convenient when dealing with bmap extracted addresses
 * and agbno addressing is more convenient when dealing with
 * meta-data extracted addresses.  So the fsbno versions use
 * mtype (which can be one of the block map types above) to
 * set the correct block map while the agbno versions assume
 * you want to use the regular block map.
 */

#if defined(XR_BMAP_TRACE) || defined(XR_BMAP_DBG)
/*
 * implemented as functions for debugging purposes
 */
int  get_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t ag_blockno);
void set_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t ag_blockno, int state);

int  get_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno);
void set_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno, int state);
#else
/*
 * implemented as macros for performance purposes
 */

#define get_agbno_state(mp, agno, ag_blockno) \
			((int) (*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) \
				>> (((ag_blockno)%XR_BB_NUM)*XR_BB)) \
				& XR_BB_MASK)
#define set_agbno_state(mp, agno, ag_blockno, state) \
	*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) = \
		((*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) & \
	  (~((__uint64_t) XR_BB_MASK << (((ag_blockno)%XR_BB_NUM)*XR_BB)))) | \
	 (((__uint64_t) (state)) << (((ag_blockno)%XR_BB_NUM)*XR_BB)))

#define get_fsbno_state(mp, blockno) \
		get_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
				XFS_FSB_TO_AGBNO(mp, (blockno)))
#define set_fsbno_state(mp, blockno, state) \
		set_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
				XFS_FSB_TO_AGBNO(mp, (blockno)), (state))


#define get_agbno_rec(mp, agno, ag_blockno) \
			(*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM))
#endif /* XR_BMAP_TRACE */
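
/*
 * Usage sketch (assumes a valid mount "mp" and in-range "agno",
 * "agbno" and "fsbno"; state values are the XR_E_* defines below):
 *
 *	int state;
 *
 *	state = get_agbno_state(mp, agno, agbno);
 *	if (state == XR_E_FREE || state == XR_E_FREE1)
 *		set_agbno_state(mp, agno, agbno, XR_E_INUSE);
 *
 *	if (get_fsbno_state(mp, fsbno) == XR_E_UNKNOWN)
 *		set_fsbno_state(mp, fsbno, XR_E_INO);
 */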

/*
 * these work in real-time extents (i.e. fsbno == rt extent number)
 */
#define get_rtbno_state(mp, fsbno) \
			((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) >> \
			(((fsbno)%XR_BB_NUM)*XR_BB)) & XR_BB_MASK)
#define set_rtbno_state(mp, fsbno, state) \
	*(rt_ba_bmap + (fsbno)/XR_BB_NUM) = \
	 ((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) & \
	  (~((__uint64_t) XR_BB_MASK << (((fsbno)%XR_BB_NUM)*XR_BB)))) | \
	 (((__uint64_t) (state)) << (((fsbno)%XR_BB_NUM)*XR_BB)))


/*
 * extent tree definitions
 * right now, there are 3 trees per AG, a bno tree, a bcnt tree
 * and a tree for dup extents.  If the code is modified in the
 * future to use an extent tree instead of a bitmask for tracking
 * fs blocks, then we could lose the dup extent tree if we labelled
 * each extent with the inode that owned it.
 */

typedef unsigned char extent_state_t;

typedef struct extent_tree_node  {
	avlnode_t		avl_node;
	xfs_agblock_t		ex_startblock;	/* starting block (agbno) */
	xfs_extlen_t		ex_blockcount;	/* number of blocks in extent */
	extent_state_t		ex_state;	/* see state flags below */

	struct extent_tree_node	*next;		/* for bcnt extent lists */
#if 0
	xfs_ino_t		ex_inode;	/* owner, NULL if free or */
						/*	multiply allocated */
#endif
} extent_tree_node_t;

typedef struct rt_extent_tree_node  {
	avlnode_t		avl_node;
	xfs_drtbno_t		rt_startblock;	/* starting realtime block */
	xfs_extlen_t		rt_blockcount;	/* number of blocks in extent */
	extent_state_t		rt_state;	/* see state flags below */

#if 0
	xfs_ino_t		ex_inode;	/* owner, NULL if free or */
						/*	multiply allocated */
#endif
} rt_extent_tree_node_t;

/* extent states, prefix with XR_ to avoid conflict with buffer cache defines */

#define XR_E_UNKNOWN	0	/* unknown state */
#define XR_E_FREE1	1	/* free block (marked by one fs space tree) */
#define XR_E_FREE	2	/* free block (marked by both fs space trees) */
#define XR_E_INUSE	3	/* extent used by file/dir data or metadata */
#define XR_E_INUSE_FS	4	/* extent used by fs ag header or log */
#define XR_E_MULT	5	/* extent is multiply referenced */
#define XR_E_INO	6	/* extent used by inodes (inode blocks) */
#define XR_E_FS_MAP	7	/* extent used by fs space/inode maps */
#define XR_E_BAD_STATE	8

/* extent states, in 64 bit word chunks */
#define XR_E_UNKNOWN_LL		0x0000000000000000LL
#define XR_E_FREE1_LL		0x1111111111111111LL
#define XR_E_FREE_LL		0x2222222222222222LL
#define XR_E_INUSE_LL		0x3333333333333333LL
#define XR_E_INUSE_FS_LL	0x4444444444444444LL
#define XR_E_MULT_LL		0x5555555555555555LL
#define XR_E_INO_LL		0x6666666666666666LL
#define XR_E_FS_MAP_LL		0x7777777777777777LL

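/*
 * Usage sketch: each block record is XR_BB (4) bits wide, so one
 * __uint64_t of a block map holds XR_BB_NUM (16) records, and the
 * *_LL constants above set all 16 records in a word to the same
 * state with a single store, e.g. (word index "i" is hypothetical):
 *
 *	ba_bmap[agno][i] = XR_E_FREE1_LL;
 */
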
/* separate state bit, OR'ed into high (4th) bit of ex_state field */

#define XR_E_WRITTEN	0x8	/* extent has been written out, can't reclaim */
#define good_state(state)	(((state) & (~XR_E_WRITTEN)) >= XR_E_UNKNOWN && \
				(((state) & (~XR_E_WRITTEN)) < XR_E_BAD_STATE))
#define written(state)		((state) & XR_E_WRITTEN)
#define set_written(state)	(state) |= XR_E_WRITTEN

/*
 * bno extent tree functions
 */
void
add_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount);

extent_tree_node_t *
findfirst_bno_extent(xfs_agnumber_t agno);

extent_tree_node_t *
find_bno_extent(xfs_agnumber_t agno, xfs_agblock_t agbno);

#define findnext_bno_extent(exent_ptr)	\
		((extent_tree_node_t *) ((exent_ptr)->avl_node.avl_nextino))

void
get_bno_extent(xfs_agnumber_t agno, extent_tree_node_t *ext);
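
/*
 * Usage sketch: walking one AG's bno (by starting block) extent tree
 * in order, assuming it has been populated with add_bno_extent():
 *
 *	extent_tree_node_t	*ext;
 *
 *	for (ext = findfirst_bno_extent(agno);
 *	     ext != NULL;
 *	     ext = findnext_bno_extent(ext))  {
 *		... ext->ex_blockcount blocks at agbno ext->ex_startblock ...
 *	}
 */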

/*
 * bcnt tree functions
 */
void
add_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount);

extent_tree_node_t *
findfirst_bcnt_extent(xfs_agnumber_t agno);

extent_tree_node_t *
find_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t agbno);

extent_tree_node_t *
findbiggest_bcnt_extent(xfs_agnumber_t agno);

extent_tree_node_t *
findnext_bcnt_extent(xfs_agnumber_t agno, extent_tree_node_t *ext);

extent_tree_node_t *
get_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount);
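
/*
 * Usage sketch: the bcnt tree orders extents by size, so the largest
 * extent in an AG can be pulled first and the remainder visited with
 * findnext_bcnt_extent(), assuming the tree was built with
 * add_bcnt_extent():
 *
 *	extent_tree_node_t	*ext;
 *
 *	for (ext = findbiggest_bcnt_extent(agno);
 *	     ext != NULL;
 *	     ext = findnext_bcnt_extent(agno, ext))  {
 *		... ext->ex_blockcount, ext->ex_startblock ...
 *	}
 */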

/*
 * duplicate extent tree functions
 */
void add_dup_extent(xfs_agnumber_t agno,
		xfs_agblock_t startblock,
		xfs_extlen_t blockcount);

extern avltree_desc_t	**extent_tree_ptrs;
/* ARGSUSED */
static __inline int
search_dup_extent(xfs_mount_t *mp, xfs_agnumber_t agno, xfs_agblock_t agbno)
{
	ASSERT(agno < glob_agcount);

	if (avl_findrange(extent_tree_ptrs[agno], agbno) != NULL)
		return(1);

	return(0);
}

void add_rt_dup_extent(xfs_drtbno_t	startblock,
			xfs_extlen_t	blockcount);

int search_rt_dup_extent(xfs_mount_t	*mp,
			xfs_drtbno_t	bno);
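
/*
 * Usage sketch: blocks that turn out to be claimed more than once get
 * remembered as dup extents so later claimants can be checked against
 * the tree; the rt variants work the same way but take absolute
 * realtime block numbers instead of (agno, agbno) pairs:
 *
 *	add_dup_extent(agno, agbno, blockcount);
 *	...
 *	if (search_dup_extent(mp, agno, agbno))
 *		... block is multiply claimed ...
 */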

/*
 * extent/tree recycling and deletion routines
 */

/*
 * return an extent node to the extent node free list
 */
void release_extent_tree_node(extent_tree_node_t *node);

/*
 * recycle all the nodes in the per-AG tree
 */
void release_dup_extent_tree(xfs_agnumber_t agno);
void release_agbno_extent_tree(xfs_agnumber_t agno);
void release_agbcnt_extent_tree(xfs_agnumber_t agno);

/*
 * realtime duplicate extent tree - this one actually frees the memory
 */
void free_rt_dup_extent_tree(xfs_mount_t *mp);

/*
 * per-AG extent trees shutdown routine -- all (bno, bcnt and dup)
 * at once.  this one actually frees the memory instead of just
 * recycling the nodes.
 */
void incore_ext_teardown(xfs_mount_t *mp);

/*
 * inode definitions
 */

/* inode types */

#define XR_INO_UNKNOWN	0		/* unknown */
#define XR_INO_DIR	1		/* directory */
#define XR_INO_RTDATA	2		/* realtime file */
#define XR_INO_RTBITMAP	3		/* realtime bitmap inode */
#define XR_INO_RTSUM	4		/* realtime summary inode */
#define XR_INO_DATA	5		/* regular file */
#define XR_INO_SYMLINK	6		/* symlink */
#define XR_INO_CHRDEV	7		/* character device */
#define XR_INO_BLKDEV	8		/* block device */
#define XR_INO_SOCK	9		/* socket */
#define XR_INO_FIFO	10		/* fifo */
#define XR_INO_MOUNTPOINT 11		/* mountpoint */

/* inode allocation tree */

/*
 * Inodes in the inode allocation trees are allocated in chunks.
 * Those groups can be easily duplicated in our trees.
 * Disconnected inodes are harder.  We can do one of two
 * things in that case:  if we know the inode allocation btrees
 * are good, then we can disallow directory references to unknown
 * inode chunks.  If the inode allocation trees have been trashed or
 * we feel like being aggressive, then as we hit unknown inodes,
 * we can search on the disk for all contiguous inodes and see if
 * they fit into chunks.  Before putting them into the inode tree,
 * we can scan each inode starting at the earliest inode to see which
 * ones are good.  This protects us from the pathological case of
 * inodes appearing in user-data.  We still may have to mark the
 * inodes as "possibly fake" so that if a file claims the blocks,
 * we decide to believe the inodes, especially if they're not
 * connected.
 */

#define PLIST_CHUNK_SIZE	4

typedef xfs_ino_t parent_entry_t;

typedef struct parent_list  {
	__uint64_t		pmask;
	parent_entry_t		*pentries;
#ifdef DEBUG
	short			cnt;
#endif
} parent_list_t;

typedef struct backptrs  {
	__uint64_t		ino_reached;	/* bit == 1 if reached */
	__uint64_t		ino_processed;	/* reference checked bit mask */
	__uint32_t		nlinks[XFS_INODES_PER_CHUNK];
	parent_list_t		*parents;
} backptrs_t;

typedef struct ino_tree_node  {
	avlnode_t		avl_node;
	xfs_agino_t		ino_startnum;	/* starting inode # */
	xfs_inofree_t		ir_free;	/* inode free bit mask */
	__uint64_t		ino_confirmed;	/* confirmed bitmask */
	__uint64_t		ino_isa_dir;	/* bit == 1 if a directory */
	union  {
		backptrs_t	*backptrs;
		parent_list_t	*plist;
	} ino_un;
} ino_tree_node_t;

#define INOS_PER_IREC	(sizeof(__uint64_t) * NBBY)
void		add_ino_backptrs(xfs_mount_t *mp);

/*
 * return an inode record to the free inode record pool
 */
void free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec);

/*
 * get pulls the inode record from the good inode tree
 */
void get_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec);

extern avltree_desc_t	**inode_tree_ptrs;
static __inline ino_tree_node_t *
findfirst_inode_rec(xfs_agnumber_t agno)
{
	return((ino_tree_node_t *) inode_tree_ptrs[agno]->avl_firstino);
}
static __inline ino_tree_node_t *
find_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
	return((ino_tree_node_t *)
		avl_findrange(inode_tree_ptrs[agno], ino));
}
void find_inode_rec_range(xfs_agnumber_t agno,
		xfs_agino_t start_ino, xfs_agino_t end_ino,
		ino_tree_node_t **first, ino_tree_node_t **last);

/*
 * set inode states -- setting an inode to used or free also
 * automatically marks it as "existing".  Note -- all the inode
 * add/set/get routines assume a valid inode number.
 */
ino_tree_node_t *set_inode_used_alloc(xfs_agnumber_t agno, xfs_agino_t ino);
ino_tree_node_t *set_inode_free_alloc(xfs_agnumber_t agno, xfs_agino_t ino);

void print_inode_list(xfs_agnumber_t agno);
void print_uncertain_inode_list(xfs_agnumber_t agno);

/*
 * separate trees for uncertain inodes (they may not exist).
 */
ino_tree_node_t *findfirst_uncertain_inode_rec(xfs_agnumber_t agno);
ino_tree_node_t *find_uncertain_inode_rec(xfs_agnumber_t agno,
					xfs_agino_t ino);
void add_inode_uncertain(xfs_mount_t *mp,
				xfs_ino_t ino, int free);
void add_aginode_uncertain(xfs_agnumber_t agno,
				xfs_agino_t agino, int free);
void get_uncertain_inode_rec(xfs_agnumber_t agno,
				ino_tree_node_t *ino_rec);
void clear_uncertain_ino_cache(xfs_agnumber_t agno);

/*
 * return next in-order inode tree node.  takes an "ino_tree_node_t *"
 */
#define next_ino_rec(ino_node_ptr)	\
		((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_nextino))
/*
 * return the next linked inode (forward avl tree link) -- meant to be
 * used by linked list routines (uncertain inode routines/records)
 */
#define next_link_rec(ino_node_ptr)	\
		((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_forw))
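
/*
 * Usage sketch: walking all inode records of one AG in inode number
 * order via the avl links:
 *
 *	ino_tree_node_t	*irec;
 *
 *	for (irec = findfirst_inode_rec(agno);
 *	     irec != NULL;
 *	     irec = next_ino_rec(irec))  {
 *		... irec->ino_startnum is the first inode # covered ...
 *	}
 */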

/*
 * Bit manipulations for processed field
 */
#define	XFS_INOPROC_MASK(i)	((__uint64_t)1 << (i))
#define	XFS_INOPROC_MASKN(i,n)	((__uint64_t)(((__uint64_t)1 << (n)) - 1) << (i))

#define	XFS_INOPROC_IS_PROC(rp, i) \
	(((rp)->ino_un.backptrs->ino_processed & XFS_INOPROC_MASK((i))) == 0LL \
		? 0 : 1)
#define	XFS_INOPROC_SET_PROC(rp, i) \
	((rp)->ino_un.backptrs->ino_processed |= XFS_INOPROC_MASK((i)))
/*
#define	XFS_INOPROC_CLR_PROC(rp, i) \
	((rp)->ino_un.backptrs->ino_processed &= ~XFS_INOPROC_MASK((i)))
*/

/*
 * same for ino_confirmed.
 */
#define	XFS_INOCF_MASK(i)	((__uint64_t)1 << (i))
#define	XFS_INOCF_MASKN(i,n)	((__uint64_t)(((__uint64_t)1 << (n)) - 1) << (i))

#define	XFS_INOCF_IS_CF(rp, i) \
	(((rp)->ino_confirmed & XFS_INOCF_MASK((i))) == 0LL \
		? 0 : 1)
#define	XFS_INOCF_SET_CF(rp, i) \
		((rp)->ino_confirmed |= XFS_INOCF_MASK((i)))
#define	XFS_INOCF_CLR_CF(rp, i) \
		((rp)->ino_confirmed &= ~XFS_INOCF_MASK((i)))

/*
 * same for backptr->ino_reached
 */
#define	XFS_INO_RCHD_MASK(i)	((__uint64_t)1 << (i))

#define	XFS_INO_RCHD_IS_RCHD(rp, i) \
	(((rp)->ino_un.backptrs->ino_reached & XFS_INO_RCHD_MASK((i))) == 0LL \
		? 0 : 1)
#define	XFS_INO_RCHD_SET_RCHD(rp, i) \
		((rp)->ino_un.backptrs->ino_reached |= XFS_INO_RCHD_MASK((i)))
#define	XFS_INO_RCHD_CLR_RCHD(rp, i) \
		((rp)->ino_un.backptrs->ino_reached &= ~XFS_INO_RCHD_MASK((i)))
/*
 * set/clear/test is inode a directory inode
 */
#define	XFS_INO_ISADIR_MASK(i)	((__uint64_t)1 << (i))

#define inode_isadir(ino_rec, ino_offset) \
	(((ino_rec)->ino_isa_dir & XFS_INO_ISADIR_MASK((ino_offset))) == 0LL \
		? 0 : 1)
#define set_inode_isadir(ino_rec, ino_offset) \
		((ino_rec)->ino_isa_dir |= XFS_INO_ISADIR_MASK((ino_offset)))
#define clear_inode_isadir(ino_rec, ino_offset) \
		((ino_rec)->ino_isa_dir &= ~XFS_INO_ISADIR_MASK((ino_offset)))


/*
 * set/clear/test is inode known to be valid (although perhaps corrupt)
 */
#define clear_inode_confirmed(ino_rec, ino_offset) \
		XFS_INOCF_CLR_CF((ino_rec), (ino_offset))

#define set_inode_confirmed(ino_rec, ino_offset) \
		XFS_INOCF_SET_CF((ino_rec), (ino_offset))

#define is_inode_confirmed(ino_rec, ino_offset) \
		XFS_INOCF_IS_CF(ino_rec, ino_offset)

/*
 * set/clear/test is inode free or used
 */
#define set_inode_free(ino_rec, ino_offset) \
	XFS_INOCF_SET_CF((ino_rec), (ino_offset)), \
	XFS_INOBT_SET_FREE((ino_rec), (ino_offset))

#define set_inode_used(ino_rec, ino_offset) \
	XFS_INOCF_SET_CF((ino_rec), (ino_offset)), \
	XFS_INOBT_CLR_FREE((ino_rec), (ino_offset))

#define is_inode_used(ino_rec, ino_offset)	\
	!XFS_INOBT_IS_FREE((ino_rec), (ino_offset))

#define is_inode_free(ino_rec, ino_offset)	\
	XFS_INOBT_IS_FREE((ino_rec), (ino_offset))
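
/*
 * Usage sketch: the per-inode macros above all take a bit offset into
 * the record, i.e. the inode's position relative to ino_startnum
 * (0 .. INOS_PER_IREC - 1); "agino" and "irec" are assumed to match:
 *
 *	int	ino_offset = agino - irec->ino_startnum;
 *
 *	set_inode_used(irec, ino_offset);
 *	if (inode_isadir(irec, ino_offset))
 *		... handle directory inode ...
 */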

/*
 * add_inode_reached() is called on inode I only if I has been reached
 * by an inode P claiming to be the parent and, if I is a directory,
 * the .. link in I says that P is I's parent.
 *
 * add_inode_ref() is called every time a link to an inode is
 * detected and drop_inode_ref() is called every time a link to
 * an inode that we've counted is removed.
 */

static __inline int
is_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	return(XFS_INO_RCHD_IS_RCHD(ino_rec, ino_offset));
}

static __inline void
add_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);

	ino_rec->ino_un.backptrs->nlinks[ino_offset]++;
	XFS_INO_RCHD_SET_RCHD(ino_rec, ino_offset);

	ASSERT(is_inode_reached(ino_rec, ino_offset));
}

static __inline void
add_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);

	ino_rec->ino_un.backptrs->nlinks[ino_offset]++;
}

static __inline void
drop_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	ASSERT(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);

	if (--ino_rec->ino_un.backptrs->nlinks[ino_offset] == 0)
		XFS_INO_RCHD_CLR_RCHD(ino_rec, ino_offset);
}

static __inline int
is_inode_referenced(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	return(ino_rec->ino_un.backptrs->nlinks[ino_offset] > 0);
}

static __inline __uint32_t
num_inode_references(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.backptrs != NULL);
	return(ino_rec->ino_un.backptrs->nlinks[ino_offset]);
}

/*
 * has an inode been processed for phase 6 (reference count checking)?
 * add_inode_refchecked() is called on an inode when it gets traversed
 * during the reference count phase (6).  It's set so that if the inode
 * is a directory, it's traversed (and its links counted) only once.
 */
#ifndef XR_INO_REF_DEBUG
#define add_inode_refchecked(ino, ino_rec, ino_offset) \
		XFS_INOPROC_SET_PROC((ino_rec), (ino_offset))
#define is_inode_refchecked(ino, ino_rec, ino_offset) \
		(XFS_INOPROC_IS_PROC(ino_rec, ino_offset) == 0LL ? 0 : 1)
#else
void add_inode_refchecked(xfs_ino_t ino,
			ino_tree_node_t *ino_rec, int ino_offset);
int is_inode_refchecked(xfs_ino_t ino,
			ino_tree_node_t *ino_rec, int ino_offset);
#endif /* XR_INO_REF_DEBUG */

/*
 * set/get inode number of parent -- works for directory inodes only
 */
void		set_inode_parent(ino_tree_node_t *irec, int ino_offset,
					xfs_ino_t ino);
#if 0
void		clear_inode_parent(ino_tree_node_t *irec, int offset);
#endif
xfs_ino_t	get_inode_parent(ino_tree_node_t *irec, int ino_offset);

/*
 * bmap cursor for tracking and fixing bmap btrees.  All xfs btrees number
 * the levels with 0 being the leaf and every level up being 1 greater.
 */

#define XR_MAX_BMLEVELS		10	/* XXX - rcc need to verify number */

typedef struct bm_level_state  {
	xfs_dfsbno_t		fsbno;
	xfs_dfsbno_t		left_fsbno;
	xfs_dfsbno_t		right_fsbno;
	__uint64_t		first_key;
	__uint64_t		last_key;
/*
	int			level;
	__uint64_t		prev_last_key;
	xfs_buf_t		*bp;
	xfs_bmbt_block_t	*block;
*/
} bm_level_state_t;

typedef struct bm_cursor  {
	int			num_levels;
	xfs_ino_t		ino;
	xfs_dinode_t		*dip;
	bm_level_state_t	level[XR_MAX_BMLEVELS];
} bmap_cursor_t;

void init_bm_cursor(bmap_cursor_t *cursor, int num_level);
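
/*
 * Usage sketch: a bmap cursor is set up once per inode fork scan and
 * then carries per-level state (block numbers and key ranges) while the
 * bmap btree is walked from the root down to level 0, the leaves;
 * "num_levels" and "ino" are assumed to come from the inode being
 * checked:
 *
 *	bmap_cursor_t	cursor;
 *
 *	init_bm_cursor(&cursor, num_levels);
 *	cursor.ino = ino;
 *	... descend the btree, recording state in cursor.level[level] ...
 */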