2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 #ifndef XFS_REPAIR_INCORE_H
20 #define XFS_REPAIR_INCORE_H
24 * contains definition information. implementation (code)
25 * is spread out in separate files.
29 * block allocation lists
/*
 * simple linked list of allocated regions; lets teardown free
 * everything in one pass.
 * NOTE(review): the member list was lost in extraction and has been
 * reconstructed from the record_allocation()/free_allocations()
 * signatures -- verify against upstream.
 */
typedef struct ba_rec  {
	void		*addr;		/* allocated region */
	struct ba_rec	*next;		/* next record in the list */
} ba_rec_t;

/* push *addr onto the allocation list anchored at *list */
void		record_allocation(ba_rec_t *addr, ba_rec_t *list);
/* free every region recorded on *list */
void		free_allocations(ba_rec_t *list);
40 * block bit map defs -- track state of each filesystem block.
41 * ba_bmap is an array of bitstrings declared in the globals.h file.
42 * the bitstrings are broken up into 64-bit chunks. one bitstring per AG.
44 #define BA_BMAP_SIZE(x) (howmany(x, 4))
46 void set_bmap_rt(xfs_drfsbno_t numblocks
);
47 void set_bmap_log(xfs_mount_t
*mp
);
48 void set_bmap_fs(xfs_mount_t
*mp
);
49 void teardown_bmap(xfs_mount_t
*mp
);
51 void teardown_rt_bmap(xfs_mount_t
*mp
);
52 void teardown_ag_bmap(xfs_mount_t
*mp
, xfs_agnumber_t agno
);
53 void teardown_bmap_finish(xfs_mount_t
*mp
);
/* blocks are numbered from zero */

/* block records fit into __uint64_t's units */
#define XR_BB_UNIT	64			/* number of bits/unit */
#define XR_BB		4			/* bits per block record */
#define XR_BB_NUM	(XR_BB_UNIT/XR_BB)	/* number of records per unit */
#define XR_BB_MASK	0xF			/* block record mask */

/*
 * bitstring ops -- set/get block states, either in filesystem
 * bno's or in agbno's.  turns out that fsbno addressing is
 * more convenient when dealing with bmap extracted addresses
 * and agbno addressing is more convenient when dealing with
 * meta-data extracted addresses.  So the fsbno versions use
 * mtype (which can be one of the block map types above) to
 * set the correct block map while the agbno versions assume
 * you want to use the regular block map.
 */
#if defined(XR_BMAP_TRACE) || defined(XR_BMAP_DBG)
/*
 * implemented as functions for debugging purposes
 */
int  get_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t ag_blockno);
void set_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t ag_blockno, int state);

int  get_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno);
void set_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno, int state);
#else
/*
 * implemented as macros for performance purposes
 * (the #else separating the two variants was lost in extraction and
 * has been restored; get_agbno_state was also missing its final
 * "& XR_BB_MASK" -- without it neighboring 4-bit records bleed into
 * the returned state)
 */
#define get_agbno_state(mp, agno, ag_blockno) \
			((int) (*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) \
				>> (((ag_blockno)%XR_BB_NUM)*XR_BB)) \
				& XR_BB_MASK)
#define set_agbno_state(mp, agno, ag_blockno, state) \
	*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) = \
		((*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) & \
	  (~((__uint64_t) XR_BB_MASK << (((ag_blockno)%XR_BB_NUM)*XR_BB)))) | \
	  (((__uint64_t) (state)) << (((ag_blockno)%XR_BB_NUM)*XR_BB)))

#define get_fsbno_state(mp, blockno) \
		get_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
				XFS_FSB_TO_AGBNO(mp, (blockno)))
#define set_fsbno_state(mp, blockno, state) \
		set_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
				XFS_FSB_TO_AGBNO(mp, (blockno)), (state))

/* raw 64-bit word holding the record for ag_blockno (no masking) */
#define get_agbno_rec(mp, agno, ag_blockno) \
			(*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM))
#endif /* XR_BMAP_TRACE */

/*
 * these work in real-time extents (e.g. fsbno == rt extent number)
 */
#define get_rtbno_state(mp, fsbno) \
			((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) >> \
			(((fsbno)%XR_BB_NUM)*XR_BB)) & XR_BB_MASK)
#define set_rtbno_state(mp, fsbno, state) \
	*(rt_ba_bmap + (fsbno)/XR_BB_NUM) = \
	 ((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) & \
	  (~((__uint64_t) XR_BB_MASK << (((fsbno)%XR_BB_NUM)*XR_BB)))) | \
	  (((__uint64_t) (state)) << (((fsbno)%XR_BB_NUM)*XR_BB)))
127 * extent tree definitions
128 * right now, there are 3 trees per AG, a bno tree, a bcnt tree
129 * and a tree for dup extents. If the code is modified in the
130 * future to use an extent tree instead of a bitmask for tracking
131 * fs blocks, then we could lose the dup extent tree if we labelled
132 * each extent with the inode that owned it.
/* per-extent state byte; low nibble is an XR_E_* value, high bit is XR_E_WRITTEN */
typedef unsigned char	extent_state_t;
137 typedef struct extent_tree_node
{
139 xfs_agblock_t ex_startblock
; /* starting block (agbno) */
140 xfs_extlen_t ex_blockcount
; /* number of blocks in extent */
141 extent_state_t ex_state
; /* see state flags below */
143 struct extent_tree_node
*next
; /* for bcnt extent lists */
144 struct extent_tree_node
*last
; /* for bcnt extent list anchors */
146 xfs_ino_t ex_inode
; /* owner, NULL if free or */
147 /* multiply allocated */
149 } extent_tree_node_t
;
151 typedef struct rt_extent_tree_node
{
153 xfs_drtbno_t rt_startblock
; /* starting realtime block */
154 xfs_extlen_t rt_blockcount
; /* number of blocks in extent */
155 extent_state_t rt_state
; /* see state flags below */
158 xfs_ino_t ex_inode
; /* owner, NULL if free or */
159 /* multiply allocated */
161 } rt_extent_tree_node_t
;
/* extent states, prefix with XR_ to avoid conflict with buffer cache defines */

#define XR_E_UNKNOWN	0	/* unknown state */
#define XR_E_FREE1	1	/* free block (marked by one fs space tree) */
#define XR_E_FREE	2	/* free block (marked by both fs space trees) */
#define XR_E_INUSE	3	/* extent used by file/dir data or metadata */
#define XR_E_INUSE_FS	4	/* extent used by fs ag header or log */
#define XR_E_MULT	5	/* extent is multiply referenced */
#define XR_E_INO	6	/* extent used by inodes (inode blocks) */
#define XR_E_FS_MAP	7	/* extent used by fs space/inode maps */
#define XR_E_BAD_STATE	8	/* first invalid state value */

/* extent states, in 64 bit word chunks */
#define XR_E_UNKNOWN_LL		0x0000000000000000LL
#define XR_E_FREE1_LL		0x1111111111111111LL
#define XR_E_FREE_LL		0x2222222222222222LL
#define XR_E_INUSE_LL		0x3333333333333333LL
#define XR_E_INUSE_FS_LL	0x4444444444444444LL
#define XR_E_MULT_LL		0x5555555555555555LL
#define XR_E_INO_LL		0x6666666666666666LL
#define XR_E_FS_MAP_LL		0x7777777777777777LL

/* separate state bit, OR'ed into high (4th) bit of ex_state field */
#define XR_E_WRITTEN	0x8	/* extent has been written out, can't reclaim */

/*
 * is the state, ignoring the written bit, a valid XR_E_* value?
 * (fixed: closing paren was misplaced so '<' compared ~XR_E_WRITTEN
 * instead of the masked state, and XR_E_BAD_STATE was misspelled)
 */
#define good_state(state)	(((state) & (~XR_E_WRITTEN)) >= XR_E_UNKNOWN && \
				(((state) & (~XR_E_WRITTEN)) < XR_E_BAD_STATE))
#define written(state)		((state) & XR_E_WRITTEN)
/* fixed: original used &= which would clear every other state bit */
#define set_written(state)	(state) |= XR_E_WRITTEN
194 * bno extent tree functions
197 add_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
198 xfs_extlen_t blockcount
);
201 findfirst_bno_extent(xfs_agnumber_t agno
);
204 find_bno_extent(xfs_agnumber_t agno
, xfs_agblock_t agbno
);
207 findfirst_bno_extent(xfs_agnumber_t agno
);
209 #define findnext_bno_extent(exent_ptr) \
210 ((extent_tree_node_t *) ((exent_ptr)->avl_node.avl_nextino))
213 get_bno_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
);
216 * bcnt tree functions
219 add_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
220 xfs_extlen_t blockcount
);
223 findfirst_bcnt_extent(xfs_agnumber_t agno
);
226 find_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t agbno
);
229 findbiggest_bcnt_extent(xfs_agnumber_t agno
);
232 findnext_bcnt_extent(xfs_agnumber_t agno
, extent_tree_node_t
*ext
);
235 get_bcnt_extent(xfs_agnumber_t agno
, xfs_agblock_t startblock
,
236 xfs_extlen_t blockcount
);
239 * duplicate extent tree functions
241 void add_dup_extent(xfs_agnumber_t agno
,
242 xfs_agblock_t startblock
,
243 xfs_extlen_t blockcount
);
245 extern avltree_desc_t
**extent_tree_ptrs
;
248 search_dup_extent(xfs_mount_t
*mp
, xfs_agnumber_t agno
, xfs_agblock_t agbno
)
250 ASSERT(agno
< glob_agcount
);
252 if (avl_findrange(extent_tree_ptrs
[agno
], agbno
) != NULL
)
258 void add_rt_dup_extent(xfs_drtbno_t startblock
,
259 xfs_extlen_t blockcount
);
261 int search_rt_dup_extent(xfs_mount_t
*mp
,
265 * extent/tree recyling and deletion routines
269 * return an extent node to the extent node free list
271 void release_extent_tree_node(extent_tree_node_t
*node
);
274 * recycle all the nodes in the per-AG tree
276 void release_dup_extent_tree(xfs_agnumber_t agno
);
277 void release_agbno_extent_tree(xfs_agnumber_t agno
);
278 void release_agbcnt_extent_tree(xfs_agnumber_t agno
);
281 * realtime duplicate extent tree - this one actually frees the memory
283 void free_rt_dup_extent_tree(xfs_mount_t
*mp
);
286 * per-AG extent trees shutdown routine -- all (bno, bcnt and dup)
287 * at once. this one actually frees the memory instead of just recyling
290 void incore_ext_teardown(xfs_mount_t
*mp
);
/* inode type codes used by the repair phases */
#define XR_INO_UNKNOWN		0	/* unknown */
#define XR_INO_DIR		1	/* directory */
#define XR_INO_RTDATA		2	/* realtime file */
#define XR_INO_RTBITMAP		3	/* realtime bitmap inode */
#define XR_INO_RTSUM		4	/* realtime summary inode */
#define XR_INO_DATA		5	/* regular file */
#define XR_INO_SYMLINK		6	/* symlink */
#define XR_INO_CHRDEV		7	/* character device */
#define XR_INO_BLKDEV		8	/* block device */
#define XR_INO_SOCK		9	/* socket */
#define XR_INO_FIFO		10	/* fifo */
#define XR_INO_MOUNTPOINT	11	/* mountpoint */
311 /* inode allocation tree */
314 * Inodes in the inode allocation trees are allocated in chunks.
315 * Those groups can be easily duplicated in our trees.
316 * Disconnected inodes are harder. We can do one of two
317 * things in that case: if we know the inode allocation btrees
318 * are good, then we can disallow directory references to unknown
319 * inode chunks. If the inode allocation trees have been trashed or
320 * we feel like being aggressive, then as we hit unknown inodes,
321 * we can search on the disk for all contiguous inodes and see if
322 * they fit into chunks. Before putting them into the inode tree,
323 * we can scan each inode starting at the earliest inode to see which
324 * ones are good. This protects us from the pathological case of
325 * inodes appearing in user-data. We still may have to mark the
326 * inodes as "possibly fake" so that if a file claims the blocks,
327 * we decide to believe the inodes, especially if they're not
331 #define PLIST_CHUNK_SIZE 4
333 typedef xfs_ino_t parent_entry_t
;
337 typedef struct parent_list
{
339 parent_entry_t
*pentries
;
345 typedef struct ino_ex_data
{
346 __uint64_t ino_reached
; /* bit == 1 if reached */
347 __uint64_t ino_processed
; /* reference checked bit mask */
348 parent_list_t
*parents
;
349 __uint8_t
*counted_nlinks
;/* counted nlinks in P6 */
352 typedef struct ino_tree_node
{
354 xfs_agino_t ino_startnum
; /* starting inode # */
355 xfs_inofree_t ir_free
; /* inode free bit mask */
356 __uint64_t ino_confirmed
; /* confirmed bitmask */
357 __uint64_t ino_isa_dir
; /* bit == 1 if a directory */
358 struct nlink_ops
*nlinkops
; /* pointer to current nlink ops */
359 __uint8_t
*disk_nlinks
; /* on-disk nlinks, set in P3 */
361 ino_ex_data_t
*ex_data
; /* phases 6,7 */
362 parent_list_t
*plist
; /* phases 2-5 */
366 typedef struct nlink_ops
{
367 const int nlink_size
;
368 void (*disk_nlink_set
)(ino_tree_node_t
*, int, __uint32_t
);
369 __uint32_t (*disk_nlink_get
)(ino_tree_node_t
*, int);
370 __uint32_t (*counted_nlink_get
)(ino_tree_node_t
*, int);
371 __uint32_t (*counted_nlink_inc
)(ino_tree_node_t
*, int);
372 __uint32_t (*counted_nlink_dec
)(ino_tree_node_t
*, int);
376 #define INOS_PER_IREC (sizeof(__uint64_t) * NBBY)
377 void add_ino_ex_data(xfs_mount_t
*mp
);
380 * return an inode record to the free inode record pool
382 void free_inode_rec(xfs_agnumber_t agno
, ino_tree_node_t
*ino_rec
);
385 * get pulls the inode record from the good inode tree
387 void get_inode_rec(xfs_agnumber_t agno
, ino_tree_node_t
*ino_rec
);
389 extern avltree_desc_t
**inode_tree_ptrs
;
390 static inline ino_tree_node_t
*
391 findfirst_inode_rec(xfs_agnumber_t agno
)
393 return((ino_tree_node_t
*) inode_tree_ptrs
[agno
]->avl_firstino
);
395 static inline ino_tree_node_t
*
396 find_inode_rec(xfs_agnumber_t agno
, xfs_agino_t ino
)
398 return((ino_tree_node_t
*)
399 avl_findrange(inode_tree_ptrs
[agno
], ino
));
401 void find_inode_rec_range(xfs_agnumber_t agno
,
402 xfs_agino_t start_ino
, xfs_agino_t end_ino
,
403 ino_tree_node_t
**first
, ino_tree_node_t
**last
);
406 * set inode states -- setting an inode to used or free also
407 * automatically marks it as "existing". Note -- all the inode
408 * add/set/get routines assume a valid inode number.
410 ino_tree_node_t
*set_inode_used_alloc(xfs_agnumber_t agno
, xfs_agino_t ino
);
411 ino_tree_node_t
*set_inode_free_alloc(xfs_agnumber_t agno
, xfs_agino_t ino
);
413 void print_inode_list(xfs_agnumber_t agno
);
414 void print_uncertain_inode_list(xfs_agnumber_t agno
);
417 * separate trees for uncertain inodes (they may not exist).
419 ino_tree_node_t
*findfirst_uncertain_inode_rec(xfs_agnumber_t agno
);
420 ino_tree_node_t
*find_uncertain_inode_rec(xfs_agnumber_t agno
,
422 void add_inode_uncertain(xfs_mount_t
*mp
,
423 xfs_ino_t ino
, int free
);
424 void add_aginode_uncertain(xfs_agnumber_t agno
,
425 xfs_agino_t agino
, int free
);
426 void get_uncertain_inode_rec(xfs_agnumber_t agno
,
427 ino_tree_node_t
*ino_rec
);
428 void clear_uncertain_ino_cache(xfs_agnumber_t agno
);
/*
 * return next in-order inode tree node.  takes an "ino_tree_node_t *"
 */
#define next_ino_rec(ino_node_ptr)	\
		((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_nextino))

/*
 * return the next linked inode (forward avl tree link) -- meant to be
 * used by linked list routines (uncertain inode routines/records)
 */
#define next_link_rec(ino_node_ptr)	\
		((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_forw))
/*
 * Bit manipulations for processed field
 * (IS_PROC's trailing "? 0LL : 1LL)" continuation was lost in
 * extraction and is restored; MASKN now widens to 64 bits *before*
 * shifting so n >= 31 is well-defined)
 */
#define	XFS_INOPROC_MASK(i)	((__uint64_t)1 << (i))
#define	XFS_INOPROC_MASKN(i,n)	((((__uint64_t)1 << (n)) - 1) << (i))

/* 1 if inode i in the chunk has been processed, else 0 */
#define	XFS_INOPROC_IS_PROC(rp, i)	\
	(((rp)->ino_un.ex_data->ino_processed & XFS_INOPROC_MASK((i))) == 0LL \
		? 0LL : 1LL)
#define	XFS_INOPROC_SET_PROC(rp, i)	\
	((rp)->ino_un.ex_data->ino_processed |= XFS_INOPROC_MASK((i)))
#define	XFS_INOPROC_CLR_PROC(rp, i)	\
	((rp)->ino_un.ex_data->ino_processed &= ~XFS_INOPROC_MASK((i)))
/*
 * same for ir_confirmed.
 * (IS_CF's lost "? 0LL : 1LL)" continuation restored; MASKN widened
 * before the shift to avoid undefined behavior for n >= 31)
 */
#define	XFS_INOCF_MASK(i)	((__uint64_t)1 << (i))
#define	XFS_INOCF_MASKN(i,n)	((((__uint64_t)1 << (n)) - 1) << (i))

#define	XFS_INOCF_IS_CF(rp, i)		\
	(((rp)->ino_confirmed & XFS_INOCF_MASK((i))) == 0LL \
		? 0LL : 1LL)
#define	XFS_INOCF_SET_CF(rp, i)		\
	((rp)->ino_confirmed |= XFS_INOCF_MASK((i)))
#define	XFS_INOCF_CLR_CF(rp, i)		\
	((rp)->ino_confirmed &= ~XFS_INOCF_MASK((i)))
/*
 * same for backptr->ino_reached
 * (IS_RCHD's lost "? 0LL : 1LL)" continuation restored)
 */
#define	XFS_INO_RCHD_MASK(i)	((__uint64_t)1 << (i))

#define	XFS_INO_RCHD_IS_RCHD(rp, i)	\
	(((rp)->ino_un.ex_data->ino_reached & XFS_INO_RCHD_MASK((i))) == 0LL \
		? 0LL : 1LL)
#define	XFS_INO_RCHD_SET_RCHD(rp, i)	\
	((rp)->ino_un.ex_data->ino_reached |= XFS_INO_RCHD_MASK((i)))
#define	XFS_INO_RCHD_CLR_RCHD(rp, i)	\
	((rp)->ino_un.ex_data->ino_reached &= ~XFS_INO_RCHD_MASK((i)))
/*
 * set/clear/test is inode a directory inode
 * (inode_isadir's lost "? 0 : 1)" continuation restored)
 */
#define	XFS_INO_ISADIR_MASK(i)	((__uint64_t)1 << (i))

#define inode_isadir(ino_rec, ino_offset)	\
	(((ino_rec)->ino_isa_dir & XFS_INO_ISADIR_MASK((ino_offset))) == 0LL \
		? 0 : 1)
#define set_inode_isadir(ino_rec, ino_offset)	\
	((ino_rec)->ino_isa_dir |= XFS_INO_ISADIR_MASK((ino_offset)))
#define clear_inode_isadir(ino_rec, ino_offset)	\
	((ino_rec)->ino_isa_dir &= ~XFS_INO_ISADIR_MASK((ino_offset)))
/*
 * set/clear/test is inode known to be valid (although perhaps corrupt)
 */
#define clear_inode_confirmed(ino_rec, ino_offset)	\
			XFS_INOCF_CLR_CF((ino_rec), (ino_offset))
#define set_inode_confirmed(ino_rec, ino_offset)	\
			XFS_INOCF_SET_CF((ino_rec), (ino_offset))
#define is_inode_confirmed(ino_rec, ino_offset)		\
			XFS_INOCF_IS_CF(ino_rec, ino_offset)

/*
 * set/clear/test is inode free or used
 */
#define set_inode_free(ino_rec, ino_offset)	\
	XFS_INOCF_SET_CF((ino_rec), (ino_offset)),	\
	XFS_INOBT_SET_FREE((ino_rec), (ino_offset))

#define set_inode_used(ino_rec, ino_offset)	\
	XFS_INOCF_SET_CF((ino_rec), (ino_offset)),	\
	XFS_INOBT_CLR_FREE((ino_rec), (ino_offset))

#define is_inode_used(ino_rec, ino_offset)	\
	!XFS_INOBT_IS_FREE((ino_rec), (ino_offset))

#define is_inode_free(ino_rec, ino_offset)	\
	XFS_INOBT_IS_FREE((ino_rec), (ino_offset))
528 * add_inode_reached() is set on inode I only if I has been reached
529 * by an inode P claiming to be the parent and if I is a directory,
530 * the .. link in the I says that P is I's parent.
532 * add_inode_ref() is called every time a link to an inode is
533 * detected and drop_inode_ref() is called every time a link to
534 * an inode that we've counted is removed.
538 is_inode_reached(ino_tree_node_t
*ino_rec
, int ino_offset
)
540 ASSERT(ino_rec
->ino_un
.ex_data
!= NULL
);
541 return(XFS_INO_RCHD_IS_RCHD(ino_rec
, ino_offset
));
545 add_inode_reached(ino_tree_node_t
*ino_rec
, int ino_offset
)
547 ASSERT(ino_rec
->ino_un
.ex_data
!= NULL
);
549 (*ino_rec
->nlinkops
->counted_nlink_inc
)(ino_rec
, ino_offset
);
550 XFS_INO_RCHD_SET_RCHD(ino_rec
, ino_offset
);
552 ASSERT(is_inode_reached(ino_rec
, ino_offset
));
556 add_inode_ref(ino_tree_node_t
*ino_rec
, int ino_offset
)
558 ASSERT(ino_rec
->ino_un
.ex_data
!= NULL
);
560 (*ino_rec
->nlinkops
->counted_nlink_inc
)(ino_rec
, ino_offset
);
564 drop_inode_ref(ino_tree_node_t
*ino_rec
, int ino_offset
)
566 ASSERT(ino_rec
->ino_un
.ex_data
!= NULL
);
568 if ((*ino_rec
->nlinkops
->counted_nlink_dec
)(ino_rec
, ino_offset
) == 0)
569 XFS_INO_RCHD_CLR_RCHD(ino_rec
, ino_offset
);
573 is_inode_referenced(ino_tree_node_t
*ino_rec
, int ino_offset
)
575 ASSERT(ino_rec
->ino_un
.ex_data
!= NULL
);
577 return (*ino_rec
->nlinkops
->counted_nlink_get
)(ino_rec
, ino_offset
) > 0;
580 static inline __uint32_t
581 num_inode_references(ino_tree_node_t
*ino_rec
, int ino_offset
)
583 ASSERT(ino_rec
->ino_un
.ex_data
!= NULL
);
585 return (*ino_rec
->nlinkops
->counted_nlink_get
)(ino_rec
, ino_offset
);
589 set_inode_disk_nlinks(ino_tree_node_t
*ino_rec
, int ino_offset
, __uint32_t nlinks
)
591 (*ino_rec
->nlinkops
->disk_nlink_set
)(ino_rec
, ino_offset
, nlinks
);
594 static inline __uint32_t
595 get_inode_disk_nlinks(ino_tree_node_t
*ino_rec
, int ino_offset
)
597 return (*ino_rec
->nlinkops
->disk_nlink_get
)(ino_rec
, ino_offset
);
/*
 * has an inode been processed for phase 6 (reference count checking)?
 * add_inode_refchecked() is set on an inode when it gets traversed
 * during the reference count phase (6).  It's set so that if the inode
 * is a directory, it's traversed (and it's links counted) only once.
 *
 * NOTE(review): the #else separating the fast macro variants from the
 * debug function variants was lost in extraction and is restored --
 * without it both definitions coexist and conflict.
 */
#ifndef XR_INO_REF_DEBUG
#define add_inode_refchecked(ino, ino_rec, ino_offset) \
		XFS_INOPROC_SET_PROC((ino_rec), (ino_offset))
#define is_inode_refchecked(ino, ino_rec, ino_offset) \
		(XFS_INOPROC_IS_PROC(ino_rec, ino_offset) != 0LL)
#else
void add_inode_refchecked(xfs_ino_t ino,
			ino_tree_node_t *ino_rec, int ino_offset);
int is_inode_refchecked(xfs_ino_t ino,
			ino_tree_node_t *ino_rec, int ino_offset);
#endif /* XR_INO_REF_DEBUG */
619 * set/get inode number of parent -- works for directory inodes only
621 void set_inode_parent(ino_tree_node_t
*irec
, int ino_offset
,
623 xfs_ino_t
get_inode_parent(ino_tree_node_t
*irec
, int ino_offset
);
626 * bmap cursor for tracking and fixing bmap btrees. All xfs btrees number
627 * the levels with 0 being the leaf and every level up being 1 greater.
630 #define XR_MAX_BMLEVELS 10 /* XXX - rcc need to verify number */
632 typedef struct bm_level_state
{
634 xfs_dfsbno_t left_fsbno
;
635 xfs_dfsbno_t right_fsbno
;
636 __uint64_t first_key
;
640 __uint64_t prev_last_key;
642 xfs_bmbt_block_t *block;
646 typedef struct bm_cursor
{
650 bm_level_state_t level
[XR_MAX_BMLEVELS
];
653 void init_bm_cursor(bmap_cursor_t
*cursor
, int num_level
);
655 #endif /* XFS_REPAIR_INCORE_H */