/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef XFS_REPAIR_INCORE_H
#define XFS_REPAIR_INCORE_H

#include "avl.h"
/*
 * contains definitions only; the implementation (code)
 * is spread out across separate files.
 */

/*
 * block allocation lists
 */
typedef struct ba_rec  {
	void		*addr;
	struct ba_rec	*next;
} ba_rec_t;

void		record_allocation(ba_rec_t *addr, ba_rec_t *list);
void		free_allocations(ba_rec_t *list);

/*
 * block bit map defs -- track state of each filesystem block.
 * ba_bmap is an array of bitstrings declared in the globals.h file.
 * the bitstrings are broken up into 64-bit chunks.  one bitstring per AG.
 */
#define BA_BMAP_SIZE(x)		(howmany(x, 4))

void		set_bmap_rt(xfs_drfsbno_t numblocks);
void		set_bmap_log(xfs_mount_t *mp);
void		set_bmap_fs(xfs_mount_t *mp);
void		teardown_bmap(xfs_mount_t *mp);

void		teardown_rt_bmap(xfs_mount_t *mp);
void		teardown_ag_bmap(xfs_mount_t *mp, xfs_agnumber_t agno);
void		teardown_bmap_finish(xfs_mount_t *mp);

/* blocks are numbered from zero */

/* block records fit into __uint64_t's units */

#define XR_BB_UNIT	64			/* number of bits/unit */
#define XR_BB		4			/* bits per block record */
#define XR_BB_NUM	(XR_BB_UNIT/XR_BB)	/* number of records per unit */
#define XR_BB_MASK	0xF			/* block record mask */

/*
 * bitstring ops -- set/get block states, either in filesystem
 * bno's or in agbno's.  turns out that fsbno addressing is
 * more convenient when dealing with bmap extracted addresses
 * and agbno addressing is more convenient when dealing with
 * meta-data extracted addresses.  So the fsbno versions use
 * mtype (which can be one of the block map types above) to
 * set the correct block map while the agbno versions assume
 * you want to use the regular block map.
 */

#if defined(XR_BMAP_TRACE) || defined(XR_BMAP_DBG)
/*
 * implemented as functions for debugging purposes
 */
int  get_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
	xfs_agblock_t ag_blockno);
void set_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
	xfs_agblock_t ag_blockno, int state);

int  get_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno);
void set_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno, int state);
#else
/*
 * implemented as macros for performance purposes
 */

#define get_agbno_state(mp, agno, ag_blockno) \
			((int) (*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) \
				>> (((ag_blockno)%XR_BB_NUM)*XR_BB)) \
				& XR_BB_MASK)
#define set_agbno_state(mp, agno, ag_blockno, state) \
	*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) = \
		((*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) & \
	  (~((__uint64_t) XR_BB_MASK << (((ag_blockno)%XR_BB_NUM)*XR_BB)))) | \
	 (((__uint64_t) (state)) << (((ag_blockno)%XR_BB_NUM)*XR_BB)))

#define get_fsbno_state(mp, blockno) \
		get_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
				XFS_FSB_TO_AGBNO(mp, (blockno)))
#define set_fsbno_state(mp, blockno, state) \
		set_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
				XFS_FSB_TO_AGBNO(mp, (blockno)), (state))


#define get_agbno_rec(mp, agno, ag_blockno) \
			(*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM))
#endif /* XR_BMAP_TRACE */
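
/*
 * Illustrative sketch only (editorial addition, not part of xfs_repair):
 * each AG block gets a 4-bit record packed 16 to a __uint64_t, so the
 * macros above locate agbno N in unit N/XR_BB_NUM and shift by
 * (N%XR_BB_NUM)*XR_BB bits.  The hypothetical helper below just shows
 * that agbno and fsbno addressing land on the same record; it assumes
 * the libxfs XFS_AGB_TO_FSB() macro and is kept under #if 0 so it is
 * never compiled.
 */
#if 0
static void
example_mark_block(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t agbno, int state)
{
	xfs_dfsbno_t	fsbno = XFS_AGB_TO_FSB(mp, agno, agbno);

	/* record the state via agbno addressing ... */
	set_agbno_state(mp, agno, agbno, state);

	/* ... and read it back both ways; the two views must agree */
	ASSERT(get_agbno_state(mp, agno, agbno) == state);
	ASSERT(get_fsbno_state(mp, fsbno) == state);
}
#endif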

/*
 * these work in real-time extents (i.e. fsbno == rt extent number)
 */
#define get_rtbno_state(mp, fsbno) \
			((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) >> \
			(((fsbno)%XR_BB_NUM)*XR_BB)) & XR_BB_MASK)
#define set_rtbno_state(mp, fsbno, state) \
	*(rt_ba_bmap + (fsbno)/XR_BB_NUM) = \
	 ((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) & \
	  (~((__uint64_t) XR_BB_MASK << (((fsbno)%XR_BB_NUM)*XR_BB)))) | \
	 (((__uint64_t) (state)) << (((fsbno)%XR_BB_NUM)*XR_BB)))


/*
 * extent tree definitions
 * right now, there are 3 trees per AG, a bno tree, a bcnt tree
 * and a tree for dup extents.  If the code is modified in the
 * future to use an extent tree instead of a bitmask for tracking
 * fs blocks, then we could lose the dup extent tree if we labelled
 * each extent with the inode that owned it.
 */

typedef unsigned char extent_state_t;

typedef struct extent_tree_node  {
	avlnode_t		avl_node;
	xfs_agblock_t		ex_startblock;	/* starting block (agbno) */
	xfs_extlen_t		ex_blockcount;	/* number of blocks in extent */
	extent_state_t		ex_state;	/* see state flags below */

	struct extent_tree_node	*next;	/* for bcnt extent lists */
	struct extent_tree_node	*last;	/* for bcnt extent list anchors */
#if 0
	xfs_ino_t		ex_inode;	/* owner, NULL if free or */
						/*	multiply allocated */
#endif
} extent_tree_node_t;

typedef struct rt_extent_tree_node  {
	avlnode_t		avl_node;
	xfs_drtbno_t		rt_startblock;	/* starting realtime block */
	xfs_extlen_t		rt_blockcount;	/* number of blocks in extent */
	extent_state_t		rt_state;	/* see state flags below */

#if 0
	xfs_ino_t		ex_inode;	/* owner, NULL if free or */
						/*	multiply allocated */
#endif
} rt_extent_tree_node_t;

/* extent states, prefix with XR_ to avoid conflict with buffer cache defines */

#define XR_E_UNKNOWN	0	/* unknown state */
#define XR_E_FREE1	1	/* free block (marked by one fs space tree) */
#define XR_E_FREE	2	/* free block (marked by both fs space trees) */
#define XR_E_INUSE	3	/* extent used by file/dir data or metadata */
#define XR_E_INUSE_FS	4	/* extent used by fs ag header or log */
#define XR_E_MULT	5	/* extent is multiply referenced */
#define XR_E_INO	6	/* extent used by inodes (inode blocks) */
#define XR_E_FS_MAP	7	/* extent used by fs space/inode maps */
#define XR_E_BAD_STATE	8

/* extent states, in 64 bit word chunks */
#define XR_E_UNKNOWN_LL		0x0000000000000000LL
#define XR_E_FREE1_LL		0x1111111111111111LL
#define XR_E_FREE_LL		0x2222222222222222LL
#define XR_E_INUSE_LL		0x3333333333333333LL
#define XR_E_INUSE_FS_LL	0x4444444444444444LL
#define XR_E_MULT_LL		0x5555555555555555LL
#define XR_E_INO_LL		0x6666666666666666LL
#define XR_E_FS_MAP_LL		0x7777777777777777LL

/* separate state bit, OR'ed into high (4th) bit of ex_state field */

#define XR_E_WRITTEN	0x8	/* extent has been written out, can't reclaim */
#define good_state(state)	((((state) & (~XR_E_WRITTEN)) >= XR_E_UNKNOWN) && \
				(((state) & (~XR_E_WRITTEN)) < XR_E_BAD_STATE))
#define written(state)		((state) & XR_E_WRITTEN)
#define set_written(state)	(state) |= XR_E_WRITTEN
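
/*
 * Worked example (editorial addition, never compiled): a multiply-claimed
 * extent that has already been written out carries both the XR_E_MULT
 * state and the XR_E_WRITTEN bit; good_state() strips the written bit
 * before range-checking the state value.
 */
#if 0
static void
example_state_bits(void)
{
	int	state = XR_E_MULT | XR_E_WRITTEN;

	ASSERT(written(state));
	ASSERT(good_state(state));
	ASSERT((state & ~XR_E_WRITTEN) == XR_E_MULT);
}
#endif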

/*
 * bno extent tree functions
 */
void
add_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount);

extent_tree_node_t *
findfirst_bno_extent(xfs_agnumber_t agno);

extent_tree_node_t *
find_bno_extent(xfs_agnumber_t agno, xfs_agblock_t agbno);

#define findnext_bno_extent(extent_ptr)	\
		((extent_tree_node_t *) ((extent_ptr)->avl_node.avl_nextino))

void
get_bno_extent(xfs_agnumber_t agno, extent_tree_node_t *ext);
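
/*
 * Illustrative sketch only (editorial addition): the bno tree is walked
 * in agbno order by chasing the avl "nextino" links, so a hypothetical
 * helper that totals the blocks covered by an AG's tracked extents might
 * look like the #if 0 fragment below.
 */
#if 0
static __uint64_t
example_sum_bno_extents(xfs_agnumber_t agno)
{
	extent_tree_node_t	*ext;
	__uint64_t		blocks = 0;

	/* findfirst/findnext iterate extents in block-number order */
	for (ext = findfirst_bno_extent(agno);
	     ext != NULL;
	     ext = findnext_bno_extent(ext))
		blocks += ext->ex_blockcount;

	return blocks;
}
#endif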

/*
 * bcnt tree functions
 */
void
add_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount);

extent_tree_node_t *
findfirst_bcnt_extent(xfs_agnumber_t agno);

extent_tree_node_t *
find_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t agbno);

extent_tree_node_t *
findbiggest_bcnt_extent(xfs_agnumber_t agno);

extent_tree_node_t *
findnext_bcnt_extent(xfs_agnumber_t agno, extent_tree_node_t *ext);

extent_tree_node_t *
get_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock,
		xfs_extlen_t blockcount);

/*
 * duplicate extent tree functions
 */
void		add_dup_extent(xfs_agnumber_t agno,
			xfs_agblock_t startblock,
			xfs_extlen_t blockcount);

extern avltree_desc_t	**extent_tree_ptrs;
/* ARGSUSED */
static inline int
search_dup_extent(xfs_mount_t *mp, xfs_agnumber_t agno, xfs_agblock_t agbno)
{
	ASSERT(agno < glob_agcount);

	if (avl_findrange(extent_tree_ptrs[agno], agbno) != NULL)
		return(1);

	return(0);
}
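
/*
 * Illustrative sketch only (editorial addition): a block claimed by two
 * different owners would typically be recorded with add_dup_extent(),
 * after which search_dup_extent() reports whether a given agbno falls
 * inside any recorded duplicate range.  Hypothetical helper, never
 * compiled.
 */
#if 0
static int
example_track_dup(xfs_mount_t *mp, xfs_agnumber_t agno,
		xfs_agblock_t agbno, xfs_extlen_t len)
{
	/* remember the doubly-claimed range ... */
	add_dup_extent(agno, agbno, len);

	/* ... and membership tests on a block inside it now succeed */
	return search_dup_extent(mp, agno, agbno);
}
#endif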

void		add_rt_dup_extent(xfs_drtbno_t startblock,
				xfs_extlen_t blockcount);

int		search_rt_dup_extent(xfs_mount_t *mp,
				xfs_drtbno_t bno);

/*
 * extent/tree recycling and deletion routines
 */

/*
 * return an extent node to the extent node free list
 */
void		release_extent_tree_node(extent_tree_node_t *node);

/*
 * recycle all the nodes in the per-AG tree
 */
void		release_dup_extent_tree(xfs_agnumber_t agno);
void		release_agbno_extent_tree(xfs_agnumber_t agno);
void		release_agbcnt_extent_tree(xfs_agnumber_t agno);

/*
 * realtime duplicate extent tree - this one actually frees the memory
 */
void		free_rt_dup_extent_tree(xfs_mount_t *mp);

/*
 * per-AG extent trees shutdown routine -- all (bno, bcnt and dup)
 * at once.  this one actually frees the memory instead of just recycling
 * the nodes.
 */
void		incore_ext_teardown(xfs_mount_t *mp);

/*
 * inode definitions
 */

/* inode types */

#define XR_INO_UNKNOWN	0		/* unknown */
#define XR_INO_DIR	1		/* directory */
#define XR_INO_RTDATA	2		/* realtime file */
#define XR_INO_RTBITMAP	3		/* realtime bitmap inode */
#define XR_INO_RTSUM	4		/* realtime summary inode */
#define XR_INO_DATA	5		/* regular file */
#define XR_INO_SYMLINK	6		/* symlink */
#define XR_INO_CHRDEV	7		/* character device */
#define XR_INO_BLKDEV	8		/* block device */
#define XR_INO_SOCK	9		/* socket */
#define XR_INO_FIFO	10		/* fifo */
#define XR_INO_MOUNTPOINT 11		/* mountpoint */

/* inode allocation tree */

/*
 * Inodes in the inode allocation trees are allocated in chunks.
 * Those groups can be easily duplicated in our trees.
 * Disconnected inodes are harder.  We can do one of two
 * things in that case:  if we know the inode allocation btrees
 * are good, then we can disallow directory references to unknown
 * inode chunks.  If the inode allocation trees have been trashed or
 * we feel like being aggressive, then as we hit unknown inodes,
 * we can search on the disk for all contiguous inodes and see if
 * they fit into chunks.  Before putting them into the inode tree,
 * we can scan each inode starting at the earliest inode to see which
 * ones are good.  This protects us from the pathological case of
 * inodes appearing in user-data.  We still may have to mark the
 * inodes as "possibly fake" so that if a file claims the blocks,
 * we decide to believe the inodes, especially if they're not
 * connected.
 */

#define PLIST_CHUNK_SIZE	4

typedef xfs_ino_t parent_entry_t;

struct nlink_ops;

typedef struct parent_list  {
	__uint64_t		pmask;
	parent_entry_t		*pentries;
#ifdef DEBUG
	short			cnt;
#endif
} parent_list_t;

typedef struct ino_ex_data  {
	__uint64_t		ino_reached;	/* bit == 1 if reached */
	__uint64_t		ino_processed;	/* reference checked bit mask */
	parent_list_t		*parents;
	__uint8_t		*counted_nlinks;/* counted nlinks in P6 */
} ino_ex_data_t;

typedef struct ino_tree_node  {
	avlnode_t		avl_node;
	xfs_agino_t		ino_startnum;	/* starting inode # */
	xfs_inofree_t		ir_free;	/* inode free bit mask */
	__uint64_t		ino_confirmed;	/* confirmed bitmask */
	__uint64_t		ino_isa_dir;	/* bit == 1 if a directory */
	struct nlink_ops	*nlinkops;	/* pointer to current nlink ops */
	__uint8_t		*disk_nlinks;	/* on-disk nlinks, set in P3 */
	union  {
		ino_ex_data_t	*ex_data;	/* phases 6,7 */
		parent_list_t	*plist;		/* phases 2-5 */
	} ino_un;
} ino_tree_node_t;

typedef struct nlink_ops {
	const int	nlink_size;
	void		(*disk_nlink_set)(ino_tree_node_t *, int, __uint32_t);
	__uint32_t	(*disk_nlink_get)(ino_tree_node_t *, int);
	__uint32_t	(*counted_nlink_get)(ino_tree_node_t *, int);
	__uint32_t	(*counted_nlink_inc)(ino_tree_node_t *, int);
	__uint32_t	(*counted_nlink_dec)(ino_tree_node_t *, int);
} nlink_ops_t;


#define INOS_PER_IREC	(sizeof(__uint64_t) * NBBY)
void		add_ino_ex_data(xfs_mount_t *mp);

/*
 * return an inode record to the free inode record pool
 */
void		free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec);

/*
 * get pulls the inode record from the good inode tree
 */
void		get_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec);

extern avltree_desc_t	**inode_tree_ptrs;
static inline ino_tree_node_t *
findfirst_inode_rec(xfs_agnumber_t agno)
{
	return((ino_tree_node_t *) inode_tree_ptrs[agno]->avl_firstino);
}
static inline ino_tree_node_t *
find_inode_rec(xfs_agnumber_t agno, xfs_agino_t ino)
{
	return((ino_tree_node_t *)
		avl_findrange(inode_tree_ptrs[agno], ino));
}
void		find_inode_rec_range(xfs_agnumber_t agno,
			xfs_agino_t start_ino, xfs_agino_t end_ino,
			ino_tree_node_t **first, ino_tree_node_t **last);

/*
 * set inode states -- setting an inode to used or free also
 * automatically marks it as "existing".  Note -- all the inode
 * add/set/get routines assume a valid inode number.
 */
ino_tree_node_t	*set_inode_used_alloc(xfs_agnumber_t agno, xfs_agino_t ino);
ino_tree_node_t	*set_inode_free_alloc(xfs_agnumber_t agno, xfs_agino_t ino);

void		print_inode_list(xfs_agnumber_t agno);
void		print_uncertain_inode_list(xfs_agnumber_t agno);

/*
 * separate trees for uncertain inodes (they may not exist).
 */
ino_tree_node_t	*findfirst_uncertain_inode_rec(xfs_agnumber_t agno);
ino_tree_node_t	*find_uncertain_inode_rec(xfs_agnumber_t agno,
						xfs_agino_t ino);
void		add_inode_uncertain(xfs_mount_t *mp,
					xfs_ino_t ino, int free);
void		add_aginode_uncertain(xfs_agnumber_t agno,
					xfs_agino_t agino, int free);
void		get_uncertain_inode_rec(xfs_agnumber_t agno,
					ino_tree_node_t *ino_rec);
void		clear_uncertain_ino_cache(xfs_agnumber_t agno);

/*
 * return next in-order inode tree node.  takes an "ino_tree_node_t *"
 */
#define next_ino_rec(ino_node_ptr)	\
		((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_nextino))
/*
 * return the next linked inode (forward avl tree link) -- meant to be used
 * by linked list routines (uncertain inode routines/records)
 */
#define next_link_rec(ino_node_ptr)	\
		((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_forw))

/*
 * Bit manipulations for processed field
 */
#define XFS_INOPROC_MASK(i)	((__uint64_t)1 << (i))
#define XFS_INOPROC_MASKN(i,n)	((__uint64_t)((1 << (n)) - 1) << (i))

#define XFS_INOPROC_IS_PROC(rp, i) \
	(((rp)->ino_un.ex_data->ino_processed & XFS_INOPROC_MASK((i))) == 0LL \
		? 0 : 1)
#define XFS_INOPROC_SET_PROC(rp, i) \
	((rp)->ino_un.ex_data->ino_processed |= XFS_INOPROC_MASK((i)))
/*
#define XFS_INOPROC_CLR_PROC(rp, i) \
	((rp)->ino_un.ex_data->ino_processed &= ~XFS_INOPROC_MASK((i)))
*/

/*
 * same for ino_confirmed.
 */
#define XFS_INOCF_MASK(i)	((__uint64_t)1 << (i))
#define XFS_INOCF_MASKN(i,n)	((__uint64_t)((1 << (n)) - 1) << (i))

#define XFS_INOCF_IS_CF(rp, i) \
		(((rp)->ino_confirmed & XFS_INOCF_MASK((i))) == 0LL \
			? 0 : 1)
#define XFS_INOCF_SET_CF(rp, i) \
		((rp)->ino_confirmed |= XFS_INOCF_MASK((i)))
#define XFS_INOCF_CLR_CF(rp, i) \
		((rp)->ino_confirmed &= ~XFS_INOCF_MASK((i)))

/*
 * same for backptr->ino_reached
 */
#define XFS_INO_RCHD_MASK(i)	((__uint64_t)1 << (i))

#define XFS_INO_RCHD_IS_RCHD(rp, i) \
	(((rp)->ino_un.ex_data->ino_reached & XFS_INO_RCHD_MASK((i))) == 0LL \
		? 0 : 1)
#define XFS_INO_RCHD_SET_RCHD(rp, i) \
		((rp)->ino_un.ex_data->ino_reached |= XFS_INO_RCHD_MASK((i)))
#define XFS_INO_RCHD_CLR_RCHD(rp, i) \
		((rp)->ino_un.ex_data->ino_reached &= ~XFS_INO_RCHD_MASK((i)))
/*
 * set/clear/test is inode a directory inode
 */
#define XFS_INO_ISADIR_MASK(i)	((__uint64_t)1 << (i))

#define inode_isadir(ino_rec, ino_offset) \
	(((ino_rec)->ino_isa_dir & XFS_INO_ISADIR_MASK((ino_offset))) == 0LL \
		? 0 : 1)
#define set_inode_isadir(ino_rec, ino_offset) \
		((ino_rec)->ino_isa_dir |= XFS_INO_ISADIR_MASK((ino_offset)))
#define clear_inode_isadir(ino_rec, ino_offset) \
		((ino_rec)->ino_isa_dir &= ~XFS_INO_ISADIR_MASK((ino_offset)))


/*
 * set/clear/test is inode known to be valid (although perhaps corrupt)
 */
#define clear_inode_confirmed(ino_rec, ino_offset) \
		XFS_INOCF_CLR_CF((ino_rec), (ino_offset))

#define set_inode_confirmed(ino_rec, ino_offset) \
		XFS_INOCF_SET_CF((ino_rec), (ino_offset))

#define is_inode_confirmed(ino_rec, ino_offset) \
		XFS_INOCF_IS_CF(ino_rec, ino_offset)

/*
 * set/clear/test is inode free or used
 */
#define set_inode_free(ino_rec, ino_offset) \
	XFS_INOCF_SET_CF((ino_rec), (ino_offset)), \
	XFS_INOBT_SET_FREE((ino_rec), (ino_offset))

#define set_inode_used(ino_rec, ino_offset) \
	XFS_INOCF_SET_CF((ino_rec), (ino_offset)), \
	XFS_INOBT_CLR_FREE((ino_rec), (ino_offset))

#define is_inode_used(ino_rec, ino_offset)	\
	!XFS_INOBT_IS_FREE((ino_rec), (ino_offset))

#define is_inode_free(ino_rec, ino_offset)	\
	XFS_INOBT_IS_FREE((ino_rec), (ino_offset))
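
/*
 * Illustrative sketch only (editorial addition): walking an AG's inode
 * records with findfirst_inode_rec()/next_ino_rec() and testing the
 * per-inode bits defined above.  The helper name is hypothetical and the
 * block is #if 0'd out so it is never compiled.
 */
#if 0
static int
example_count_confirmed_dirs(xfs_agnumber_t agno)
{
	ino_tree_node_t	*irec;
	int		i;
	int		ndirs = 0;

	for (irec = findfirst_inode_rec(agno);
	     irec != NULL;
	     irec = next_ino_rec(irec)) {
		/* each record covers INOS_PER_IREC (64) inodes */
		for (i = 0; i < INOS_PER_IREC; i++)  {
			if (is_inode_confirmed(irec, i) &&
			    inode_isadir(irec, i))
				ndirs++;
		}
	}
	return ndirs;
}
#endif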

/*
 * add_inode_reached() is called on inode I only if I has been reached
 * by an inode P claiming to be its parent and, if I is a directory,
 * the .. link in I says that P is I's parent.
 *
 * add_inode_ref() is called every time a link to an inode is
 * detected and drop_inode_ref() is called every time a link to
 * an inode that we've counted is removed.
 */

static inline int
is_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.ex_data != NULL);
	return(XFS_INO_RCHD_IS_RCHD(ino_rec, ino_offset));
}

static inline void
add_inode_reached(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.ex_data != NULL);

	(*ino_rec->nlinkops->counted_nlink_inc)(ino_rec, ino_offset);
	XFS_INO_RCHD_SET_RCHD(ino_rec, ino_offset);

	ASSERT(is_inode_reached(ino_rec, ino_offset));
}

static inline void
add_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.ex_data != NULL);

	(*ino_rec->nlinkops->counted_nlink_inc)(ino_rec, ino_offset);
}

static inline void
drop_inode_ref(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.ex_data != NULL);

	if ((*ino_rec->nlinkops->counted_nlink_dec)(ino_rec, ino_offset) == 0)
		XFS_INO_RCHD_CLR_RCHD(ino_rec, ino_offset);
}

static inline int
is_inode_referenced(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.ex_data != NULL);

	return (*ino_rec->nlinkops->counted_nlink_get)(ino_rec, ino_offset) > 0;
}

static inline __uint32_t
num_inode_references(ino_tree_node_t *ino_rec, int ino_offset)
{
	ASSERT(ino_rec->ino_un.ex_data != NULL);

	return (*ino_rec->nlinkops->counted_nlink_get)(ino_rec, ino_offset);
}

static inline void
set_inode_disk_nlinks(ino_tree_node_t *ino_rec, int ino_offset, __uint32_t nlinks)
{
	(*ino_rec->nlinkops->disk_nlink_set)(ino_rec, ino_offset, nlinks);
}

static inline __uint32_t
get_inode_disk_nlinks(ino_tree_node_t *ino_rec, int ino_offset)
{
	return (*ino_rec->nlinkops->disk_nlink_get)(ino_rec, ino_offset);
}
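
/*
 * Illustrative sketch only (editorial addition): the counted-nlink helpers
 * above are meant to be used in pairs -- one add_inode_ref() per directory
 * entry found, one drop_inode_ref() per entry discarded -- so the counted
 * nlink always mirrors the number of surviving references.  Hypothetical
 * helper, never compiled.
 */
#if 0
static void
example_count_link(ino_tree_node_t *irec, int ino_offset)
{
	__uint32_t	before = num_inode_references(irec, ino_offset);

	/* a directory entry referencing this inode was found ... */
	add_inode_ref(irec, ino_offset);
	ASSERT(num_inode_references(irec, ino_offset) == before + 1);

	/* ... but was later judged bogus and removed again */
	drop_inode_ref(irec, ino_offset);
	ASSERT(num_inode_references(irec, ino_offset) == before);
}
#endif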

/*
 * has an inode been processed for phase 6 (reference count checking)?
 * add_inode_refchecked() marks an inode when it gets traversed
 * during the reference count phase (6).  The mark ensures that if the
 * inode is a directory, it is traversed (and its links counted) only once.
 */
#ifndef XR_INO_REF_DEBUG
#define add_inode_refchecked(ino, ino_rec, ino_offset) \
		XFS_INOPROC_SET_PROC((ino_rec), (ino_offset))
#define is_inode_refchecked(ino, ino_rec, ino_offset) \
		(XFS_INOPROC_IS_PROC(ino_rec, ino_offset) != 0LL)
#else
void add_inode_refchecked(xfs_ino_t ino,
			ino_tree_node_t *ino_rec, int ino_offset);
int is_inode_refchecked(xfs_ino_t ino,
			ino_tree_node_t *ino_rec, int ino_offset);
#endif /* XR_INO_REF_DEBUG */

/*
 * set/get inode number of parent -- works for directory inodes only
 */
void		set_inode_parent(ino_tree_node_t *irec, int ino_offset,
					xfs_ino_t ino);
xfs_ino_t	get_inode_parent(ino_tree_node_t *irec, int ino_offset);

/*
 * bmap cursor for tracking and fixing bmap btrees.  All xfs btrees number
 * the levels with 0 being the leaf and every level up being 1 greater.
 */

#define XR_MAX_BMLEVELS		10	/* XXX - rcc need to verify number */

typedef struct bm_level_state  {
	xfs_dfsbno_t		fsbno;
	xfs_dfsbno_t		left_fsbno;
	xfs_dfsbno_t		right_fsbno;
	__uint64_t		first_key;
	__uint64_t		last_key;
/*
	int			level;
	__uint64_t		prev_last_key;
	xfs_buf_t		*bp;
	xfs_bmbt_block_t	*block;
*/
} bm_level_state_t;

typedef struct bm_cursor  {
	int			num_levels;
	xfs_ino_t		ino;
	xfs_dinode_t		*dip;
	bm_level_state_t	level[XR_MAX_BMLEVELS];
} bmap_cursor_t;

void init_bm_cursor(bmap_cursor_t *cursor, int num_level);
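
/*
 * Illustrative sketch only (editorial addition): a hypothetical caller
 * preparing a cursor before walking one inode's bmap btree.  It assumes
 * init_bm_cursor() resets the per-level state and that the caller fills
 * in the inode identity afterwards; the block is never compiled.
 */
#if 0
static void
example_setup_bm_cursor(bmap_cursor_t *cursor, xfs_ino_t ino,
		xfs_dinode_t *dip, int num_levels)
{
	init_bm_cursor(cursor, num_levels);
	cursor->ino = ino;
	cursor->dip = dip;
}
#endif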

#endif /* XFS_REPAIR_INCORE_H */