]>
Commit | Line | Data |
---|---|---|
959ef981 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2bd0ea18 | 2 | /* |
da23017d NS |
3 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. |
4 | * All Rights Reserved. | |
2bd0ea18 NS |
5 | */ |
6 | ||
2556c98b BN |
7 | #ifndef XFS_REPAIR_INCORE_H |
8 | #define XFS_REPAIR_INCORE_H | |
9 | ||
10 | #include "avl.h" | |
79872d6e BN |
11 | |
12 | ||
2bd0ea18 NS |
13 | /* |
14 | * contains definition information. implementation (code) | |
15 | * is spread out in separate files. | |
16 | */ | |
17 | ||
2bd0ea18 | 18 | /* |
8961bfde | 19 | * block map -- track state of each filesystem block. |
2bd0ea18 | 20 | */ |
2bd0ea18 | 21 | |
8961bfde BN |
22 | void init_bmaps(xfs_mount_t *mp); |
23 | void reset_bmaps(xfs_mount_t *mp); | |
24 | void free_bmaps(xfs_mount_t *mp); | |
25 | ||
26 | void set_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno, | |
27 | xfs_extlen_t blen, int state); | |
28 | int get_bmap_ext(xfs_agnumber_t agno, xfs_agblock_t agbno, | |
29 | xfs_agblock_t maxbno, xfs_extlen_t *blen); | |
30 | ||
5a35bf2c DC |
31 | void set_rtbmap(xfs_rtblock_t bno, int state); |
32 | int get_rtbmap(xfs_rtblock_t bno); | |
8961bfde BN |
33 | |
34 | static inline void | |
35 | set_bmap(xfs_agnumber_t agno, xfs_agblock_t agbno, int state) | |
36 | { | |
37 | set_bmap_ext(agno, agbno, 1, state); | |
38 | } | |
39 | ||
40 | static inline int | |
41 | get_bmap(xfs_agnumber_t agno, xfs_agblock_t agbno) | |
42 | { | |
43 | return get_bmap_ext(agno, agbno, agbno + 1, NULL); | |
44 | } | |
2bd0ea18 NS |
45 | |
46 | /* | |
47 | * extent tree definitions | |
48 | * right now, there are 3 trees per AG, a bno tree, a bcnt tree | |
49 | * and a tree for dup extents. If the code is modified in the | |
50 | * future to use an extent tree instead of a bitmask for tracking | |
51 | * fs blocks, then we could lose the dup extent tree if we labelled | |
52 | * each extent with the inode that owned it. | |
53 | */ | |
54 | ||
55 | typedef unsigned char extent_state_t; | |
56 | ||
57 | typedef struct extent_tree_node { | |
58 | avlnode_t avl_node; | |
59 | xfs_agblock_t ex_startblock; /* starting block (agbno) */ | |
60 | xfs_extlen_t ex_blockcount; /* number of blocks in extent */ | |
61 | extent_state_t ex_state; /* see state flags below */ | |
62 | ||
63 | struct extent_tree_node *next; /* for bcnt extent lists */ | |
b87887d4 | 64 | struct extent_tree_node *last; /* for bcnt extent list anchors */ |
2bd0ea18 NS |
65 | #if 0 |
66 | xfs_ino_t ex_inode; /* owner, NULL if free or */ | |
67 | /* multiply allocated */ | |
68 | #endif | |
69 | } extent_tree_node_t; | |
70 | ||
71 | typedef struct rt_extent_tree_node { | |
72 | avlnode_t avl_node; | |
5a35bf2c | 73 | xfs_rtblock_t rt_startblock; /* starting realtime block */ |
2bd0ea18 NS |
74 | xfs_extlen_t rt_blockcount; /* number of blocks in extent */ |
75 | extent_state_t rt_state; /* see state flags below */ | |
76 | ||
77 | #if 0 | |
78 | xfs_ino_t ex_inode; /* owner, NULL if free or */ | |
79 | /* multiply allocated */ | |
80 | #endif | |
81 | } rt_extent_tree_node_t; | |
82 | ||
/* extent states, prefix with XR_ to avoid conflict with buffer cache defines */

#define XR_E_UNKNOWN	0	/* unknown state */
#define XR_E_FREE1	1	/* free block (marked by one fs space tree) */
#define XR_E_FREE	2	/* free block (marked by both fs space trees) */
#define XR_E_INUSE	3	/* extent used by file/dir data or metadata */
#define XR_E_INUSE_FS	4	/* extent used by fs ag header or log */
#define XR_E_MULT	5	/* extent is multiply referenced */
#define XR_E_INO	6	/* extent used by inodes (inode blocks) */
#define XR_E_FS_MAP	7	/* extent used by fs space/inode maps */
#define XR_E_INUSE1	8	/* used block (marked by rmap btree) */
#define XR_E_INUSE_FS1	9	/* used by fs ag header or log (rmap btree) */
#define XR_E_INO1	10	/* used by inodes (marked by rmap btree) */
#define XR_E_FS_MAP1	11	/* used by fs space/inode maps (rmap btree) */
#define XR_E_REFC	12	/* used by fs ag reference count btree */
#define XR_E_COW	13	/* leftover cow extent */
#define XR_E_BAD_STATE	14
/* separate state bit, OR'ed into high (4th) bit of ex_state field */

#define XR_E_WRITTEN	0x8	/* extent has been written out, can't reclaim */
#define written(state)		((state) & XR_E_WRITTEN)
/*
 * set_written() must OR the bit into the state (as the comment above says);
 * the previous "&= XR_E_WRITTEN" could only clear the other state bits and
 * could never turn the written bit on.
 */
#define set_written(state)	(state) |= XR_E_WRITTEN
106 | ||
107 | /* | |
108 | * bno extent tree functions | |
109 | */ | |
110 | void | |
111 | add_bno_extent(xfs_agnumber_t agno, xfs_agblock_t startblock, | |
112 | xfs_extlen_t blockcount); | |
113 | ||
114 | extent_tree_node_t * | |
115 | findfirst_bno_extent(xfs_agnumber_t agno); | |
116 | ||
117 | extent_tree_node_t * | |
118 | find_bno_extent(xfs_agnumber_t agno, xfs_agblock_t agbno); | |
119 | ||
120 | extent_tree_node_t * | |
121 | findfirst_bno_extent(xfs_agnumber_t agno); | |
122 | ||
123 | #define findnext_bno_extent(exent_ptr) \ | |
124 | ((extent_tree_node_t *) ((exent_ptr)->avl_node.avl_nextino)) | |
125 | ||
126 | void | |
127 | get_bno_extent(xfs_agnumber_t agno, extent_tree_node_t *ext); | |
128 | ||
129 | /* | |
130 | * bcnt tree functions | |
131 | */ | |
132 | void | |
133 | add_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock, | |
134 | xfs_extlen_t blockcount); | |
135 | ||
136 | extent_tree_node_t * | |
137 | findfirst_bcnt_extent(xfs_agnumber_t agno); | |
138 | ||
139 | extent_tree_node_t * | |
140 | find_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t agbno); | |
141 | ||
142 | extent_tree_node_t * | |
143 | findbiggest_bcnt_extent(xfs_agnumber_t agno); | |
144 | ||
145 | extent_tree_node_t * | |
146 | findnext_bcnt_extent(xfs_agnumber_t agno, extent_tree_node_t *ext); | |
147 | ||
148 | extent_tree_node_t * | |
149 | get_bcnt_extent(xfs_agnumber_t agno, xfs_agblock_t startblock, | |
150 | xfs_extlen_t blockcount); | |
151 | ||
152 | /* | |
153 | * duplicate extent tree functions | |
154 | */ | |
2bd0ea18 | 155 | |
79872d6e BN |
156 | int add_dup_extent(xfs_agnumber_t agno, xfs_agblock_t startblock, |
157 | xfs_extlen_t blockcount); | |
158 | int search_dup_extent(xfs_agnumber_t agno, | |
159 | xfs_agblock_t start_agbno, xfs_agblock_t end_agbno); | |
5a35bf2c | 160 | void add_rt_dup_extent(xfs_rtblock_t startblock, |
2bd0ea18 NS |
161 | xfs_extlen_t blockcount); |
162 | ||
163 | int search_rt_dup_extent(xfs_mount_t *mp, | |
5a35bf2c | 164 | xfs_rtblock_t bno); |
2bd0ea18 NS |
165 | |
166 | /* | |
167 | * extent/tree recyling and deletion routines | |
168 | */ | |
169 | ||
170 | /* | |
171 | * return an extent node to the extent node free list | |
172 | */ | |
173 | void release_extent_tree_node(extent_tree_node_t *node); | |
174 | ||
175 | /* | |
176 | * recycle all the nodes in the per-AG tree | |
177 | */ | |
178 | void release_dup_extent_tree(xfs_agnumber_t agno); | |
179 | void release_agbno_extent_tree(xfs_agnumber_t agno); | |
180 | void release_agbcnt_extent_tree(xfs_agnumber_t agno); | |
181 | ||
182 | /* | |
183 | * realtime duplicate extent tree - this one actually frees the memory | |
184 | */ | |
185 | void free_rt_dup_extent_tree(xfs_mount_t *mp); | |
186 | ||
c1f7a46c | 187 | void incore_ext_init(xfs_mount_t *); |
2bd0ea18 NS |
188 | /* |
189 | * per-AG extent trees shutdown routine -- all (bno, bcnt and dup) | |
190 | * at once. this one actually frees the memory instead of just recyling | |
191 | * the nodes. | |
192 | */ | |
193 | void incore_ext_teardown(xfs_mount_t *mp); | |
c1f7a46c BN |
194 | void incore_ino_init(xfs_mount_t *); |
195 | ||
52cb19dc CH |
196 | int count_bno_extents(xfs_agnumber_t); |
197 | int count_bno_extents_blocks(xfs_agnumber_t, uint *); | |
198 | int count_bcnt_extents(xfs_agnumber_t); | |
199 | ||
/*
 * inode definitions
 */

/* inode types */

#define XR_INO_UNKNOWN	0	/* unknown */
#define XR_INO_DIR	1	/* directory */
#define XR_INO_RTDATA	2	/* realtime file */
#define XR_INO_RTBITMAP	3	/* realtime bitmap inode */
#define XR_INO_RTSUM	4	/* realtime summary inode */
#define XR_INO_DATA	5	/* regular file */
#define XR_INO_SYMLINK	6	/* symlink */
#define XR_INO_CHRDEV	7	/* character device */
#define XR_INO_BLKDEV	8	/* block device */
#define XR_INO_SOCK	9	/* socket */
#define XR_INO_FIFO	10	/* fifo */
#define XR_INO_UQUOTA	12	/* user quota inode */
#define XR_INO_GQUOTA	13	/* group quota inode */
#define XR_INO_PQUOTA	14	/* project quota inode */
2bd0ea18 NS |
220 | |
221 | /* inode allocation tree */ | |
222 | ||
223 | /* | |
224 | * Inodes in the inode allocation trees are allocated in chunks. | |
225 | * Those groups can be easily duplicated in our trees. | |
226 | * Disconnected inodes are harder. We can do one of two | |
227 | * things in that case: if we know the inode allocation btrees | |
228 | * are good, then we can disallow directory references to unknown | |
229 | * inode chunks. If the inode allocation trees have been trashed or | |
230 | * we feel like being aggressive, then as we hit unknown inodes, | |
231 | * we can search on the disk for all contiguous inodes and see if | |
232 | * they fit into chunks. Before putting them into the inode tree, | |
233 | * we can scan each inode starting at the earliest inode to see which | |
234 | * ones are good. This protects us from the pathological case of |
235 | * inodes appearing in user-data. We still may have to mark the | |
236 | * inodes as "possibly fake" so that if a file claims the blocks, | |
237 | * we decide to believe the inodes, especially if they're not | |
238 | * connected. | |
239 | */ | |
240 | ||
241 | #define PLIST_CHUNK_SIZE 4 | |
242 | ||
243 | typedef xfs_ino_t parent_entry_t; | |
244 | ||
0f012a4c BN |
245 | struct nlink_ops; |
246 | ||
2bd0ea18 | 247 | typedef struct parent_list { |
14f8b681 | 248 | uint64_t pmask; |
2bd0ea18 NS |
249 | parent_entry_t *pentries; |
250 | #ifdef DEBUG | |
251 | short cnt; | |
252 | #endif | |
253 | } parent_list_t; | |
254 | ||
edfb350c | 255 | union ino_nlink { |
14f8b681 DW |
256 | uint8_t *un8; |
257 | uint16_t *un16; | |
258 | uint32_t *un32; | |
edfb350c CH |
259 | }; |
260 | ||
0f012a4c | 261 | typedef struct ino_ex_data { |
14f8b681 DW |
262 | uint64_t ino_reached; /* bit == 1 if reached */ |
263 | uint64_t ino_processed; /* reference checked bit mask */ | |
2bd0ea18 | 264 | parent_list_t *parents; |
edfb350c | 265 | union ino_nlink counted_nlinks;/* counted nlinks in P6 */ |
0f012a4c | 266 | } ino_ex_data_t; |
2bd0ea18 NS |
267 | |
268 | typedef struct ino_tree_node { | |
269 | avlnode_t avl_node; | |
270 | xfs_agino_t ino_startnum; /* starting inode # */ | |
271 | xfs_inofree_t ir_free; /* inode free bit mask */ | |
14f8b681 DW |
272 | uint64_t ir_sparse; /* sparse inode bitmask */ |
273 | uint64_t ino_confirmed; /* confirmed bitmask */ | |
274 | uint64_t ino_isa_dir; /* bit == 1 if a directory */ | |
275 | uint64_t ino_was_rl; /* bit == 1 if reflink flag set */ | |
276 | uint64_t ino_is_rl; /* bit == 1 if reflink flag should be set */ | |
277 | uint8_t nlink_size; | |
edfb350c | 278 | union ino_nlink disk_nlinks; /* on-disk nlinks, set in P3 */ |
2bd0ea18 | 279 | union { |
0f012a4c BN |
280 | ino_ex_data_t *ex_data; /* phases 6,7 */ |
281 | parent_list_t *plist; /* phases 2-5 */ | |
2bd0ea18 | 282 | } ino_un; |
14f8b681 | 283 | uint8_t *ftypes; /* phases 3,6 */ |
96496f5d | 284 | pthread_mutex_t lock; |
2bd0ea18 NS |
285 | } ino_tree_node_t; |
286 | ||
14f8b681 DW |
287 | #define INOS_PER_IREC (sizeof(uint64_t) * NBBY) |
288 | #define IREC_MASK(i) ((uint64_t)1 << (i)) | |
f4ef1178 | 289 | |
0f012a4c | 290 | void add_ino_ex_data(xfs_mount_t *mp); |
2bd0ea18 NS |
291 | |
292 | /* | |
293 | * return an inode record to the free inode record pool | |
294 | */ | |
295 | void free_inode_rec(xfs_agnumber_t agno, ino_tree_node_t *ino_rec); | |
296 | ||
297 | /* | |
298 | * get pulls the inode record from the good inode tree | |
299 | */ | |
1ae311d5 LC |
300 | void get_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, |
301 | ino_tree_node_t *ino_rec); | |
2bd0ea18 | 302 | |
1e77098c | 303 | extern avltree_desc_t **inode_tree_ptrs; |
1b9f3650 CM |
304 | |
305 | static inline int | |
306 | get_inode_offset(struct xfs_mount *mp, xfs_ino_t ino, ino_tree_node_t *irec) | |
307 | { | |
308 | return XFS_INO_TO_AGINO(mp, ino) - irec->ino_startnum; | |
309 | } | |
78a0dc91 | 310 | static inline ino_tree_node_t * |
1e77098c MV |
311 | findfirst_inode_rec(xfs_agnumber_t agno) |
312 | { | |
313 | return((ino_tree_node_t *) inode_tree_ptrs[agno]->avl_firstino); | |
314 | } | |
78a0dc91 | 315 | static inline ino_tree_node_t * |
1ae311d5 | 316 | find_inode_rec(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t ino) |
1e77098c | 317 | { |
1ae311d5 LC |
318 | /* |
319 | * Is the AG inside the file system | |
320 | */ | |
321 | if (agno >= mp->m_sb.sb_agcount) | |
322 | return NULL; | |
1e77098c MV |
323 | return((ino_tree_node_t *) |
324 | avl_findrange(inode_tree_ptrs[agno], ino)); | |
325 | } | |
1ae311d5 | 326 | void find_inode_rec_range(struct xfs_mount *mp, xfs_agnumber_t agno, |
2bd0ea18 NS |
327 | xfs_agino_t start_ino, xfs_agino_t end_ino, |
328 | ino_tree_node_t **first, ino_tree_node_t **last); | |
329 | ||
330 | /* | |
331 | * set inode states -- setting an inode to used or free also | |
332 | * automatically marks it as "existing". Note -- all the inode | |
333 | * add/set/get routines assume a valid inode number. | |
334 | */ | |
1ae311d5 LC |
335 | ino_tree_node_t *set_inode_used_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, |
336 | xfs_agino_t ino); | |
337 | ino_tree_node_t *set_inode_free_alloc(struct xfs_mount *mp, xfs_agnumber_t agno, | |
338 | xfs_agino_t ino); | |
2bd0ea18 NS |
339 | |
340 | void print_inode_list(xfs_agnumber_t agno); | |
341 | void print_uncertain_inode_list(xfs_agnumber_t agno); | |
342 | ||
343 | /* | |
344 | * separate trees for uncertain inodes (they may not exist). | |
345 | */ | |
346 | ino_tree_node_t *findfirst_uncertain_inode_rec(xfs_agnumber_t agno); | |
2d9475a4 NS |
347 | ino_tree_node_t *find_uncertain_inode_rec(xfs_agnumber_t agno, |
348 | xfs_agino_t ino); | |
2bd0ea18 NS |
349 | void add_inode_uncertain(xfs_mount_t *mp, |
350 | xfs_ino_t ino, int free); | |
aaca101b DC |
351 | void add_aginode_uncertain(struct xfs_mount *mp, |
352 | xfs_agnumber_t agno, | |
2bd0ea18 | 353 | xfs_agino_t agino, int free); |
1ae311d5 LC |
354 | void get_uncertain_inode_rec(struct xfs_mount *mp, |
355 | xfs_agnumber_t agno, | |
2bd0ea18 NS |
356 | ino_tree_node_t *ino_rec); |
357 | void clear_uncertain_ino_cache(xfs_agnumber_t agno); | |
358 | ||
359 | /* | |
360 | * return next in-order inode tree node. takes an "ino_tree_node_t *" | |
361 | */ | |
362 | #define next_ino_rec(ino_node_ptr) \ | |
363 | ((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_nextino)) | |
364 | /* | |
365 | * return the next linked inode (forward avl tree link)-- meant to be used | |
366 | * by linked list routines (uncertain inode routines/records) | |
367 | */ | |
368 | #define next_link_rec(ino_node_ptr) \ | |
369 | ((ino_tree_node_t *) ((ino_node_ptr)->avl_node.avl_forw)) | |
370 | ||
01159bd2 BF |
371 | /* |
372 | * finobt helpers | |
373 | */ | |
c77c41ee BF |
374 | |
375 | static inline bool | |
376 | inode_rec_has_free(struct ino_tree_node *ino_rec) | |
377 | { | |
378 | /* must have real, allocated inodes for finobt */ | |
379 | return ino_rec->ir_free & ~ino_rec->ir_sparse; | |
380 | } | |
381 | ||
01159bd2 BF |
382 | static inline ino_tree_node_t * |
383 | findfirst_free_inode_rec(xfs_agnumber_t agno) | |
384 | { | |
385 | ino_tree_node_t *ino_rec; | |
386 | ||
387 | ino_rec = findfirst_inode_rec(agno); | |
388 | ||
c77c41ee | 389 | while (ino_rec && !inode_rec_has_free(ino_rec)) |
01159bd2 BF |
390 | ino_rec = next_ino_rec(ino_rec); |
391 | ||
392 | return ino_rec; | |
393 | } | |
394 | ||
395 | static inline ino_tree_node_t * | |
396 | next_free_ino_rec(ino_tree_node_t *ino_rec) | |
397 | { | |
398 | ino_rec = next_ino_rec(ino_rec); | |
399 | ||
c77c41ee | 400 | while (ino_rec && !inode_rec_has_free(ino_rec)) |
01159bd2 BF |
401 | ino_rec = next_ino_rec(ino_rec); |
402 | ||
403 | return ino_rec; | |
404 | } | |
405 | ||
2bd0ea18 | 406 | /* |
f4ef1178 CH |
407 | * Has an inode been processed for phase 6 (reference count checking)? |
408 | * | |
409 | * add_inode_refchecked() is set on an inode when it gets traversed | |
410 | * during the reference count phase (6). It's set so that if the inode | |
411 | * is a directory, it's traversed (and it's links counted) only once. | |
2bd0ea18 | 412 | */ |
f4ef1178 CH |
413 | static inline void add_inode_refchecked(struct ino_tree_node *irec, int offset) |
414 | { | |
96496f5d | 415 | pthread_mutex_lock(&irec->lock); |
f4ef1178 | 416 | irec->ino_un.ex_data->ino_processed |= IREC_MASK(offset); |
96496f5d | 417 | pthread_mutex_unlock(&irec->lock); |
f4ef1178 | 418 | } |
2bd0ea18 | 419 | |
f4ef1178 CH |
420 | static inline int is_inode_refchecked(struct ino_tree_node *irec, int offset) |
421 | { | |
422 | return (irec->ino_un.ex_data->ino_processed & IREC_MASK(offset)) != 0; | |
423 | } | |
2bd0ea18 NS |
424 | |
425 | /* | |
f4ef1178 | 426 | * set/test is inode known to be valid (although perhaps corrupt) |
2bd0ea18 | 427 | */ |
f4ef1178 CH |
428 | static inline void set_inode_confirmed(struct ino_tree_node *irec, int offset) |
429 | { | |
430 | irec->ino_confirmed |= IREC_MASK(offset); | |
431 | } | |
2bd0ea18 | 432 | |
f4ef1178 CH |
433 | static inline int is_inode_confirmed(struct ino_tree_node *irec, int offset) |
434 | { | |
435 | return (irec->ino_confirmed & IREC_MASK(offset)) != 0; | |
436 | } | |
2bd0ea18 NS |
437 | |
438 | /* | |
f4ef1178 | 439 | * set/clear/test is inode a directory inode |
2bd0ea18 | 440 | */ |
f4ef1178 CH |
441 | static inline void set_inode_isadir(struct ino_tree_node *irec, int offset) |
442 | { | |
96496f5d | 443 | pthread_mutex_lock(&irec->lock); |
f4ef1178 | 444 | irec->ino_isa_dir |= IREC_MASK(offset); |
96496f5d | 445 | pthread_mutex_unlock(&irec->lock); |
f4ef1178 | 446 | } |
2bd0ea18 | 447 | |
f4ef1178 CH |
448 | static inline void clear_inode_isadir(struct ino_tree_node *irec, int offset) |
449 | { | |
96496f5d | 450 | pthread_mutex_lock(&irec->lock); |
f4ef1178 | 451 | irec->ino_isa_dir &= ~IREC_MASK(offset); |
96496f5d | 452 | pthread_mutex_unlock(&irec->lock); |
f4ef1178 | 453 | } |
2bd0ea18 | 454 | |
f4ef1178 CH |
455 | static inline int inode_isadir(struct ino_tree_node *irec, int offset) |
456 | { | |
457 | return (irec->ino_isa_dir & IREC_MASK(offset)) != 0; | |
458 | } | |
2bd0ea18 NS |
459 | |
460 | /* | |
461 | * set/clear/test is inode free or used | |
462 | */ | |
f4ef1178 CH |
463 | static inline void set_inode_free(struct ino_tree_node *irec, int offset) |
464 | { | |
96496f5d | 465 | pthread_mutex_lock(&irec->lock); |
f4ef1178 CH |
466 | set_inode_confirmed(irec, offset); |
467 | irec->ir_free |= XFS_INOBT_MASK(offset); | |
96496f5d | 468 | pthread_mutex_unlock(&irec->lock); |
2bd0ea18 | 469 | |
f4ef1178 | 470 | } |
56b2de80 | 471 | |
f4ef1178 CH |
472 | static inline void set_inode_used(struct ino_tree_node *irec, int offset) |
473 | { | |
96496f5d | 474 | pthread_mutex_lock(&irec->lock); |
f4ef1178 CH |
475 | set_inode_confirmed(irec, offset); |
476 | irec->ir_free &= ~XFS_INOBT_MASK(offset); | |
96496f5d | 477 | pthread_mutex_unlock(&irec->lock); |
f4ef1178 | 478 | } |
2bd0ea18 | 479 | |
f4ef1178 CH |
480 | static inline int is_inode_free(struct ino_tree_node *irec, int offset) |
481 | { | |
482 | return (irec->ir_free & XFS_INOBT_MASK(offset)) != 0; | |
483 | } | |
2bd0ea18 | 484 | |
c749bd55 BF |
485 | /* |
486 | * set/test is inode sparse (not physically allocated) | |
487 | */ | |
488 | static inline void set_inode_sparse(struct ino_tree_node *irec, int offset) | |
489 | { | |
96496f5d | 490 | pthread_mutex_lock(&irec->lock); |
c749bd55 | 491 | irec->ir_sparse |= XFS_INOBT_MASK(offset); |
96496f5d | 492 | pthread_mutex_unlock(&irec->lock); |
c749bd55 BF |
493 | } |
494 | ||
495 | static inline bool is_inode_sparse(struct ino_tree_node *irec, int offset) | |
496 | { | |
497 | return irec->ir_sparse & XFS_INOBT_MASK(offset); | |
498 | } | |
499 | ||
7e174ec7 DW |
500 | /* |
501 | * set/clear/test was inode marked as reflinked | |
502 | */ | |
503 | static inline void set_inode_was_rl(struct ino_tree_node *irec, int offset) | |
504 | { | |
96496f5d | 505 | pthread_mutex_lock(&irec->lock); |
7e174ec7 | 506 | irec->ino_was_rl |= IREC_MASK(offset); |
96496f5d | 507 | pthread_mutex_unlock(&irec->lock); |
7e174ec7 DW |
508 | } |
509 | ||
510 | static inline void clear_inode_was_rl(struct ino_tree_node *irec, int offset) | |
511 | { | |
96496f5d | 512 | pthread_mutex_lock(&irec->lock); |
7e174ec7 | 513 | irec->ino_was_rl &= ~IREC_MASK(offset); |
96496f5d | 514 | pthread_mutex_unlock(&irec->lock); |
7e174ec7 DW |
515 | } |
516 | ||
517 | static inline int inode_was_rl(struct ino_tree_node *irec, int offset) | |
518 | { | |
519 | return (irec->ino_was_rl & IREC_MASK(offset)) != 0; | |
520 | } | |
521 | ||
522 | /* | |
523 | * set/clear/test should inode be marked as reflinked | |
524 | */ | |
525 | static inline void set_inode_is_rl(struct ino_tree_node *irec, int offset) | |
526 | { | |
96496f5d | 527 | pthread_mutex_lock(&irec->lock); |
7e174ec7 | 528 | irec->ino_is_rl |= IREC_MASK(offset); |
96496f5d | 529 | pthread_mutex_unlock(&irec->lock); |
7e174ec7 DW |
530 | } |
531 | ||
532 | static inline void clear_inode_is_rl(struct ino_tree_node *irec, int offset) | |
533 | { | |
96496f5d | 534 | pthread_mutex_lock(&irec->lock); |
7e174ec7 | 535 | irec->ino_is_rl &= ~IREC_MASK(offset); |
96496f5d | 536 | pthread_mutex_unlock(&irec->lock); |
7e174ec7 DW |
537 | } |
538 | ||
539 | static inline int inode_is_rl(struct ino_tree_node *irec, int offset) | |
540 | { | |
541 | return (irec->ino_is_rl & IREC_MASK(offset)) != 0; | |
542 | } | |
543 | ||
2bd0ea18 NS |
544 | /* |
545 | * add_inode_reached() is set on inode I only if I has been reached | |
546 | * by an inode P claiming to be the parent and if I is a directory, | |
547 | * the .. link in the I says that P is I's parent. | |
548 | * | |
549 | * add_inode_ref() is called every time a link to an inode is | |
550 | * detected and drop_inode_ref() is called every time a link to | |
551 | * an inode that we've counted is removed. | |
552 | */ | |
edfb350c CH |
553 | void add_inode_ref(struct ino_tree_node *irec, int offset); |
554 | void drop_inode_ref(struct ino_tree_node *irec, int offset); | |
14f8b681 | 555 | uint32_t num_inode_references(struct ino_tree_node *irec, int offset); |
2bd0ea18 | 556 | |
14f8b681 DW |
557 | void set_inode_disk_nlinks(struct ino_tree_node *irec, int offset, uint32_t nlinks); |
558 | uint32_t get_inode_disk_nlinks(struct ino_tree_node *irec, int offset); | |
1e77098c | 559 | |
f4ef1178 | 560 | static inline int is_inode_reached(struct ino_tree_node *irec, int offset) |
1e77098c | 561 | { |
f4ef1178 CH |
562 | ASSERT(irec->ino_un.ex_data != NULL); |
563 | return (irec->ino_un.ex_data->ino_reached & IREC_MASK(offset)) != 0; | |
1e77098c MV |
564 | } |
565 | ||
f4ef1178 | 566 | static inline void add_inode_reached(struct ino_tree_node *irec, int offset) |
1e77098c | 567 | { |
f4ef1178 | 568 | add_inode_ref(irec, offset); |
96496f5d | 569 | pthread_mutex_lock(&irec->lock); |
f4ef1178 | 570 | irec->ino_un.ex_data->ino_reached |= IREC_MASK(offset); |
96496f5d | 571 | pthread_mutex_unlock(&irec->lock); |
0f012a4c BN |
572 | } |
573 | ||
aaca101b DC |
574 | /* |
575 | * get/set inode filetype. Only used if the superblock feature bit is set | |
576 | * which allocates irec->ftypes. | |
577 | */ | |
578 | static inline void | |
579 | set_inode_ftype(struct ino_tree_node *irec, | |
580 | int ino_offset, | |
14f8b681 | 581 | uint8_t ftype) |
aaca101b DC |
582 | { |
583 | if (irec->ftypes) | |
584 | irec->ftypes[ino_offset] = ftype; | |
585 | } | |
586 | ||
14f8b681 | 587 | static inline uint8_t |
aaca101b DC |
588 | get_inode_ftype( |
589 | struct ino_tree_node *irec, | |
590 | int ino_offset) | |
591 | { | |
592 | if (!irec->ftypes) | |
593 | return XFS_DIR3_FT_UNKNOWN; | |
594 | return irec->ftypes[ino_offset]; | |
595 | } | |
596 | ||
2bd0ea18 NS |
597 | /* |
598 | * set/get inode number of parent -- works for directory inodes only | |
599 | */ | |
600 | void set_inode_parent(ino_tree_node_t *irec, int ino_offset, | |
601 | xfs_ino_t ino); | |
2bd0ea18 NS |
602 | xfs_ino_t get_inode_parent(ino_tree_node_t *irec, int ino_offset); |
603 | ||
2e162270 ES |
604 | /* |
605 | * Allocate extra inode data | |
606 | */ | |
607 | void alloc_ex_data(ino_tree_node_t *irec); | |
608 | ||
2bd0ea18 NS |
609 | /* |
610 | * bmap cursor for tracking and fixing bmap btrees. All xfs btrees number | |
611 | * the levels with 0 being the leaf and every level up being 1 greater. | |
612 | */ | |
613 | ||
614 | #define XR_MAX_BMLEVELS 10 /* XXX - rcc need to verify number */ | |
615 | ||
616 | typedef struct bm_level_state { | |
5a35bf2c DC |
617 | xfs_fsblock_t fsbno; |
618 | xfs_fsblock_t left_fsbno; | |
619 | xfs_fsblock_t right_fsbno; | |
14f8b681 DW |
620 | uint64_t first_key; |
621 | uint64_t last_key; | |
2bd0ea18 NS |
622 | /* |
623 | int level; | |
14f8b681 | 624 | uint64_t prev_last_key; |
167137fe | 625 | struct xfs_buf *bp; |
2bd0ea18 NS |
626 | xfs_bmbt_block_t *block; |
627 | */ | |
628 | } bm_level_state_t; | |
629 | ||
630 | typedef struct bm_cursor { | |
631 | int num_levels; | |
632 | xfs_ino_t ino; | |
7328ea6e | 633 | struct xfs_dinode *dip; |
2bd0ea18 NS |
634 | bm_level_state_t level[XR_MAX_BMLEVELS]; |
635 | } bmap_cursor_t; | |
636 | ||
637 | void init_bm_cursor(bmap_cursor_t *cursor, int num_level); | |
2556c98b | 638 | |
ac9a3f73 BF |
639 | /* |
640 | * On-disk inobt record helpers. The sparse inode record format has a single | |
641 | * byte freecount. The older format has a 32-bit freecount and thus byte | |
642 | * conversion is necessary. | |
643 | */ | |
644 | ||
645 | static inline int | |
646 | inorec_get_freecount( | |
647 | struct xfs_mount *mp, | |
648 | struct xfs_inobt_rec *rp) | |
649 | { | |
2660e653 | 650 | if (xfs_has_sparseinodes(mp)) |
ac9a3f73 BF |
651 | return rp->ir_u.sp.ir_freecount; |
652 | return be32_to_cpu(rp->ir_u.f.ir_freecount); | |
653 | } | |
654 | ||
655 | static inline void | |
656 | inorec_set_freecount( | |
657 | struct xfs_mount *mp, | |
658 | struct xfs_inobt_rec *rp, | |
659 | int freecount) | |
660 | { | |
2660e653 | 661 | if (xfs_has_sparseinodes(mp)) |
ac9a3f73 BF |
662 | rp->ir_u.sp.ir_freecount = freecount; |
663 | else | |
664 | rp->ir_u.f.ir_freecount = cpu_to_be32(freecount); | |
665 | } | |
666 | ||
2556c98b | 667 | #endif /* XFS_REPAIR_INCORE_H */ |