]>
Commit | Line | Data |
---|---|---|
2bd0ea18 | 1 | /* |
0d3e0b37 | 2 | * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. |
dfc130f3 | 3 | * |
2bd0ea18 NS |
4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of version 2 of the GNU General Public License as | |
6 | * published by the Free Software Foundation. | |
dfc130f3 | 7 | * |
2bd0ea18 NS |
8 | * This program is distributed in the hope that it would be useful, but |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | |
dfc130f3 | 11 | * |
2bd0ea18 NS |
12 | * Further, this software is distributed without any warranty that it is |
13 | * free of the rightful claim of any third person regarding infringement | |
14 | * or the like. Any license provided herein, whether implied or | |
15 | * otherwise, applies only to this software file. Patent licenses, if | |
16 | * any, provided herein do not apply to combinations of this program with | |
17 | * other software, or any other product whatsoever. | |
dfc130f3 | 18 | * |
2bd0ea18 NS |
19 | * You should have received a copy of the GNU General Public License along |
20 | * with this program; if not, write the Free Software Foundation, Inc., 59 | |
21 | * Temple Place - Suite 330, Boston MA 02111-1307, USA. | |
dfc130f3 | 22 | * |
2bd0ea18 NS |
23 | * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, |
24 | * Mountain View, CA 94043, or: | |
dfc130f3 RC |
25 | * |
26 | * http://www.sgi.com | |
27 | * | |
28 | * For further information regarding this notice, see: | |
29 | * | |
2bd0ea18 NS |
30 | * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ |
31 | */ | |
32 | ||
33 | #include <libxfs.h> | |
34 | #include "avl.h" | |
35 | #include "globals.h" | |
36 | #include "agheader.h" | |
37 | #include "incore.h" | |
38 | #include "protos.h" | |
39 | #include "err_protos.h" | |
40 | #include "dir.h" | |
41 | #include "dinode.h" | |
42 | #include "versions.h" | |
43 | ||
44 | /* | |
45 | * validates inode block or chunk, returns # of good inodes | |
46 | * the dinodes are verified using verify_uncertain_dinode() which | |
47 | * means only the basic inode info is checked, no fork checks. | |
48 | */ | |
49 | ||
50 | int | |
51 | check_aginode_block(xfs_mount_t *mp, | |
52 | xfs_agnumber_t agno, | |
53 | xfs_agblock_t agbno) | |
54 | { | |
55 | ||
56 | xfs_dinode_t *dino_p; | |
dfc130f3 RC |
57 | int i; |
58 | int cnt = 0; | |
2bd0ea18 NS |
59 | xfs_buf_t *bp; |
60 | ||
61 | /* | |
62 | * it's ok to read these possible inode blocks in one at | |
63 | * a time because they don't belong to known inodes (if | |
64 | * they did, we'd know about them courtesy of the incore inode | |
65 | * tree and we wouldn't be here and we stale the buffers out | |
66 | * so no one else will overlap them. | |
67 | */ | |
68 | bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, agbno), | |
69 | XFS_FSB_TO_BB(mp, 1), 0); | |
70 | if (!bp) { | |
507f4e33 | 71 | do_warn(_("cannot read agbno (%u/%u), disk block %lld\n"), agno, |
2bd0ea18 NS |
72 | agbno, (xfs_daddr_t)XFS_AGB_TO_DADDR(mp, agno, agbno)); |
73 | return(0); | |
74 | } | |
75 | ||
76 | for (i = 0; i < mp->m_sb.sb_inopblock; i++) { | |
77 | dino_p = XFS_MAKE_IPTR(mp, bp, i); | |
78 | if (!verify_uncertain_dinode(mp, dino_p, agno, | |
79 | XFS_OFFBNO_TO_AGINO(mp, agbno, i))) | |
80 | cnt++; | |
81 | } | |
82 | ||
83 | libxfs_putbuf(bp); | |
84 | return(cnt); | |
85 | } | |
86 | ||
87 | int | |
88 | check_inode_block(xfs_mount_t *mp, | |
89 | xfs_ino_t ino) | |
90 | { | |
dfc130f3 | 91 | return(check_aginode_block(mp, XFS_INO_TO_AGNO(mp, ino), |
2bd0ea18 NS |
92 | XFS_INO_TO_AGBNO(mp, ino))); |
93 | } | |
94 | ||
/*
 * tries to establish if the inode really exists in a valid
 * inode chunk. returns number of new inodes if things are good
 * and 0 if bad. start is the start of the discovered inode chunk.
 * routine assumes that ino is a legal inode number
 * (verified by verify_inum()). If the inode chunk turns out
 * to be good, this routine will put the inode chunk into
 * the good inode chunk tree if required.
 *
 * On success, *start_ino is set to the first inode of the discovered
 * chunk; on failure it is left as NULLFSINO.
 *
 * the verify_(ag)inode* family of routines are utility
 * routines called by check_uncertain_aginodes() and
 * process_uncertain_aginodes().
 */
int
verify_inode_chunk(xfs_mount_t		*mp,
			xfs_ino_t	ino,
			xfs_ino_t	*start_ino)
{
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	start_agino;
	xfs_agblock_t	agbno;
	xfs_agblock_t	start_agbno = 0;
	xfs_agblock_t	end_agbno;
	xfs_agblock_t	max_agbno;
	xfs_agblock_t	cur_agbno;
	xfs_agblock_t	chunk_start_agbno;
	xfs_agblock_t	chunk_stop_agbno;
	ino_tree_node_t	*irec_before_p = NULL;
	ino_tree_node_t	*irec_after_p = NULL;
	ino_tree_node_t	*irec_p;
	ino_tree_node_t	*irec_next_p;
	int		irec_cnt;
	int		ino_cnt = 0;
	int		num_blks;
	int		i;
	int		j;
	int		state;

	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_INO_TO_AGBNO(mp, ino);
	*start_ino = NULLFSINO;

	ASSERT(XFS_IALLOC_BLOCKS(mp) > 0);

	/* the last AG may be short; compute its true block count */
	if (agno == mp->m_sb.sb_agcount - 1)
		max_agbno = mp->m_sb.sb_dblocks -
			(xfs_drfsbno_t) mp->m_sb.sb_agblocks * agno;
	else
		max_agbno = mp->m_sb.sb_agblocks;

	/*
	 * is the inode beyond the end of the AG?
	 */
	if (agbno >= max_agbno)
		return(0);

	/*
	 * check for the easy case, inodes per block >= XFS_INODES_PER_CHUNK
	 * (multiple chunks per block)
	 */
	if (XFS_IALLOC_BLOCKS(mp) == 1)  {
		if (agbno > max_agbno)
			return(0);

		/* zero good inodes in the block -> not an inode block */
		if (check_inode_block(mp, ino) == 0)
			return(0);

		switch (state = get_agbno_state(mp, agno, agbno))  {
		case XR_E_INO:
			do_warn(
		_("uncertain inode block %d/%d already known\n"),
				agno, agbno);
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_agbno_state(mp, agno, agbno, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			/*
			 * if block is already claimed, forget it.
			 */
			do_warn(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, agbno, state);
			set_agbno_state(mp, agno, agbno, XR_E_MULT);
			return(0);
		default:
			do_warn(
		_("inode block %d/%d bad state, (state %d)\n"),
				agno, agbno, state);
			set_agbno_state(mp, agno, agbno, XR_E_INO);
			break;
		}

		start_agino = XFS_OFFBNO_TO_AGINO(mp, agbno, 0);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

		/*
		 * put new inode record(s) into inode tree -- one record
		 * per chunk, chunks_pblock chunks per block in this case
		 */
		for (j = 0; j < chunks_pblock; j++)  {
			if ((irec_p = find_inode_rec(agno, start_agino))
					== NULL)  {
				irec_p = set_inode_free_alloc(agno,
						start_agino);
				for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
					set_inode_free(irec_p, i);
			}
			/* mark only the inode that led us here as in-use */
			if (start_agino <= agino && agino <
					start_agino + XFS_INODES_PER_CHUNK)
				set_inode_used(irec_p, agino - start_agino);

			start_agino += XFS_INODES_PER_CHUNK;
			ino_cnt += XFS_INODES_PER_CHUNK;
		}

		return(ino_cnt);
	} else if (fs_aligned_inodes)  {
		/*
		 * next easy case -- aligned inode filesystem.
		 * just check out the chunk
		 */
		start_agbno = rounddown(XFS_INO_TO_AGBNO(mp, ino),
					fs_ino_alignment);
		end_agbno = start_agbno + XFS_IALLOC_BLOCKS(mp);

		/*
		 * if this fs has aligned inodes but the end of the
		 * chunk is beyond the end of the ag, this is a bad
		 * chunk
		 */
		if (end_agbno > max_agbno)
			return(0);

		/*
		 * check out all blocks in chunk
		 */
		ino_cnt = 0;
		for (cur_agbno = start_agbno; cur_agbno < end_agbno;
				cur_agbno++)  {
			ino_cnt += check_aginode_block(mp, agno, cur_agbno);
		}

		/*
		 * if we lose either 2 blocks worth of inodes or >25% of
		 * the chunk, just forget it.
		 */
		if (ino_cnt < XFS_INODES_PER_CHUNK - 2 * mp->m_sb.sb_inopblock
				|| ino_cnt < XFS_INODES_PER_CHUNK - 16)
			return(0);

		/*
		 * ok, put the record into the tree, if no conflict.
		 */
		if (find_uncertain_inode_rec(agno,
				XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0)))
			return(0);

		start_agino = XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

		irec_p = set_inode_free_alloc(agno,
				XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0));

		for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
			set_inode_free(irec_p, i);

		ASSERT(start_agino <= agino &&
				start_agino + XFS_INODES_PER_CHUNK > agino);

		set_inode_used(irec_p, agino - start_agino);

		return(XFS_INODES_PER_CHUNK);
	}

	/*
	 * hard case -- pre-6.3 filesystem.
	 * set default start/end agbnos and ensure agbnos are legal.
	 * we're setting a range [start_agbno, end_agbno) such that
	 * a discovered inode chunk completely within that range
	 * would include the inode passed into us.
	 */
	if (XFS_IALLOC_BLOCKS(mp) > 1)  {
		if (agino > XFS_IALLOC_INODES(mp))
			start_agbno = agbno - XFS_IALLOC_BLOCKS(mp) + 1;
		else
			start_agbno = 1;
	}

	end_agbno = agbno + XFS_IALLOC_BLOCKS(mp);

	if (end_agbno > max_agbno)
		end_agbno = max_agbno;

	/*
	 * search tree for known inodes within +/- 1 inode chunk range
	 */
	irec_before_p = irec_after_p = NULL;

	find_inode_rec_range(agno, XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0),
		XFS_OFFBNO_TO_AGINO(mp, end_agbno, mp->m_sb.sb_inopblock - 1),
		&irec_before_p, &irec_after_p);

	/*
	 * if we have known inode chunks in our search range, establish
	 * their start and end-points to tighten our search range.  range
	 * is [start, end) -- e.g. max/end agbno is one beyond the
	 * last block to be examined. the avl routines work this way.
	 */
	if (irec_before_p)  {
		/*
		 * only one inode record in the range, move one boundary in
		 */
		if (irec_before_p == irec_after_p)  {
			if (irec_before_p->ino_startnum < agino)
				start_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum +
						XFS_INODES_PER_CHUNK);
			else
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum);
		}

		/*
		 * find the start of the gap in the search range (which
		 * should contain our unknown inode). if the only irec
		 * within +/- 1 chunks starts after the inode we're
		 * looking for, skip this stuff since the end_agbno
		 * of the range has already been trimmed in to not
		 * include that irec.
		 */
		if (irec_before_p->ino_startnum < agino)  {
			irec_p = irec_before_p;
			irec_next_p = next_ino_rec(irec_p);

			/* walk forward over contiguous known chunks */
			while(irec_next_p != NULL &&
				irec_p->ino_startnum + XFS_INODES_PER_CHUNK ==
				irec_next_p->ino_startnum)  {
				irec_p = irec_next_p;
				irec_next_p = next_ino_rec(irec_next_p);
			}

			start_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_p->ino_startnum) +
						XFS_IALLOC_BLOCKS(mp);

			/*
			 * we know that the inode we're trying to verify isn't
			 * in an inode chunk so the next ino_rec marks the end
			 * of the gap -- is it within the search range?
			 */
			if (irec_next_p != NULL &&
					agino + XFS_IALLOC_INODES(mp) >=
						irec_next_p->ino_startnum)
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_next_p->ino_startnum);
		}

		ASSERT(start_agbno < end_agbno);
	}

	/*
	 * if the gap is too small to contain a chunk, we lose.
	 * this means that inode chunks known to be good surround
	 * the inode in question and that the space between them
	 * is too small for a legal inode chunk
	 */
	if (end_agbno - start_agbno < XFS_IALLOC_BLOCKS(mp))
		return(0);

	/*
	 * now grunge around the disk, start at the inode block and
	 * go in each direction until you hit a non-inode block or
	 * run into a range boundary.  A non-inode block is block
	 * with *no* good inodes in it.  Unfortunately, we can't
	 * co-opt bad blocks into inode chunks (which might take
	 * care of disk blocks that turn into zeroes) because the
	 * filesystem could very well allocate two inode chunks
	 * with a one block file in between and we'd zap the file.
	 * We're better off just losing the rest of the
	 * inode chunk instead.
	 *
	 * NOTE(review): this backward loop relies on start_agbno >= 1
	 * (set above for the hard case) so the unsigned cur_agbno
	 * cannot wrap past zero -- confirm if start_agbno can be 0 here.
	 */
	for (cur_agbno = agbno; cur_agbno >= start_agbno; cur_agbno--)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without decrementing cur_agbno so
		 * chunk start blockno will be set to the last good block
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_start_agbno = cur_agbno + 1;

	for (cur_agbno = agbno + 1; cur_agbno < end_agbno; cur_agbno++)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without incrementing cur_agbno so
		 * chunk start blockno will be set to the block
		 * immediately after the last good block.
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_stop_agbno = cur_agbno;

	num_blks = chunk_stop_agbno - chunk_start_agbno;

	if (num_blks < XFS_IALLOC_BLOCKS(mp) || ino_cnt == 0)
		return(0);

	/*
	 * XXX - later - if the entire range is selected and they're all
	 * good inodes, keep searching in either direction.
	 * until you the range of inodes end, then split into chunks
	 * for now, just take one chunk's worth starting at the lowest
	 * possible point and hopefully we'll pick the rest up later.
	 *
	 * XXX - if we were going to fix up an inode chunk for
	 * any good inodes in the chunk, this is where we would
	 * do it.  For now, keep it simple and lose the rest of
	 * the chunk
	 */

	if (num_blks % XFS_IALLOC_BLOCKS(mp) != 0)  {
		num_blks = rounddown(num_blks, XFS_IALLOC_BLOCKS(mp));
		chunk_stop_agbno = chunk_start_agbno + num_blks;
	}

	/*
	 * ok, we've got a candidate inode chunk.  now we have to
	 * verify that we aren't trying to use blocks that are already
	 * in use.  If so, mark them as multiply claimed since odds
	 * are very low that we found this chunk by stumbling across
	 * user data -- we're probably here as a result of a directory
	 * entry or an iunlinked pointer
	 */
	for (j = 0, cur_agbno = chunk_start_agbno;
			cur_agbno < chunk_stop_agbno; cur_agbno++)  {
		switch (state = get_agbno_state(mp, agno, cur_agbno))  {
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_warn(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			set_agbno_state(mp, agno, cur_agbno, XR_E_MULT);
			j = 1;	/* flag conflict; bail after the switch */
			break;
		case XR_E_INO:
			/* fatal: a known inode block overlapping here is
			 * an internal inconsistency */
			do_error(
		_("uncertain inode block overlap, agbno = %d, ino = %llu\n"),
				agbno, ino);
			break;
		default:
			break;
		}

		if (j)
			return(0);
	}

	/*
	 * ok, chunk is good.  put the record into the tree if required,
	 * and fill in the bitmap.  All inodes will be marked as "free"
	 * except for the one that led us to discover the chunk.  That's
	 * ok because we'll override the free setting later if the
	 * contents of the inode indicate it's in use.
	 */
	start_agino = XFS_OFFBNO_TO_AGINO(mp, chunk_start_agbno, 0);
	*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

	ASSERT(find_inode_rec(agno, start_agino) == NULL);

	irec_p = set_inode_free_alloc(agno, start_agino);
	for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
		set_inode_free(irec_p, i);

	ASSERT(start_agino <= agino &&
			start_agino + XFS_INODES_PER_CHUNK > agino);

	set_inode_used(irec_p, agino - start_agino);

	/* claim every block of the chunk in the incore block map */
	for (cur_agbno = chunk_start_agbno;
			cur_agbno < chunk_stop_agbno; cur_agbno++)  {
		switch (state = get_agbno_state(mp, agno, cur_agbno))  {
		case XR_E_INO:
			do_error(
			_("uncertain inode block %llu already known\n"),
				XFS_AGB_TO_FSB(mp, agno, cur_agbno));
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_agbno_state(mp, agno, cur_agbno, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_error(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			break;
		default:
			do_warn(
		_("inode block %d/%d bad state, (state %d)\n"),
				agno, cur_agbno, state);
			set_agbno_state(mp, agno, cur_agbno, XR_E_INO);
			break;
		}
	}

	return(ino_cnt);
}
520 | ||
521 | /* | |
522 | * same as above only for ag inode chunks | |
523 | */ | |
524 | int | |
525 | verify_aginode_chunk(xfs_mount_t *mp, | |
526 | xfs_agnumber_t agno, | |
527 | xfs_agino_t agino, | |
528 | xfs_agino_t *agino_start) | |
529 | { | |
530 | xfs_ino_t ino; | |
531 | int res; | |
532 | ||
533 | res = verify_inode_chunk(mp, XFS_AGINO_TO_INO(mp, agno, agino), &ino); | |
534 | ||
535 | if (res) | |
536 | *agino_start = XFS_INO_TO_AGINO(mp, ino); | |
537 | else | |
538 | *agino_start = NULLAGINO; | |
539 | ||
540 | return(res); | |
541 | } | |
542 | ||
543 | /* | |
544 | * this does the same as the two above only it returns a pointer | |
545 | * to the inode record in the good inode tree | |
546 | */ | |
547 | ino_tree_node_t * | |
548 | verify_aginode_chunk_irec(xfs_mount_t *mp, | |
549 | xfs_agnumber_t agno, | |
550 | xfs_agino_t agino) | |
551 | { | |
552 | xfs_agino_t start_agino; | |
553 | ino_tree_node_t *irec = NULL; | |
554 | ||
555 | if (verify_aginode_chunk(mp, agno, agino, &start_agino)) | |
556 | irec = find_inode_rec(agno, start_agino); | |
557 | ||
558 | return(irec); | |
559 | } | |
560 | ||
561 | ||
562 | ||
/*
 * processes an inode allocation chunk/block, returns 1 on I/O errors,
 * 0 otherwise
 *
 * *bogus is set to 1 if the entire set of inodes is bad.
 *
 * When ino_discovery is set, a first pass over the buffer verifies
 * that at least one inode in the chunk looks valid before the real
 * per-inode processing pass runs.
 */
/* ARGSUSED */
int
process_inode_chunk(xfs_mount_t *mp, xfs_agnumber_t agno, int num_inos,
		ino_tree_node_t *first_irec, int ino_discovery,
		int check_dups, int extra_attr_check, int *bogus)
{
	xfs_ino_t	parent;
	ino_tree_node_t	*ino_rec;
	xfs_buf_t	*bp;
	xfs_dinode_t	*dino;
	int		icnt;		/* inodes processed in this chunk */
	int		status;
	int		is_used;
	int		state;
	int		done;
	int		ino_dirty;
	int		irec_offset;	/* inode offset within current irec */
	int		ibuf_offset;	/* inode offset within current block */
	xfs_agino_t	agino;
	xfs_agblock_t	agbno;
	int		dirty = 0;	/* buffer needs writing back */
	int		cleared = 0;
	int		isa_dir = 0;

	ASSERT(first_irec != NULL);
	ASSERT(XFS_AGINO_TO_OFFSET(mp, first_irec->ino_startnum) == 0);

	*bogus = 0;
	ASSERT(XFS_IALLOC_BLOCKS(mp) > 0);

	/*
	 * get all blocks required to read in this chunk (may wind up
	 * having to process more chunks in a multi-chunk per block fs)
	 */
	agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);

	bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, agbno),
			XFS_FSB_TO_BB(mp, XFS_IALLOC_BLOCKS(mp)), 0);
	if (!bp) {
		do_warn(_("cannot read inode %llu, disk block %lld, cnt %d\n"),
			XFS_AGINO_TO_INO(mp, agno, first_irec->ino_startnum),
			XFS_AGB_TO_DADDR(mp, agno, agbno),
			(int)XFS_FSB_TO_BB(mp, XFS_IALLOC_BLOCKS(mp)));
		return(1);
	}

	/*
	 * set up first irec
	 */
	ino_rec = first_irec;
	/*
	 * initialize counters
	 */
	irec_offset = 0;
	ibuf_offset = 0;
	icnt = 0;
	status = 0;
	done = 0;

	/*
	 * verify inode chunk if necessary
	 */
	if (ino_discovery)  {
		while (!done)  {
			/*
			 * make inode pointer
			 */
			dino = XFS_MAKE_IPTR(mp, bp, icnt);
			agino = irec_offset + ino_rec->ino_startnum;

			/*
			 * we always think that the root and realtime
			 * inodes are verified even though we may have
			 * to reset them later to keep from losing the
			 * chunk that they're in
			 */
			if (verify_dinode(mp, dino, agno, agino) == 0 ||
					(agno == 0 &&
					(mp->m_sb.sb_rootino == agino ||
					mp->m_sb.sb_rsumino == agino ||
					mp->m_sb.sb_rbmino == agino)))
				status++;

			irec_offset++;
			icnt++;

			if (icnt == XFS_IALLOC_INODES(mp) &&
					irec_offset == XFS_INODES_PER_CHUNK)  {
				/*
				 * done! - finished up irec and block
				 * simultaneously
				 */
				libxfs_putbuf(bp);
				done = 1;
				break;
			} else if (irec_offset == XFS_INODES_PER_CHUNK)  {
				/*
				 * get new irec (multiple chunks per block fs)
				 */
				ino_rec = next_ino_rec(ino_rec);
				ASSERT(ino_rec->ino_startnum == agino + 1);
				irec_offset = 0;
			}
		}

		/*
		 * if chunk/block is bad, blow it off.  the inode records
		 * will be deleted by the caller if appropriate.
		 */
		if (!status)  {
			*bogus = 1;
			if (!done)	/* already free'd */
				libxfs_putbuf(bp);
			return(0);
		}

		/*
		 * reset irec and counters
		 */
		ino_rec = first_irec;

		irec_offset = 0;
		ibuf_offset = 0;
		icnt = 0;
		status = 0;
		done = 0;

		/* nathans TODO ... memory leak here?: */
		/*
		 * NOTE(review): if the verification pass exited via the
		 * "done" path the buffer was already released, but on the
		 * early-break path bp is re-read below without a putbuf --
		 * confirm whether the old buffer can leak here.
		 */

		/*
		 * get first block
		 */
		bp = libxfs_readbuf(mp->m_dev,
			XFS_AGB_TO_DADDR(mp, agno, agbno),
			XFS_FSB_TO_BB(mp, XFS_IALLOC_BLOCKS(mp)), 0);
		if (!bp) {
			do_warn(_("can't read inode %llu, disk block %lld, "
				"cnt %d\n"), XFS_AGINO_TO_INO(mp, agno, agino),
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				(int)XFS_FSB_TO_BB(mp, XFS_IALLOC_BLOCKS(mp)));
			return(1);
		}
	}

	/*
	 * mark block as an inode block in the incore bitmap
	 */
	switch (state = get_agbno_state(mp, agno, agbno))  {
	case XR_E_INO:	/* already marked */
		break;
	case XR_E_UNKNOWN:
	case XR_E_FREE:
	case XR_E_FREE1:
		set_agbno_state(mp, agno, agbno, XR_E_INO);
		break;
	case XR_E_BAD_STATE:
		do_error(_("bad state in block map %d\n"), state);
		break;
	default:
		set_agbno_state(mp, agno, agbno, XR_E_MULT);
		do_warn(_("inode block %llu multiply claimed, state was %d\n"),
			XFS_AGB_TO_FSB(mp, agno, agbno), state);
		break;
	}

	/* second pass: actually process every inode in the chunk */
	while (!done)  {
		/*
		 * make inode pointer
		 */
		dino = XFS_MAKE_IPTR(mp, bp, icnt);
		agino = irec_offset + ino_rec->ino_startnum;

		is_used = 3;	/* sentinel: must be overwritten below */
		ino_dirty = 0;
		parent = 0;

		status = process_dinode(mp, dino, agno, agino,
				is_inode_free(ino_rec, irec_offset),
				&ino_dirty, &cleared, &is_used,
				ino_discovery, check_dups,
				extra_attr_check, &isa_dir, &parent);

		ASSERT(is_used != 3);
		if (ino_dirty)
			dirty = 1;
		/*
		 * XXX - if we want to try and keep
		 * track of whether we need to bang on
		 * the inode maps (instead of just
		 * blindly reconstructing them like
		 * we do now, this is where to start.
		 */
		if (is_used)  {
			if (is_inode_free(ino_rec, irec_offset))  {
				if (verbose || no_modify ||
				    XFS_AGINO_TO_INO(mp, agno, agino) !=
						old_orphanage_ino)  {
					do_warn(_("imap claims in-use inode "
						  "%llu is free, "),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				}

				if (verbose || (!no_modify &&
				    XFS_AGINO_TO_INO(mp, agno, agino) !=
						old_orphanage_ino))
					do_warn(_("correcting imap\n"));
				else
					do_warn(_("would correct imap\n"));
			}
			set_inode_used(ino_rec, irec_offset);
		} else  {
			set_inode_free(ino_rec, irec_offset);
		}

		/*
		 * if we lose the root inode, or it turns into
		 * a non-directory, that allows us to double-check
		 * later whether or not we need to reinitialize it.
		 */
		if (isa_dir)  {
			set_inode_isadir(ino_rec, irec_offset);
			/*
			 * we always set the parent but
			 * we may as well wait until
			 * phase 4 (no inode discovery)
			 * because the parent info will
			 * be solid then.
			 */
			if (!ino_discovery)  {
				ASSERT(parent != 0);
				set_inode_parent(ino_rec, irec_offset, parent);
				ASSERT(parent ==
					get_inode_parent(ino_rec, irec_offset));
			}
		} else  {
			clear_inode_isadir(ino_rec, irec_offset);
		}

		/* non-zero status means the inode was (or would be) cleared;
		 * flag special inodes so later phases rebuild them */
		if (status)  {
			if (mp->m_sb.sb_rootino ==
					XFS_AGINO_TO_INO(mp, agno, agino)) {
				need_root_inode = 1;

				if (!no_modify)  {
					do_warn(_("cleared root inode %llu\n"),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				} else  {
					do_warn(_("would clear root inode %llu\n"),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				}
			} else if (mp->m_sb.sb_rbmino ==
					XFS_AGINO_TO_INO(mp, agno, agino)) {
				need_rbmino = 1;

				if (!no_modify)  {
					do_warn(_("cleared realtime bitmap "
						  "inode %llu\n"),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				} else  {
					do_warn(_("would clear realtime bitmap "
						  "inode %llu\n"),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				}
			} else if (mp->m_sb.sb_rsumino ==
					XFS_AGINO_TO_INO(mp, agno, agino)) {
				need_rsumino = 1;

				if (!no_modify)  {
					do_warn(_("cleared realtime summary "
						  "inode %llu\n"),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				} else  {
					do_warn(_("would clear realtime summary"
						  " inode %llu\n"),
						XFS_AGINO_TO_INO(mp, agno,
							agino));
				}
			} else if (!no_modify)  {
				do_warn(_("cleared inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno, agino));
			} else  {
				do_warn(_("would have cleared inode %llu\n"),
					XFS_AGINO_TO_INO(mp, agno, agino));
			}
		}

		irec_offset++;
		ibuf_offset++;
		icnt++;

		if (icnt == XFS_IALLOC_INODES(mp) &&
				irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * done! - finished up irec and block simultaneously
			 */
			if (dirty && !no_modify)
				libxfs_writebuf(bp, 0);
			else
				libxfs_putbuf(bp);

			done = 1;
			break;
		} else if (ibuf_offset == mp->m_sb.sb_inopblock)  {
			/*
			 * mark block as an inode block in the incore bitmap
			 * and reset inode buffer offset counter
			 */
			ibuf_offset = 0;
			agbno++;

			switch (state = get_agbno_state(mp, agno, agbno))  {
			case XR_E_INO:	/* already marked */
				break;
			case XR_E_UNKNOWN:
			case XR_E_FREE:
			case XR_E_FREE1:
				set_agbno_state(mp, agno, agbno, XR_E_INO);
				break;
			case XR_E_BAD_STATE:
				do_error(_("bad state in block map %d\n"),
					state);
				break;
			default:
				set_agbno_state(mp, agno, agbno, XR_E_MULT);
				do_warn(_("inode block %llu multiply claimed, "
					  "state was %d\n"),
					XFS_AGB_TO_FSB(mp, agno, agbno), state);
				break;
			}

		} else if (irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * get new irec (multiple chunks per block fs)
			 */
			ino_rec = next_ino_rec(ino_rec);
			ASSERT(ino_rec->ino_startnum == agino + 1);
			irec_offset = 0;
		}
	}
	return(0);
}
916 | ||
/*
 * check all inodes mentioned in the ag's incore inode maps.
 * the map may be incomplete.  If so, we'll catch the missing
 * inodes (hopefully) when we traverse the directory tree.
 * ino_discovery is set to 1 if newly discovered inodes should be
 * added to the uncertain-inode lists for later processing; this
 * only happens in phase 3.  check_dups is set to 1 if we're
 * looking for inodes that reference duplicate blocks so we can
 * trash the inode right then and there.  this is set only in
 * phase 4 after we've run through and set the bitmap once.
 * extra_attr_check requests additional attribute-fork checking
 * by the chunk-processing routine.
 */
void
process_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno,
		int ino_discovery, int check_dups, int extra_attr_check)
{
	int num_inos, bogus;
	ino_tree_node_t *ino_rec, *first_ino_rec, *prev_ino_rec;

	first_ino_rec = ino_rec = findfirst_inode_rec(agno);
	while (ino_rec != NULL)  {
		/*
		 * paranoia - step through inode records until we step
		 * through a full allocation of inodes.  this could
		 * be an issue in big-block filesystems where a block
		 * can hold more than one inode chunk.  make sure to
		 * grab the record corresponding to the beginning of
		 * the next block before we call the processing routines.
		 */
		num_inos = XFS_INODES_PER_CHUNK;
		while (num_inos < XFS_IALLOC_INODES(mp) && ino_rec != NULL)  {
			ASSERT(ino_rec != NULL);
			/*
			 * inodes chunks will always be aligned and sized
			 * correctly
			 */
			if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
				num_inos += XFS_INODES_PER_CHUNK;
		}

		/*
		 * the count can only reach XFS_IALLOC_INODES(mp) via the
		 * increment above, which requires a non-NULL record -- so
		 * ino_rec is non-NULL here whenever the loop ran.
		 */
		ASSERT(num_inos == XFS_IALLOC_INODES(mp));

		if (process_inode_chunk(mp, agno, num_inos, first_ino_rec,
				ino_discovery, check_dups, extra_attr_check,
				&bogus))  {
			/* XXX - i/o error, we've got a problem */
			abort();
		}

		if (!bogus)
			first_ino_rec = ino_rec = next_ino_rec(ino_rec);
		else  {
			/*
			 * inodes pointed to by this record are
			 * completely bogus, blow the records for
			 * this chunk out.
			 * the inode block(s) will get reclaimed
			 * in phase 4 when the block map is
			 * reconstructed after inodes claiming
			 * duplicate blocks are deleted.
			 */
			num_inos = 0;
			ino_rec = first_ino_rec;
			while (num_inos < XFS_IALLOC_INODES(mp) &&
					ino_rec != NULL)  {
				/* remember current record; advancing first
				 * lets us free it safely afterwards */
				prev_ino_rec = ino_rec;

				if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
					num_inos += XFS_INODES_PER_CHUNK;

				get_inode_rec(agno, prev_ino_rec);
				free_inode_rec(agno, prev_ino_rec);
			}

			first_ino_rec = ino_rec;
		}
	}
}
994 | ||
995 | /* | |
996 | * verify the uncertain inode list for an ag. | |
997 | * Good inodes get moved into the good inode tree. | |
998 | * returns 0 if there are no uncertain inode records to | |
999 | * be processed, 1 otherwise. This routine destroys the | |
1000 | * the entire uncertain inode tree for the ag as a side-effect. | |
1001 | */ | |
1002 | void | |
1003 | check_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno) | |
1004 | { | |
1005 | ino_tree_node_t *irec; | |
1006 | ino_tree_node_t *nrec; | |
1007 | xfs_agino_t start; | |
1008 | xfs_agino_t i; | |
1009 | xfs_agino_t agino; | |
1010 | int got_some; | |
1011 | ||
1012 | nrec = NULL; | |
1013 | got_some = 0; | |
1014 | ||
1015 | clear_uncertain_ino_cache(agno); | |
1016 | ||
1017 | if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL) | |
1018 | return; | |
1019 | ||
1020 | /* | |
1021 | * the trick here is to find a contiguous range | |
1022 | * of inodes, make sure that it doesn't overlap | |
1023 | * with a known to exist chunk, and then make | |
1024 | * sure it is a number of entire chunks. | |
1025 | * we check on-disk once we have an idea of what's | |
1026 | * going on just to double-check. | |
1027 | * | |
1028 | * process the uncertain inode record list and look | |
1029 | * on disk to see if the referenced inodes are good | |
1030 | */ | |
1031 | ||
507f4e33 | 1032 | do_warn(_("found inodes not in the inode allocation tree\n")); |
2bd0ea18 NS |
1033 | |
1034 | do { | |
1035 | /* | |
1036 | * check every confirmed (which in this case means | |
1037 | * inode that we really suspect to be an inode) inode | |
1038 | */ | |
1039 | for (i = 0; i < XFS_INODES_PER_CHUNK; i++) { | |
1040 | if (!is_inode_confirmed(irec, i)) | |
1041 | continue; | |
1042 | ||
1043 | agino = i + irec->ino_startnum; | |
1044 | ||
1045 | if (verify_aginum(mp, agno, agino)) | |
1046 | continue; | |
1047 | ||
1048 | if (nrec != NULL && nrec->ino_startnum <= agino && | |
1049 | agino < nrec->ino_startnum + | |
1050 | XFS_INODES_PER_CHUNK) | |
1051 | continue; | |
1052 | ||
1053 | if ((nrec = find_inode_rec(agno, agino)) == NULL) | |
1054 | if (!verify_aginum(mp, agno, agino)) | |
1055 | if (verify_aginode_chunk(mp, agno, | |
1056 | agino, &start)) | |
1057 | got_some = 1; | |
1058 | } | |
1059 | ||
1060 | get_uncertain_inode_rec(agno, irec); | |
1061 | free_inode_rec(agno, irec); | |
1062 | ||
1063 | irec = findfirst_uncertain_inode_rec(agno); | |
1064 | } while (irec != NULL); | |
1065 | ||
1066 | if (got_some) | |
507f4e33 | 1067 | do_warn(_("found inodes not in the inode allocation tree\n")); |
2bd0ea18 NS |
1068 | |
1069 | return; | |
1070 | } | |
1071 | ||
/*
 * verify and process the uncertain inodes for an ag.
 * this is different from check_ in that we can't just
 * move the good inodes into the good inode tree and let
 * process_aginodes() deal with them because this gets called
 * after process_aginodes() has been run on the ag inode tree.
 * So we have to process the inodes as well as verify since
 * we don't want to rerun process_aginodes() on a tree that has
 * mostly been processed.
 *
 * Note that if this routine does process some inodes, it can
 * add uncertain inodes to any ag which would require that
 * the routine be called again to process those newly-added
 * uncertain inodes.
 *
 * returns 0 if no inodes were processed and 1 if inodes
 * were processed (and it is possible that new uncertain
 * inodes were discovered).
 *
 * as a side-effect, this routine tears down the uncertain
 * inode tree for the ag.
 */
int
process_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	ino_tree_node_t		*irec;	/* current uncertain record */
	ino_tree_node_t		*nrec;	/* good-tree record, if any */
	xfs_agino_t		agino;
	int			i;
	int			bogus;
	int			cnt;
	int			got_some;

#ifdef XR_INODE_TRACE
	fprintf(stderr, "in process_uncertain_aginodes, agno = %d\n", agno);
#endif

	got_some = 0;

	clear_uncertain_ino_cache(agno);

	if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL)
		return(0);

	nrec = NULL;

	do {
		/*
		 * check every confirmed inode
		 */
		for (cnt = i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			if (!is_inode_confirmed(irec, i))
				continue;
			cnt++;
			agino = i + irec->ino_startnum;
#ifdef XR_INODE_TRACE
			fprintf(stderr, "ag inode = %d (0x%x)\n", agino, agino);
#endif
			/*
			 * skip over inodes already processed (in the
			 * good tree), bad inode numbers, and inode numbers
			 * pointing to bogus inodes
			 */
			if (verify_aginum(mp, agno, agino))
				continue;

			/* covered by the chunk record we found last time */
			if (nrec != NULL && nrec->ino_startnum <= agino &&
					agino < nrec->ino_startnum +
					XFS_INODES_PER_CHUNK)
				continue;

			if ((nrec = find_inode_rec(agno, agino)) != NULL)
				continue;

			/*
			 * verify the chunk.  if good, it will be
			 * added to the good inode tree.
			 */
			if ((nrec = verify_aginode_chunk_irec(mp,
						agno, agino)) == NULL)
				continue;

			got_some = 1;

			/*
			 * process the inode record we just added
			 * to the good inode tree.  The inode
			 * processing may add more records to the
			 * uncertain inode lists.  note: ino_discovery
			 * is forced on here; dup/attr checks are off.
			 */
			if (process_inode_chunk(mp, agno, XFS_IALLOC_INODES(mp),
					nrec, 1, 0, 0, &bogus))  {
				/* XXX - i/o error, we've got a problem */
				abort();
			}
		}

		/*
		 * an uncertain record with no confirmed inodes should
		 * never have been queued -- presumably guaranteed by
		 * the code that adds uncertain records (not visible here).
		 */
		ASSERT(cnt != 0);
		/*
		 * now return the uncertain inode record to the free pool
		 * and pull another one off the list for processing
		 */
		get_uncertain_inode_rec(agno, irec);
		free_inode_rec(agno, irec);

		irec = findfirst_uncertain_inode_rec(agno);
	} while (irec != NULL);

	if (got_some)
		do_warn(_("found inodes not in the inode allocation tree\n"));

	return(1);
}