// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "agheader.h"
#include "incore.h"
#include "protos.h"
#include "err_protos.h"
#include "dinode.h"
#include "versions.h"
#include "prefetch.h"
#include "progress.h"

/*
 * validates an inode block or chunk, returns # of good inodes.
 * the dinodes are verified using verify_uncertain_dinode() which
 * means only the basic inode info is checked, no fork checks.
 */
static int
check_aginode_block(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agblock_t	agbno)
{
	xfs_dinode_t	*dino_p;
	int		i;
	int		cnt = 0;
	xfs_buf_t	*bp;

	/*
	 * it's ok to read these possible inode blocks one at
	 * a time because they don't belong to known inodes (if
	 * they did, we'd know about them courtesy of the incore inode
	 * tree and we wouldn't be here) and we stale the buffers out
	 * so no one else will overlap them.
	 */
	bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, agbno),
			XFS_FSB_TO_BB(mp, 1), 0, NULL);
	if (!bp) {
		do_warn(_("cannot read agbno (%u/%u), disk block %" PRId64 "\n"),
			agno, agbno, XFS_AGB_TO_DADDR(mp, agno, agbno));
		return(0);
	}

	for (i = 0; i < mp->m_sb.sb_inopblock; i++) {
		dino_p = xfs_make_iptr(mp, bp, i);
		if (!verify_uncertain_dinode(mp, dino_p, agno,
				XFS_OFFBNO_TO_AGINO(mp, agbno, i)))
			cnt++;
	}
	if (cnt)
		bp->b_ops = &xfs_inode_buf_ops;

	libxfs_putbuf(bp);
	return(cnt);
}

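/*
 * For illustration, the chunk geometry these routines juggle:
 * XFS_INODES_PER_CHUNK is 64, so with 4k blocks and 512-byte inodes
 * (sb_inopblock == 8) an inode chunk spans m_ialloc_blks == 8 blocks,
 * while a filesystem with sb_inopblock >= 64 (e.g. 32k blocks with
 * 512-byte inodes) packs chunks_pblock == sb_inopblock / 64 whole
 * chunks into every inode block and has m_ialloc_blks == 1.
 */
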
/*
 * tries to establish if the inode really exists in a valid
 * inode chunk.  returns number of new inodes if things are good
 * and 0 if bad.  start is the start of the discovered inode chunk.
 * routine assumes that ino is a legal inode number
 * (verified by verify_inum()).  If the inode chunk turns out
 * to be good, this routine will put the inode chunk into
 * the good inode chunk tree if required.
 *
 * the verify_(ag)inode* family of routines are utility
 * routines called by check_uncertain_aginodes() and
 * process_uncertain_aginodes().
 */
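/*
 * In outline, the function below distinguishes three cases:
 *   1) m_ialloc_blks == 1 -- one or more whole chunks fit in a single
 *      block, so verifying that one block decides everything;
 *   2) fs_aligned_inodes -- chunk boundaries follow from the alignment,
 *      so round down and verify the candidate chunk directly;
 *   3) the unaligned (pre-6.3) case -- scan neighboring blocks in both
 *      directions to find plausible chunk boundaries.
 */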
static int
verify_inode_chunk(xfs_mount_t		*mp,
			xfs_ino_t	ino,
			xfs_ino_t	*start_ino)
{
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	start_agino;
	xfs_agblock_t	agbno;
	xfs_agblock_t	start_agbno = 0;
	xfs_agblock_t	end_agbno;
	xfs_agblock_t	max_agbno;
	xfs_agblock_t	cur_agbno;
	xfs_agblock_t	chunk_start_agbno;
	xfs_agblock_t	chunk_stop_agbno;
	ino_tree_node_t	*irec_before_p = NULL;
	ino_tree_node_t	*irec_after_p = NULL;
	ino_tree_node_t	*irec_p;
	ino_tree_node_t	*irec_next_p;
	int		irec_cnt;
	int		ino_cnt = 0;
	int		num_blks;
	int		i;
	int		j;
	int		state;
	xfs_extlen_t	blen;

	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_INO_TO_AGBNO(mp, ino);
	*start_ino = NULLFSINO;

	ASSERT(mp->m_ialloc_blks > 0);

	if (agno == mp->m_sb.sb_agcount - 1)
		max_agbno = mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno;
	else
		max_agbno = mp->m_sb.sb_agblocks;

	/*
	 * is the inode beyond the end of the AG?
	 */
	if (agbno >= max_agbno)
		return(0);

	/*
	 * check for the easy case, inodes per block >= XFS_INODES_PER_CHUNK
	 * (multiple chunks per block)
	 */
	if (mp->m_ialloc_blks == 1)  {
		if (agbno > max_agbno)
			return 0;
		if (check_aginode_block(mp, agno, agino) == 0)
			return 0;

		pthread_mutex_lock(&ag_locks[agno].lock);

		state = get_bmap(agno, agbno);
		switch (state) {
		case XR_E_INO:
			do_warn(
		_("uncertain inode block %d/%d already known\n"),
				agno, agbno);
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_bmap(agno, agbno, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			/*
			 * if block is already claimed, forget it.
			 */
			do_warn(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, agbno, state);
			set_bmap(agno, agbno, XR_E_MULT);
			pthread_mutex_unlock(&ag_locks[agno].lock);
			return(0);
		default:
			do_warn(
		_("inode block %d/%d bad state, (state %d)\n"),
				agno, agbno, state);
			set_bmap(agno, agbno, XR_E_INO);
			break;
		}

		pthread_mutex_unlock(&ag_locks[agno].lock);

		start_agino = XFS_AGB_TO_AGINO(mp, agbno);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

		/*
		 * put new inode record(s) into inode tree
		 */
		for (j = 0; j < chunks_pblock; j++)  {
			if ((irec_p = find_inode_rec(mp, agno, start_agino))
					== NULL)  {
				irec_p = set_inode_free_alloc(mp, agno,
							start_agino);
				for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
					set_inode_free(irec_p, i);
			}
			if (start_agino <= agino && agino <
					start_agino + XFS_INODES_PER_CHUNK)
				set_inode_used(irec_p, agino - start_agino);

			start_agino += XFS_INODES_PER_CHUNK;
			ino_cnt += XFS_INODES_PER_CHUNK;
		}

		return(ino_cnt);
	} else if (fs_aligned_inodes)  {
		/*
		 * next easy case -- aligned inode filesystem.
		 * just check out the chunk
		 */
		start_agbno = rounddown(XFS_INO_TO_AGBNO(mp, ino),
					fs_ino_alignment);
		end_agbno = start_agbno + mp->m_ialloc_blks;

		/*
		 * if this fs has aligned inodes but the end of the
		 * chunk is beyond the end of the ag, this is a bad
		 * chunk
		 */
		if (end_agbno > max_agbno)
			return(0);

		/*
		 * check out all blocks in chunk
		 */
		ino_cnt = 0;
		for (cur_agbno = start_agbno; cur_agbno < end_agbno;
				cur_agbno++)  {
			ino_cnt += check_aginode_block(mp, agno, cur_agbno);
		}

		/*
		 * if we lose either 2 blocks worth of inodes or >25% of
		 * the chunk, just forget it.
		 */
		if (ino_cnt < XFS_INODES_PER_CHUNK - 2 * mp->m_sb.sb_inopblock
				|| ino_cnt < XFS_INODES_PER_CHUNK - 16)
			return(0);

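		/*
		 * An illustrative reading of the test above: the
		 * effective threshold is max(64 - 2 * sb_inopblock, 48)
		 * good inodes.  With 2k blocks and 512-byte inodes
		 * (sb_inopblock == 4) the two-block rule dominates and
		 * we need at least 56; with 8k blocks (sb_inopblock ==
		 * 16) the 25% rule dominates and we need at least 48.
		 */
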
		/*
		 * ok, put the record into the tree, if no conflict.
		 */
		if (find_uncertain_inode_rec(agno,
				XFS_AGB_TO_AGINO(mp, start_agbno)))
			return(0);

		start_agino = XFS_AGB_TO_AGINO(mp, start_agbno);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

		irec_p = set_inode_free_alloc(mp, agno,
				XFS_AGB_TO_AGINO(mp, start_agbno));

		for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
			set_inode_free(irec_p, i);

		ASSERT(start_agino <= agino &&
				start_agino + XFS_INODES_PER_CHUNK > agino);

		set_inode_used(irec_p, agino - start_agino);

		return(XFS_INODES_PER_CHUNK);
	}

	/*
	 * hard case -- pre-6.3 filesystem.
	 * set default start/end agbnos and ensure agbnos are legal.
	 * we're setting a range [start_agbno, end_agbno) such that
	 * a discovered inode chunk completely within that range
	 * would include the inode passed into us.
	 */
	if (mp->m_ialloc_blks > 1)  {
		if (agino > mp->m_ialloc_inos)
			start_agbno = agbno - mp->m_ialloc_blks + 1;
		else
			start_agbno = 1;
	}

	end_agbno = agbno + mp->m_ialloc_blks;

	if (end_agbno > max_agbno)
		end_agbno = max_agbno;

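	/*
	 * To make the window concrete (illustrative numbers): with
	 * m_ialloc_blks == 8, an inode in block agbno leaves us with
	 * start_agbno == agbno - 7 and end_agbno == agbno + 8, i.e. a
	 * 15-block window [start_agbno, end_agbno) -- every possible
	 * 8-block chunk containing agbno lies entirely inside it.
	 */
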
	/*
	 * search tree for known inodes within +/- 1 inode chunk range
	 */
	irec_before_p = irec_after_p = NULL;

	find_inode_rec_range(mp, agno, XFS_AGB_TO_AGINO(mp, start_agbno),
		XFS_OFFBNO_TO_AGINO(mp, end_agbno, mp->m_sb.sb_inopblock - 1),
		&irec_before_p, &irec_after_p);

	/*
	 * if we have known inode chunks in our search range, establish
	 * their start and end-points to tighten our search range.  range
	 * is [start, end) -- e.g. max/end agbno is one beyond the
	 * last block to be examined.  the avl routines work this way.
	 */
	if (irec_before_p)  {
		/*
		 * only one inode record in the range, move one boundary in
		 */
		if (irec_before_p == irec_after_p)  {
			if (irec_before_p->ino_startnum < agino)
				start_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum +
						XFS_INODES_PER_CHUNK);
			else
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum);
		}

		/*
		 * find the start of the gap in the search range (which
		 * should contain our unknown inode).  if the only irec
		 * within +/- 1 chunks starts after the inode we're
		 * looking for, skip this stuff since the end_agbno
		 * of the range has already been trimmed in to not
		 * include that irec.
		 */
		if (irec_before_p->ino_startnum < agino)  {
			irec_p = irec_before_p;
			irec_next_p = next_ino_rec(irec_p);

			while (irec_next_p != NULL &&
				irec_p->ino_startnum + XFS_INODES_PER_CHUNK ==
				irec_next_p->ino_startnum)  {
				irec_p = irec_next_p;
				irec_next_p = next_ino_rec(irec_next_p);
			}

			start_agbno = XFS_AGINO_TO_AGBNO(mp,
					irec_p->ino_startnum) +
					mp->m_ialloc_blks;

			/*
			 * we know that the inode we're trying to verify isn't
			 * in an inode chunk so the next ino_rec marks the end
			 * of the gap -- is it within the search range?
			 */
			if (irec_next_p != NULL &&
				agino + mp->m_ialloc_inos >=
				irec_next_p->ino_startnum)
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_next_p->ino_startnum);
		}

		ASSERT(start_agbno < end_agbno);
	}

	/*
	 * if the gap is too small to contain a chunk, we lose.
	 * this means that inode chunks known to be good surround
	 * the inode in question and that the space between them
	 * is too small for a legal inode chunk
	 */
	if (end_agbno - start_agbno < mp->m_ialloc_blks)
		return(0);

	/*
	 * now grunge around the disk, start at the inode block and
	 * go in each direction until you hit a non-inode block or
	 * run into a range boundary.  A non-inode block is a block
	 * with *no* good inodes in it.  Unfortunately, we can't
	 * co-opt bad blocks into inode chunks (which might take
	 * care of disk blocks that turn into zeroes) because the
	 * filesystem could very well allocate two inode chunks
	 * with a one block file in between and we'd zap the file.
	 * We're better off just losing the rest of the
	 * inode chunk instead.
	 */
	for (cur_agbno = agbno; cur_agbno >= start_agbno; cur_agbno--)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without decrementing cur_agbno so
		 * chunk start blockno will be set to the last good block
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_start_agbno = cur_agbno + 1;

	for (cur_agbno = agbno + 1; cur_agbno < end_agbno; cur_agbno++)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without incrementing cur_agbno so
		 * chunk stop blockno will be set to the block
		 * immediately after the last good block.
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_stop_agbno = cur_agbno;

	num_blks = chunk_stop_agbno - chunk_start_agbno;

	if (num_blks < mp->m_ialloc_blks || ino_cnt == 0)
		return 0;

	/*
	 * XXX - later - if the entire range is selected and they're all
	 * good inodes, keep searching in either direction
	 * until the range of inodes ends, then split into chunks.
	 * for now, just take one chunk's worth starting at the lowest
	 * possible point and hopefully we'll pick the rest up later.
	 *
	 * XXX - if we were going to fix up an inode chunk for
	 * any good inodes in the chunk, this is where we would
	 * do it.  For now, keep it simple and lose the rest of
	 * the chunk
	 */

	if (num_blks % mp->m_ialloc_blks != 0)  {
		num_blks = rounddown(num_blks, mp->m_ialloc_blks);
		chunk_stop_agbno = chunk_start_agbno + num_blks;
	}

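	/*
	 * E.g. (illustrative): if the scan found 11 contiguous inode
	 * blocks but m_ialloc_blks == 8, round down to one 8-block
	 * chunk at chunk_start_agbno and let the remaining 3 blocks
	 * be rediscovered (or reclaimed) later.
	 */
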
	/*
	 * ok, we've got a candidate inode chunk.  now we have to
	 * verify that we aren't trying to use blocks that are already
	 * in use.  If so, mark them as multiply claimed since odds
	 * are very low that we found this chunk by stumbling across
	 * user data -- we're probably here as a result of a directory
	 * entry or an iunlinked pointer
	 */
	pthread_mutex_lock(&ag_locks[agno].lock);
	for (cur_agbno = chunk_start_agbno;
	     cur_agbno < chunk_stop_agbno;
	     cur_agbno += blen)  {
		state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen);
		switch (state) {
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_warn(
	_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
			pthread_mutex_unlock(&ag_locks[agno].lock);
			return 0;
		case XR_E_INO:
			do_error(
	_("uncertain inode block overlap, agbno = %d, ino = %" PRIu64 "\n"),
				agbno, ino);
			break;
		default:
			break;
		}
	}
	pthread_mutex_unlock(&ag_locks[agno].lock);

	/*
	 * ok, chunk is good.  put the record into the tree if required,
	 * and fill in the bitmap.  All inodes will be marked as "free"
	 * except for the one that led us to discover the chunk.  That's
	 * ok because we'll override the free setting later if the
	 * contents of the inode indicate it's in use.
	 */
	start_agino = XFS_AGB_TO_AGINO(mp, chunk_start_agbno);
	*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

	ASSERT(find_inode_rec(mp, agno, start_agino) == NULL);

	irec_p = set_inode_free_alloc(mp, agno, start_agino);
	for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
		set_inode_free(irec_p, i);

	ASSERT(start_agino <= agino &&
			start_agino + XFS_INODES_PER_CHUNK > agino);

	set_inode_used(irec_p, agino - start_agino);

	pthread_mutex_lock(&ag_locks[agno].lock);

	for (cur_agbno = chunk_start_agbno;
	     cur_agbno < chunk_stop_agbno;
	     cur_agbno += blen)  {
		state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen);
		switch (state) {
		case XR_E_INO:
			do_error(
	_("uncertain inode block %" PRIu64 " already known\n"),
				XFS_AGB_TO_FSB(mp, agno, cur_agbno));
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_bmap_ext(agno, cur_agbno, blen, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_error(
	_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			break;
		default:
			do_warn(
	_("inode block %d/%d bad state, (state %d)\n"),
				agno, cur_agbno, state);
			set_bmap_ext(agno, cur_agbno, blen, XR_E_INO);
			break;
		}
	}
	pthread_mutex_unlock(&ag_locks[agno].lock);

	return(ino_cnt);
}

/*
 * same as above only for ag inode chunks
 */
static int
verify_aginode_chunk(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agino_t	agino,
			xfs_agino_t	*agino_start)
{
	xfs_ino_t	ino;
	int		res;

	res = verify_inode_chunk(mp, XFS_AGINO_TO_INO(mp, agno, agino), &ino);

	if (res)
		*agino_start = XFS_INO_TO_AGINO(mp, ino);
	else
		*agino_start = NULLAGINO;

	return(res);
}

/*
 * this does the same as the two above only it returns a pointer
 * to the inode record in the good inode tree
 */
static ino_tree_node_t *
verify_aginode_chunk_irec(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agino_t	agino)
{
	xfs_agino_t	start_agino;
	ino_tree_node_t	*irec = NULL;

	if (verify_aginode_chunk(mp, agno, agino, &start_agino))
		irec = find_inode_rec(mp, agno, start_agino);

	return(irec);
}

/*
 * Set the state of an inode block during inode chunk processing.  The
 * block is expected to be in the free or inode state.  If free, it
 * transitions to the inode state.  Warn if the block is in neither
 * expected state, as that indicates multiply claimed blocks.
 */
static void
process_inode_agbno_state(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno)
{
	int			state;

	pthread_mutex_lock(&ag_locks[agno].lock);
	state = get_bmap(agno, agbno);
	switch (state) {
	case XR_E_INO:	/* already marked */
		break;
	case XR_E_UNKNOWN:
	case XR_E_FREE:
	case XR_E_FREE1:
		set_bmap(agno, agbno, XR_E_INO);
		break;
	case XR_E_BAD_STATE:
		do_error(_("bad state in block map %d\n"), state);
		break;
	default:
		set_bmap(agno, agbno, XR_E_MULT);
		do_warn(
	_("inode block %" PRIu64 " multiply claimed, state was %d\n"),
			XFS_AGB_TO_FSB(mp, agno, agbno), state);
		break;
	}
	pthread_mutex_unlock(&ag_locks[agno].lock);
}

/*
 * processes an inode allocation chunk/block, returns 1 on I/O errors,
 * 0 otherwise
 *
 * *bogus is set to 1 if the entire set of inodes is bad.
 */
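/*
 * In outline (summarizing the code below): read every inode cluster
 * buffer backing the chunk, optionally make a first verification pass
 * over the dinodes when ino_discovery is set, then make the main pass
 * that calls process_dinode() on each inode, updating the incore inode
 * record (used/free, ftype, nlink, parent) as it goes, and finally
 * write back or release the cluster buffers.
 */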
static int
process_inode_chunk(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno,
	int			num_inos,
	ino_tree_node_t		*first_irec,
	int			ino_discovery,
	int			check_dups,
	int			extra_attr_check,
	int			*bogus)
{
	xfs_ino_t		parent;
	ino_tree_node_t		*ino_rec;
	xfs_buf_t		**bplist;
	xfs_dinode_t		*dino;
	int			icnt;
	int			status;
	int			is_used;
	int			ino_dirty;
	int			irec_offset;
	int			ibuf_offset;
	xfs_agino_t		agino;
	xfs_agblock_t		agbno;
	xfs_ino_t		ino;
	int			dirty = 0;
	int			isa_dir = 0;
	int			blks_per_cluster;
	int			cluster_count;
	int			bp_index;
	int			cluster_offset;

	ASSERT(first_irec != NULL);
	ASSERT(XFS_AGINO_TO_OFFSET(mp, first_irec->ino_startnum) == 0);

	*bogus = 0;
	ASSERT(mp->m_ialloc_blks > 0);

	blks_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
	if (blks_per_cluster == 0)
		blks_per_cluster = 1;
	cluster_count = XFS_INODES_PER_CHUNK / inodes_per_cluster;
	if (cluster_count == 0)
		cluster_count = 1;

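	/*
	 * Illustrative arithmetic (assuming the traditional 8k inode
	 * cluster size): with 4k blocks and 512-byte inodes,
	 * blks_per_cluster == 2 and inodes_per_cluster == 16, so a
	 * 64-inode chunk is read as cluster_count == 4 buffers of two
	 * blocks each.
	 */
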
	/*
	 * get all blocks required to read in this chunk (may wind up
	 * having to process more chunks in a multi-chunk per block fs)
	 */
	agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);

	/*
	 * set up first irec
	 */
	ino_rec = first_irec;
	irec_offset = 0;

	bplist = malloc(cluster_count * sizeof(xfs_buf_t *));
	if (bplist == NULL)
		do_error(_("failed to allocate %zd bytes of memory\n"),
			cluster_count * sizeof(xfs_buf_t *));

	for (bp_index = 0; bp_index < cluster_count; bp_index++) {
		/*
		 * Skip the cluster buffer if the first inode is sparse. The
		 * remaining inodes in the cluster share the same state as
		 * sparse inodes occur at cluster granularity.
		 */
		if (is_inode_sparse(ino_rec, irec_offset)) {
			pftrace("skip sparse inode, startnum 0x%x idx %d",
				ino_rec->ino_startnum, irec_offset);
			bplist[bp_index] = NULL;
			goto next_readbuf;
		}

		pftrace("about to read off %llu in AG %d",
			XFS_AGB_TO_DADDR(mp, agno, agbno), agno);

		bplist[bp_index] = libxfs_readbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, blks_per_cluster), 0,
				&xfs_inode_buf_ops);
		if (!bplist[bp_index]) {
			do_warn(_("cannot read inode %" PRIu64 ", disk block %" PRId64 ", cnt %d\n"),
				XFS_AGINO_TO_INO(mp, agno, first_irec->ino_startnum),
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, blks_per_cluster));
			while (bp_index > 0) {
				bp_index--;
				libxfs_putbuf(bplist[bp_index]);
			}
			free(bplist);
			return(1);
		}

		pftrace("readbuf %p (%llu, %d) in AG %d", bplist[bp_index],
			(long long)XFS_BUF_ADDR(bplist[bp_index]),
			bplist[bp_index]->b_bcount, agno);

		bplist[bp_index]->b_ops = &xfs_inode_buf_ops;

next_readbuf:
		irec_offset += mp->m_sb.sb_inopblock * blks_per_cluster;
		agbno += blks_per_cluster;
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);

	/*
	 * initialize counters
	 */
	irec_offset = 0;
	ibuf_offset = 0;
	cluster_offset = 0;
	icnt = 0;
	status = 0;
	bp_index = 0;

	/*
	 * verify inode chunk if necessary
	 */
	if (ino_discovery)  {
		for (;;)  {
			agino = irec_offset + ino_rec->ino_startnum;

			/* no buffers for sparse clusters */
			if (bplist[bp_index]) {
				/* make inode pointer */
				dino = xfs_make_iptr(mp, bplist[bp_index],
						cluster_offset);

				/*
				 * we always think that the root and realtime
				 * inodes are verified even though we may have
				 * to reset them later to keep from losing the
				 * chunk that they're in
				 */
				if (verify_dinode(mp, dino, agno, agino) == 0 ||
						(agno == 0 &&
						(mp->m_sb.sb_rootino == agino ||
						 mp->m_sb.sb_rsumino == agino ||
						 mp->m_sb.sb_rbmino == agino)))
					status++;
			}

			irec_offset++;
			icnt++;
			cluster_offset++;

			if (icnt == mp->m_ialloc_inos &&
					irec_offset == XFS_INODES_PER_CHUNK)  {
				/*
				 * done! - finished up irec and block
				 * simultaneously
				 */
				break;
			} else if (irec_offset == XFS_INODES_PER_CHUNK)  {
				/*
				 * get new irec (multiple chunks per block fs)
				 */
				ino_rec = next_ino_rec(ino_rec);
				ASSERT(ino_rec->ino_startnum == agino + 1);
				irec_offset = 0;
			}
			if (cluster_offset == inodes_per_cluster) {
				bp_index++;
				cluster_offset = 0;
			}
		}

		/*
		 * if chunk/block is bad, blow it off.  the inode records
		 * will be deleted by the caller if appropriate.
		 */
		if (!status)  {
			*bogus = 1;
			for (bp_index = 0; bp_index < cluster_count; bp_index++)
				if (bplist[bp_index])
					libxfs_putbuf(bplist[bp_index]);
			free(bplist);
			return(0);
		}

		/*
		 * reset irec and counters
		 */
		ino_rec = first_irec;

		irec_offset = 0;
		cluster_offset = 0;
		bp_index = 0;
		icnt = 0;
		status = 0;
	}

	/*
	 * mark block as an inode block in the incore bitmap
	 */
	if (!is_inode_sparse(ino_rec, irec_offset))
		process_inode_agbno_state(mp, agno, agbno);

	for (;;)  {
		agino = irec_offset + ino_rec->ino_startnum;
		ino = XFS_AGINO_TO_INO(mp, agno, agino);

		if (is_inode_sparse(ino_rec, irec_offset))
			goto process_next;

		/* make inode pointer */
		dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);

		is_used = 3;
		ino_dirty = 0;
		parent = 0;

		status = process_dinode(mp, dino, agno, agino,
				is_inode_free(ino_rec, irec_offset),
				&ino_dirty, &is_used, ino_discovery,
				check_dups, extra_attr_check, &isa_dir,
				&parent);

		ASSERT(is_used != 3);
		if (ino_dirty) {
			dirty = 1;
			libxfs_dinode_calc_crc(mp, dino);
		}

		/*
		 * XXX - if we want to try and keep
		 * track of whether we need to bang on
		 * the inode maps (instead of just
		 * blindly reconstructing them like
		 * we do now), this is where to start.
		 */
		if (is_used)  {
			if (is_inode_free(ino_rec, irec_offset))  {
				if (verbose || no_modify)  {
					do_warn(
	_("imap claims in-use inode %" PRIu64 " is free, "),
						ino);
				}

				if (verbose || !no_modify)
					do_warn(_("correcting imap\n"));
				else
					do_warn(_("would correct imap\n"));
			}
			set_inode_used(ino_rec, irec_offset);

			/*
			 * store the on-disk file type for comparing in
			 * phase 6.
			 */
			set_inode_ftype(ino_rec, irec_offset,
				libxfs_mode_to_ftype(be16_to_cpu(dino->di_mode)));

			/*
			 * store on-disk nlink count for comparing in phase 7
			 */
			set_inode_disk_nlinks(ino_rec, irec_offset,
				dino->di_version > 1
					? be32_to_cpu(dino->di_nlink)
					: be16_to_cpu(dino->di_onlink));

		} else  {
			set_inode_free(ino_rec, irec_offset);
		}

		/*
		 * if we lose the root inode, or it turns into
		 * a non-directory, that allows us to double-check
		 * later whether or not we need to reinitialize it.
		 */
		if (isa_dir)  {
			set_inode_isadir(ino_rec, irec_offset);
			/*
			 * we always set the parent but
			 * we may as well wait until
			 * phase 4 (no inode discovery)
			 * because the parent info will
			 * be solid then.
			 */
			if (!ino_discovery)  {
				ASSERT(parent != 0);
				set_inode_parent(ino_rec, irec_offset, parent);
				ASSERT(parent ==
					get_inode_parent(ino_rec, irec_offset));
			}
		} else  {
			clear_inode_isadir(ino_rec, irec_offset);
		}

		if (status)  {
			if (mp->m_sb.sb_rootino == ino) {
				need_root_inode = 1;

				if (!no_modify)  {
					do_warn(
	_("cleared root inode %" PRIu64 "\n"),
						ino);
				} else  {
					do_warn(
	_("would clear root inode %" PRIu64 "\n"),
						ino);
				}
			} else if (mp->m_sb.sb_rbmino == ino) {
				need_rbmino = 1;

				if (!no_modify)  {
					do_warn(
	_("cleared realtime bitmap inode %" PRIu64 "\n"),
						ino);
				} else  {
					do_warn(
	_("would clear realtime bitmap inode %" PRIu64 "\n"),
						ino);
				}
			} else if (mp->m_sb.sb_rsumino == ino) {
				need_rsumino = 1;

				if (!no_modify)  {
					do_warn(
	_("cleared realtime summary inode %" PRIu64 "\n"),
						ino);
				} else  {
					do_warn(
	_("would clear realtime summary inode %" PRIu64 "\n"),
						ino);
				}
			} else if (!no_modify)  {
				do_warn(_("cleared inode %" PRIu64 "\n"),
					ino);
			} else  {
				do_warn(_("would have cleared inode %" PRIu64 "\n"),
					ino);
			}
			clear_inode_was_rl(ino_rec, irec_offset);
		}

process_next:
		irec_offset++;
		ibuf_offset++;
		icnt++;
		cluster_offset++;

		if (icnt == mp->m_ialloc_inos &&
				irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * done! - finished up irec and block simultaneously
			 */
			for (bp_index = 0; bp_index < cluster_count; bp_index++) {
				if (!bplist[bp_index])
					continue;

				pftrace("put/writebuf %p (%llu) in AG %d",
					bplist[bp_index], (long long)
					XFS_BUF_ADDR(bplist[bp_index]), agno);

				if (dirty && !no_modify)
					libxfs_writebuf(bplist[bp_index], 0);
				else
					libxfs_putbuf(bplist[bp_index]);
			}
			free(bplist);
			break;
		} else if (ibuf_offset == mp->m_sb.sb_inopblock)  {
			/*
			 * mark block as an inode block in the incore bitmap
			 * and reset inode buffer offset counter
			 */
			ibuf_offset = 0;
			agbno++;

			if (!is_inode_sparse(ino_rec, irec_offset))
				process_inode_agbno_state(mp, agno, agbno);
		} else if (irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * get new irec (multiple chunks per block fs)
			 */
			ino_rec = next_ino_rec(ino_rec);
			ASSERT(ino_rec->ino_startnum == agino + 1);
			irec_offset = 0;
		}
		if (cluster_offset == inodes_per_cluster) {
			bp_index++;
			cluster_offset = 0;
		}
	}
	return(0);
}

/*
 * check all inodes mentioned in the ag's incore inode maps.
 * the map may be incomplete.  If so, we'll catch the missing
 * inodes (hopefully) when we traverse the directory tree.
 * ino_discovery is set to 1 if directory inodes should be
 * processed for internal consistency, parent setting and
 * discovery of unknown inodes.  this only happens
 * in phase 3.  check_dups is set to 1 if we're looking for
 * inodes that reference duplicate blocks so we can trash
 * the inode right then and there.  this is set only in
 * phase 4 after we've run through and set the bitmap once.
 */
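/*
 * Typical call pattern (a sketch inferred from the comment above, not
 * a definitive list of callers): phase 3 runs with ino_discovery == 1
 * and check_dups == 0, phase 4 with ino_discovery == 0 and
 * check_dups == 1.
 */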
void
process_aginodes(
	xfs_mount_t		*mp,
	prefetch_args_t		*pf_args,
	xfs_agnumber_t		agno,
	int			ino_discovery,
	int			check_dups,
	int			extra_attr_check)
{
	int			num_inos, bogus;
	ino_tree_node_t		*ino_rec, *first_ino_rec, *prev_ino_rec;
#ifdef XR_PF_TRACE
	int			count;
#endif
	first_ino_rec = ino_rec = findfirst_inode_rec(agno);

	while (ino_rec != NULL)  {
		/*
		 * paranoia - step through inode records until we step
		 * through a full allocation of inodes.  this could
		 * be an issue in big-block filesystems where a block
		 * can hold more than one inode chunk.  make sure to
		 * grab the record corresponding to the beginning of
		 * the next block before we call the processing routines.
		 */
		num_inos = XFS_INODES_PER_CHUNK;
		while (num_inos < mp->m_ialloc_inos && ino_rec != NULL) {
			/*
			 * inode chunks will always be aligned and sized
			 * correctly
			 */
			if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
				num_inos += XFS_INODES_PER_CHUNK;
		}

		ASSERT(num_inos == mp->m_ialloc_inos);

		if (pf_args) {
			sem_post(&pf_args->ra_count);
#ifdef XR_PF_TRACE
			sem_getvalue(&pf_args->ra_count, &count);
			pftrace("processing inode chunk %p in AG %d (sem count = %d)",
				first_ino_rec, agno, count);
#endif
		}

		if (process_inode_chunk(mp, agno, num_inos, first_ino_rec,
				ino_discovery, check_dups, extra_attr_check,
				&bogus))  {
			/* XXX - i/o error, we've got a problem */
			abort();
		}

		if (!bogus)
			first_ino_rec = ino_rec = next_ino_rec(ino_rec);
		else  {
			/*
			 * inodes pointed to by this record are
			 * completely bogus, blow the records for
			 * this chunk out.
			 * the inode block(s) will get reclaimed
			 * in phase 4 when the block map is
			 * reconstructed after inodes claiming
			 * duplicate blocks are deleted.
			 */
			num_inos = 0;
			ino_rec = first_ino_rec;
			while (num_inos < mp->m_ialloc_inos &&
					ino_rec != NULL)  {
				prev_ino_rec = ino_rec;

				if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
					num_inos += XFS_INODES_PER_CHUNK;

				get_inode_rec(mp, agno, prev_ino_rec);
				free_inode_rec(agno, prev_ino_rec);
			}

			first_ino_rec = ino_rec;
		}
		PROG_RPT_INC(prog_rpt_done[agno], num_inos);
	}
}

/*
 * verify the uncertain inode list for an ag.
 * Good inodes get moved into the good inode tree.
 * This routine destroys the entire uncertain inode tree
 * for the ag as a side-effect.
 */
void
check_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	ino_tree_node_t		*irec;
	ino_tree_node_t		*nrec;
	xfs_agino_t		start;
	xfs_agino_t		i;
	xfs_agino_t		agino;
	int			got_some;

	nrec = NULL;
	got_some = 0;

	clear_uncertain_ino_cache(agno);

	if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL)
		return;

	/*
	 * the trick here is to find a contiguous range
	 * of inodes, make sure that it doesn't overlap
	 * with a known to exist chunk, and then make
	 * sure it is a number of entire chunks.
	 * we check on-disk once we have an idea of what's
	 * going on just to double-check.
	 *
	 * process the uncertain inode record list and look
	 * on disk to see if the referenced inodes are good
	 */

	do_warn(_("found inodes not in the inode allocation tree\n"));

	do {
		/*
		 * check every confirmed (which in this case means
		 * inode that we really suspect to be an inode) inode
		 */
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			if (!is_inode_confirmed(irec, i))
				continue;

			agino = i + irec->ino_startnum;

			if (verify_aginum(mp, agno, agino))
				continue;

			if (nrec != NULL && nrec->ino_startnum <= agino &&
					agino < nrec->ino_startnum +
					XFS_INODES_PER_CHUNK)
				continue;

			if ((nrec = find_inode_rec(mp, agno, agino)) == NULL)
				if (!verify_aginum(mp, agno, agino))
					if (verify_aginode_chunk(mp, agno,
							agino, &start))
						got_some = 1;
		}

		get_uncertain_inode_rec(mp, agno, irec);
		free_inode_rec(agno, irec);

		irec = findfirst_uncertain_inode_rec(agno);
	} while (irec != NULL);

	if (got_some)
		do_warn(_("found inodes not in the inode allocation tree\n"));

	return;
}

/*
 * verify and process the uncertain inodes for an ag.
 * this is different from check_uncertain_aginodes() in that we can't
 * just move the good inodes into the good inode tree and let
 * process_aginodes() deal with them because this gets called
 * after process_aginodes() has been run on the ag inode tree.
 * So we have to process the inodes as well as verify since
 * we don't want to rerun process_aginodes() on a tree that has
 * mostly been processed.
 *
 * Note that if this routine does process some inodes, it can
 * add uncertain inodes to any ag which would require that
 * the routine be called again to process those newly-added
 * uncertain inodes.
 *
 * returns 0 if no inodes were processed and 1 if inodes
 * were processed (and it is possible that new uncertain
 * inodes were discovered).
 *
 * as a side-effect, this routine tears down the uncertain
 * inode tree for the ag.
 */
int
process_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	ino_tree_node_t		*irec;
	ino_tree_node_t		*nrec;
	xfs_agino_t		agino;
	int			i;
	int			bogus;
	int			cnt;
	int			got_some;

#ifdef XR_INODE_TRACE
	fprintf(stderr, "in process_uncertain_aginodes, agno = %d\n", agno);
#endif

	got_some = 0;

	clear_uncertain_ino_cache(agno);

	if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL)
		return(0);

	nrec = NULL;

	do  {
		/*
		 * check every confirmed inode
		 */
		for (cnt = i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			if (!is_inode_confirmed(irec, i))
				continue;
			cnt++;
			agino = i + irec->ino_startnum;
#ifdef XR_INODE_TRACE
			fprintf(stderr, "ag inode = %d (0x%x)\n", agino, agino);
#endif
			/*
			 * skip over inodes already processed (in the
			 * good tree), bad inode numbers, and inode numbers
			 * pointing to bogus inodes
			 */
			if (verify_aginum(mp, agno, agino))
				continue;

			if (nrec != NULL && nrec->ino_startnum <= agino &&
					agino < nrec->ino_startnum +
					XFS_INODES_PER_CHUNK)
				continue;

			if ((nrec = find_inode_rec(mp, agno, agino)) != NULL)
				continue;

			/*
			 * verify the chunk.  if good, it will be
			 * added to the good inode tree.
			 */
			if ((nrec = verify_aginode_chunk_irec(mp,
					agno, agino)) == NULL)
				continue;

			got_some = 1;

			/*
			 * process the inode record we just added
			 * to the good inode tree.  The inode
			 * processing may add more records to the
			 * uncertain inode lists.
			 */
			if (process_inode_chunk(mp, agno, mp->m_ialloc_inos,
					nrec, 1, 0, 0, &bogus))  {
				/* XXX - i/o error, we've got a problem */
				abort();
			}
		}

		ASSERT(cnt != 0);
		/*
		 * now return the uncertain inode record to the free pool
		 * and pull another one off the list for processing
		 */
		get_uncertain_inode_rec(mp, agno, irec);
		free_inode_rec(agno, irec);

		irec = findfirst_uncertain_inode_rec(agno);
	} while (irec != NULL);

	if (got_some)
		do_warn(_("found inodes not in the inode allocation tree\n"));

	return(1);
}