/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "libxfs.h"
#include "avl.h"
#include "globals.h"
#include "agheader.h"
#include "incore.h"
#include "protos.h"
#include "err_protos.h"
#include "dinode.h"
#include "versions.h"
#include "prefetch.h"
#include "progress.h"

/*
 * validates an inode block or chunk and returns the number of good
 * inodes.  the dinodes are verified using verify_uncertain_dinode(),
 * which means only the basic inode info is checked, no fork checks.
 */
static int
check_aginode_block(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agblock_t	agbno)
{
	xfs_dinode_t	*dino_p;
	int		i;
	int		cnt = 0;
	xfs_buf_t	*bp;

	/*
	 * it's ok to read these possible inode blocks in one at
	 * a time because they don't belong to known inodes (if
	 * they did, we'd know about them courtesy of the incore inode
	 * tree and we wouldn't be here), and we stale the buffers out
	 * so no one else will overlap them.
	 */
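	/* read with no verifier -- we don't yet know this block holds inodes */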
	bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, agbno),
			XFS_FSB_TO_BB(mp, 1), 0, NULL);
	if (!bp) {
		do_warn(_("cannot read agbno (%u/%u), disk block %" PRId64 "\n"),
			agno, agbno, XFS_AGB_TO_DADDR(mp, agno, agbno));
		return(0);
	}

	for (i = 0; i < mp->m_sb.sb_inopblock; i++) {
		dino_p = xfs_make_iptr(mp, bp, i);
		if (!verify_uncertain_dinode(mp, dino_p, agno,
				XFS_OFFBNO_TO_AGINO(mp, agbno, i)))
			cnt++;
	}
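
	/*
	 * at least one good inode: mark the buffer with the inode
	 * verifier so later I/O on it is verified.
	 */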
	if (cnt)
		bp->b_ops = &xfs_inode_buf_ops;

	libxfs_putbuf(bp);
	return(cnt);
}

/*
 * tries to establish if the inode really exists in a valid
 * inode chunk.  returns number of new inodes if things are good
 * and 0 if bad.  start is the start of the discovered inode chunk.
 * routine assumes that ino is a legal inode number
 * (verified by verify_inum()).  If the inode chunk turns out
 * to be good, this routine will put the inode chunk into
 * the good inode chunk tree if required.
 *
 * the verify_(ag)inode* family of routines are utility
 * routines called by check_uncertain_aginodes() and
 * process_uncertain_aginodes().
 */
static int
verify_inode_chunk(xfs_mount_t		*mp,
			xfs_ino_t	ino,
			xfs_ino_t	*start_ino)
{
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	start_agino;
	xfs_agblock_t	agbno;
	xfs_agblock_t	start_agbno = 0;
	xfs_agblock_t	end_agbno;
	xfs_agblock_t	max_agbno;
	xfs_agblock_t	cur_agbno;
	xfs_agblock_t	chunk_start_agbno;
	xfs_agblock_t	chunk_stop_agbno;
	ino_tree_node_t	*irec_before_p = NULL;
	ino_tree_node_t	*irec_after_p = NULL;
	ino_tree_node_t	*irec_p;
	ino_tree_node_t	*irec_next_p;
	int		irec_cnt;
	int		ino_cnt = 0;
	int		num_blks;
	int		i;
	int		j;
	int		state;
	xfs_extlen_t	blen;

	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_INO_TO_AGBNO(mp, ino);
	*start_ino = NULLFSINO;

	ASSERT(mp->m_ialloc_blks > 0);

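	/* the last AG can be shorter than sb_agblocks, so clamp max_agbno */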
	if (agno == mp->m_sb.sb_agcount - 1)
		max_agbno = mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno;
	else
		max_agbno = mp->m_sb.sb_agblocks;

	/*
	 * is the inode beyond the end of the AG?
	 */
	if (agbno >= max_agbno)
		return(0);

	/*
	 * check for the easy case, inodes per block >= XFS_INODES_PER_CHUNK
	 * (multiple chunks per block)
	 */
	if (mp->m_ialloc_blks == 1)  {
		if (agbno > max_agbno)
			return 0;
		if (check_aginode_block(mp, agno, agino) == 0)
			return 0;

		pthread_mutex_lock(&ag_locks[agno].lock);

		state = get_bmap(agno, agbno);
		switch (state) {
		case XR_E_INO:
			do_warn(
		_("uncertain inode block %d/%d already known\n"),
				agno, agbno);
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_bmap(agno, agbno, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			/*
			 * if block is already claimed, forget it.
			 */
			do_warn(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, agbno, state);
			set_bmap(agno, agbno, XR_E_MULT);
			pthread_mutex_unlock(&ag_locks[agno].lock);
			return(0);
		default:
			do_warn(
		_("inode block %d/%d bad state, (state %d)\n"),
				agno, agbno, state);
			set_bmap(agno, agbno, XR_E_INO);
			break;
		}

		pthread_mutex_unlock(&ag_locks[agno].lock);

		start_agino = XFS_OFFBNO_TO_AGINO(mp, agbno, 0);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

		/*
		 * put new inode record(s) into inode tree
		 */
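		/* one record per chunk sharing this block (chunks_pblock of them) */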
		for (j = 0; j < chunks_pblock; j++)  {
			if ((irec_p = find_inode_rec(mp, agno, start_agino))
					== NULL)  {
				irec_p = set_inode_free_alloc(mp, agno,
						start_agino);
				for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
					set_inode_free(irec_p, i);
			}
			if (start_agino <= agino && agino <
					start_agino + XFS_INODES_PER_CHUNK)
				set_inode_used(irec_p, agino - start_agino);

			start_agino += XFS_INODES_PER_CHUNK;
			ino_cnt += XFS_INODES_PER_CHUNK;
		}

		return(ino_cnt);
	} else if (fs_aligned_inodes)  {
		/*
		 * next easy case -- aligned inode filesystem.
		 * just check out the chunk
		 */
		start_agbno = rounddown(XFS_INO_TO_AGBNO(mp, ino),
					fs_ino_alignment);
		end_agbno = start_agbno + mp->m_ialloc_blks;

		/*
		 * if this fs has aligned inodes but the end of the
		 * chunk is beyond the end of the ag, this is a bad
		 * chunk
		 */
		if (end_agbno > max_agbno)
			return(0);

		/*
		 * check out all blocks in chunk
		 */
		ino_cnt = 0;
		for (cur_agbno = start_agbno; cur_agbno < end_agbno;
				cur_agbno++)  {
			ino_cnt += check_aginode_block(mp, agno, cur_agbno);
		}

		/*
		 * if we lose either 2 blocks worth of inodes or >25% of
		 * the chunk, just forget it.
		 */
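		/* XFS_INODES_PER_CHUNK is 64, so 16 inodes is the 25% bound */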
		if (ino_cnt < XFS_INODES_PER_CHUNK - 2 * mp->m_sb.sb_inopblock
				|| ino_cnt < XFS_INODES_PER_CHUNK - 16)
			return(0);

		/*
		 * ok, put the record into the tree, if no conflict.
		 */
		if (find_uncertain_inode_rec(agno,
				XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0)))
			return(0);

		start_agino = XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0);
		*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

		irec_p = set_inode_free_alloc(mp, agno,
				XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0));

		for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
			set_inode_free(irec_p, i);

		ASSERT(start_agino <= agino &&
				start_agino + XFS_INODES_PER_CHUNK > agino);

		set_inode_used(irec_p, agino - start_agino);

		return(XFS_INODES_PER_CHUNK);
	}

	/*
	 * hard case -- pre-6.3 filesystem.
	 * set default start/end agbnos and ensure agbnos are legal.
	 * we're setting a range [start_agbno, end_agbno) such that
	 * a discovered inode chunk completely within that range
	 * would include the inode passed into us.
	 */
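	/* start_agbno stays >= 1 -- agbno 0 holds the AG superblock */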
	if (mp->m_ialloc_blks > 1)  {
		if (agino > mp->m_ialloc_inos)
			start_agbno = agbno - mp->m_ialloc_blks + 1;
		else
			start_agbno = 1;
	}

	end_agbno = agbno + mp->m_ialloc_blks;

	if (end_agbno > max_agbno)
		end_agbno = max_agbno;

	/*
	 * search tree for known inodes within +/- 1 inode chunk range
	 */
	irec_before_p = irec_after_p = NULL;

	find_inode_rec_range(mp, agno, XFS_OFFBNO_TO_AGINO(mp, start_agbno, 0),
		XFS_OFFBNO_TO_AGINO(mp, end_agbno, mp->m_sb.sb_inopblock - 1),
		&irec_before_p, &irec_after_p);

	/*
	 * if we have known inode chunks in our search range, establish
	 * their start and end-points to tighten our search range.  range
	 * is [start, end) -- e.g. max/end agbno is one beyond the
	 * last block to be examined.  the avl routines work this way.
	 */
	if (irec_before_p)  {
		/*
		 * only one inode record in the range, move one boundary in
		 */
		if (irec_before_p == irec_after_p)  {
			if (irec_before_p->ino_startnum < agino)
				start_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum +
						XFS_INODES_PER_CHUNK);
			else
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_before_p->ino_startnum);
		}

		/*
		 * find the start of the gap in the search range (which
		 * should contain our unknown inode).  if the only irec
		 * within +/- 1 chunks starts after the inode we're
		 * looking for, skip this stuff since the end_agbno
		 * of the range has already been trimmed in to not
		 * include that irec.
		 */
		if (irec_before_p->ino_startnum < agino)  {
			irec_p = irec_before_p;
			irec_next_p = next_ino_rec(irec_p);

			while (irec_next_p != NULL &&
				irec_p->ino_startnum + XFS_INODES_PER_CHUNK ==
				irec_next_p->ino_startnum)  {
				irec_p = irec_next_p;
				irec_next_p = next_ino_rec(irec_next_p);
			}

			start_agbno = XFS_AGINO_TO_AGBNO(mp,
					irec_p->ino_startnum) +
					mp->m_ialloc_blks;

			/*
			 * we know that the inode we're trying to verify isn't
			 * in an inode chunk so the next ino_rec marks the end
			 * of the gap -- is it within the search range?
			 */
			if (irec_next_p != NULL &&
				agino + mp->m_ialloc_inos >=
					irec_next_p->ino_startnum)
				end_agbno = XFS_AGINO_TO_AGBNO(mp,
						irec_next_p->ino_startnum);
		}

		ASSERT(start_agbno < end_agbno);
	}

	/*
	 * if the gap is too small to contain a chunk, we lose.
	 * this means that inode chunks known to be good surround
	 * the inode in question and that the space between them
	 * is too small for a legal inode chunk
	 */
	if (end_agbno - start_agbno < mp->m_ialloc_blks)
		return(0);

	/*
	 * now grunge around the disk, start at the inode block and
	 * go in each direction until you hit a non-inode block or
	 * run into a range boundary.  A non-inode block is a block
	 * with *no* good inodes in it.  Unfortunately, we can't
	 * co-opt bad blocks into inode chunks (which might take
	 * care of disk blocks that turn into zeroes) because the
	 * filesystem could very well allocate two inode chunks
	 * with a one block file in between and we'd zap the file.
	 * We're better off just losing the rest of the
	 * inode chunk instead.
	 */
	for (cur_agbno = agbno; cur_agbno >= start_agbno; cur_agbno--)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without decrementing cur_agbno so
		 * the chunk start blockno will be set to the last good block
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_start_agbno = cur_agbno + 1;

	for (cur_agbno = agbno + 1; cur_agbno < end_agbno; cur_agbno++)  {
		/*
		 * if the block has no inodes, it's a bad block so
		 * break out now without incrementing cur_agbno so
		 * the chunk stop blockno will be set to the block
		 * immediately after the last good block.
		 */
		if (!(irec_cnt = check_aginode_block(mp, agno, cur_agbno)))
			break;
		ino_cnt += irec_cnt;
	}

	chunk_stop_agbno = cur_agbno;

	num_blks = chunk_stop_agbno - chunk_start_agbno;

	if (num_blks < mp->m_ialloc_blks || ino_cnt == 0)
		return 0;

	/*
	 * XXX - later - if the entire range is selected and they're all
	 * good inodes, keep searching in either direction until you
	 * reach the end of the range of inodes, then split into chunks.
	 * for now, just take one chunk's worth starting at the lowest
	 * possible point and hopefully we'll pick the rest up later.
	 *
	 * XXX - if we were going to fix up an inode chunk for
	 * any good inodes in the chunk, this is where we would
	 * do it.  For now, keep it simple and lose the rest of
	 * the chunk
	 */

	if (num_blks % mp->m_ialloc_blks != 0)  {
		num_blks = rounddown(num_blks, mp->m_ialloc_blks);
		chunk_stop_agbno = chunk_start_agbno + num_blks;
	}

	/*
	 * ok, we've got a candidate inode chunk.  now we have to
	 * verify that we aren't trying to use blocks that are already
	 * in use.  If so, mark them as multiply claimed since odds
	 * are very low that we found this chunk by stumbling across
	 * user data -- we're probably here as a result of a directory
	 * entry or an iunlinked pointer
	 */
	pthread_mutex_lock(&ag_locks[agno].lock);
	for (cur_agbno = chunk_start_agbno;
	     cur_agbno < chunk_stop_agbno;
	     cur_agbno += blen)  {
		state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen);
		switch (state) {
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_warn(
	_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			set_bmap_ext(agno, cur_agbno, blen, XR_E_MULT);
			pthread_mutex_unlock(&ag_locks[agno].lock);
			return 0;
		case XR_E_INO:
			do_error(
	_("uncertain inode block overlap, agbno = %d, ino = %" PRIu64 "\n"),
				agbno, ino);
			break;
		default:
			break;
		}
	}
	pthread_mutex_unlock(&ag_locks[agno].lock);

	/*
	 * ok, chunk is good.  put the record into the tree if required,
	 * and fill in the bitmap.  All inodes will be marked as "free"
	 * except for the one that led us to discover the chunk.  That's
	 * ok because we'll override the free setting later if the
	 * contents of the inode indicate it's in use.
	 */
	start_agino = XFS_OFFBNO_TO_AGINO(mp, chunk_start_agbno, 0);
	*start_ino = XFS_AGINO_TO_INO(mp, agno, start_agino);

	ASSERT(find_inode_rec(mp, agno, start_agino) == NULL);

	irec_p = set_inode_free_alloc(mp, agno, start_agino);
	for (i = 1; i < XFS_INODES_PER_CHUNK; i++)
		set_inode_free(irec_p, i);

	ASSERT(start_agino <= agino &&
			start_agino + XFS_INODES_PER_CHUNK > agino);

	set_inode_used(irec_p, agino - start_agino);

	pthread_mutex_lock(&ag_locks[agno].lock);

	for (cur_agbno = chunk_start_agbno;
	     cur_agbno < chunk_stop_agbno;
	     cur_agbno += blen)  {
		state = get_bmap_ext(agno, cur_agbno, chunk_stop_agbno, &blen);
		switch (state) {
		case XR_E_INO:
			do_error(
		_("uncertain inode block %" PRIu64 " already known\n"),
				XFS_AGB_TO_FSB(mp, agno, cur_agbno));
			break;
		case XR_E_UNKNOWN:
		case XR_E_FREE1:
		case XR_E_FREE:
			set_bmap_ext(agno, cur_agbno, blen, XR_E_INO);
			break;
		case XR_E_MULT:
		case XR_E_INUSE:
		case XR_E_INUSE_FS:
		case XR_E_FS_MAP:
			do_error(
		_("inode block %d/%d multiply claimed, (state %d)\n"),
				agno, cur_agbno, state);
			break;
		default:
			do_warn(
		_("inode block %d/%d bad state, (state %d)\n"),
				agno, cur_agbno, state);
			set_bmap_ext(agno, cur_agbno, blen, XR_E_INO);
			break;
		}
	}
	pthread_mutex_unlock(&ag_locks[agno].lock);

	return(ino_cnt);
}

/*
 * same as verify_inode_chunk(), only it takes an AG number and an
 * AG-relative inode number.
 */
static int
verify_aginode_chunk(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agino_t	agino,
			xfs_agino_t	*agino_start)
{
	xfs_ino_t	ino;
	int		res;

	res = verify_inode_chunk(mp, XFS_AGINO_TO_INO(mp, agno, agino), &ino);

	if (res)
		*agino_start = XFS_INO_TO_AGINO(mp, ino);
	else
		*agino_start = NULLAGINO;

	return(res);
}

/*
 * this does the same as the two above only it returns a pointer
 * to the inode record in the good inode tree
 */
static ino_tree_node_t *
verify_aginode_chunk_irec(xfs_mount_t	*mp,
			xfs_agnumber_t	agno,
			xfs_agino_t	agino)
{
	xfs_agino_t	start_agino;
	ino_tree_node_t	*irec = NULL;

	if (verify_aginode_chunk(mp, agno, agino, &start_agino))
		irec = find_inode_rec(mp, agno, start_agino);

	return(irec);
}

/*
 * Set the state of an inode block during inode chunk processing. The block is
 * expected to be in the free or inode state. If free, it transitions to the
 * inode state. Warn if the block is in neither expected state as this
 * indicates multiply claimed blocks.
 */
static void
process_inode_agbno_state(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno)
{
	int			state;

	pthread_mutex_lock(&ag_locks[agno].lock);
	state = get_bmap(agno, agbno);
	switch (state) {
	case XR_E_INO:	/* already marked */
		break;
	case XR_E_UNKNOWN:
	case XR_E_FREE:
	case XR_E_FREE1:
		set_bmap(agno, agbno, XR_E_INO);
		break;
	case XR_E_BAD_STATE:
		do_error(_("bad state in block map %d\n"), state);
		break;
	default:
		set_bmap(agno, agbno, XR_E_MULT);
		do_warn(
	_("inode block %" PRIu64 " multiply claimed, state was %d\n"),
			XFS_AGB_TO_FSB(mp, agno, agbno), state);
		break;
	}
	pthread_mutex_unlock(&ag_locks[agno].lock);
}

/*
 * processes an inode allocation chunk/block, returns 1 on I/O errors,
 * 0 otherwise
 *
 * *bogus is set to 1 if the entire set of inodes is bad.
 */
static int
process_inode_chunk(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno,
	int			num_inos,
	ino_tree_node_t		*first_irec,
	int			ino_discovery,
	int			check_dups,
	int			extra_attr_check,
	int			*bogus)
{
	xfs_ino_t		parent;
	ino_tree_node_t		*ino_rec;
	xfs_buf_t		**bplist;
	xfs_dinode_t		*dino;
	int			icnt;
	int			status;
	int			is_used;
	int			ino_dirty;
	int			irec_offset;
	int			ibuf_offset;
	xfs_agino_t		agino;
	xfs_agblock_t		agbno;
	xfs_ino_t		ino;
	int			dirty = 0;
	int			isa_dir = 0;
	int			blks_per_cluster;
	int			cluster_count;
	int			bp_index;
	int			cluster_offset;

	ASSERT(first_irec != NULL);
	ASSERT(XFS_AGINO_TO_OFFSET(mp, first_irec->ino_startnum) == 0);

	*bogus = 0;
	ASSERT(mp->m_ialloc_blks > 0);

	blks_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_blocklog;
	if (blks_per_cluster == 0)
		blks_per_cluster = 1;
	cluster_count = XFS_INODES_PER_CHUNK / inodes_per_cluster;
	if (cluster_count == 0)
		cluster_count = 1;
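	/*
	 * the chunk is read cluster by cluster: cluster_count buffers,
	 * each covering blks_per_cluster fs blocks.
	 */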

	/*
	 * get all blocks required to read in this chunk (may wind up
	 * having to process more chunks in a multi-chunk per block fs)
	 */
	agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);

	/*
	 * set up first irec
	 */
	ino_rec = first_irec;
	irec_offset = 0;

	bplist = malloc(cluster_count * sizeof(xfs_buf_t *));
	if (bplist == NULL)
		do_error(_("failed to allocate %zd bytes of memory\n"),
			cluster_count * sizeof(xfs_buf_t *));

	for (bp_index = 0; bp_index < cluster_count; bp_index++) {
		/*
		 * Skip the cluster buffer if the first inode is sparse. The
		 * remaining inodes in the cluster share the same state, as
		 * sparse inodes occur at cluster granularity.
		 */
		if (is_inode_sparse(ino_rec, irec_offset)) {
			pftrace("skip sparse inode, startnum 0x%x idx %d",
				ino_rec->ino_startnum, irec_offset);
			bplist[bp_index] = NULL;
			goto next_readbuf;
		}

		pftrace("about to read off %llu in AG %d",
			XFS_AGB_TO_DADDR(mp, agno, agbno), agno);

		bplist[bp_index] = libxfs_readbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, blks_per_cluster), 0,
				&xfs_inode_buf_ops);
		if (!bplist[bp_index]) {
			do_warn(_("cannot read inode %" PRIu64 ", disk block %" PRId64 ", cnt %d\n"),
				XFS_AGINO_TO_INO(mp, agno, first_irec->ino_startnum),
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, blks_per_cluster));
			while (bp_index > 0) {
				bp_index--;
				libxfs_putbuf(bplist[bp_index]);
			}
			free(bplist);
			return(1);
		}

		pftrace("readbuf %p (%llu, %d) in AG %d", bplist[bp_index],
			(long long)XFS_BUF_ADDR(bplist[bp_index]),
			XFS_BUF_COUNT(bplist[bp_index]), agno);

		bplist[bp_index]->b_ops = &xfs_inode_buf_ops;

next_readbuf:
		irec_offset += mp->m_sb.sb_inopblock * blks_per_cluster;
		agbno += blks_per_cluster;
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, first_irec->ino_startnum);

	/*
	 * initialize counters
	 */
	irec_offset = 0;
	ibuf_offset = 0;
	cluster_offset = 0;
	icnt = 0;
	status = 0;
	bp_index = 0;

	/*
	 * verify inode chunk if necessary
	 */
	if (ino_discovery)  {
		for (;;)  {
			agino = irec_offset + ino_rec->ino_startnum;

			/* no buffers for sparse clusters */
			if (bplist[bp_index]) {
				/* make inode pointer */
				dino = xfs_make_iptr(mp, bplist[bp_index],
						cluster_offset);

				/*
				 * we always think that the root and realtime
				 * inodes are verified even though we may have
				 * to reset them later to keep from losing the
				 * chunk that they're in
				 */
				if (verify_dinode(mp, dino, agno, agino) == 0 ||
						(agno == 0 &&
						(mp->m_sb.sb_rootino == agino ||
						 mp->m_sb.sb_rsumino == agino ||
						 mp->m_sb.sb_rbmino == agino)))
					status++;
			}

			irec_offset++;
			icnt++;
			cluster_offset++;

			if (icnt == mp->m_ialloc_inos &&
					irec_offset == XFS_INODES_PER_CHUNK)  {
				/*
				 * done! - finished up irec and block
				 * simultaneously
				 */
				break;
			} else if (irec_offset == XFS_INODES_PER_CHUNK)  {
				/*
				 * get new irec (multiple chunks per block fs)
				 */
				ino_rec = next_ino_rec(ino_rec);
				ASSERT(ino_rec->ino_startnum == agino + 1);
				irec_offset = 0;
			}
			if (cluster_offset == inodes_per_cluster) {
				bp_index++;
				cluster_offset = 0;
			}
		}

		/*
		 * if chunk/block is bad, blow it off.  the inode records
		 * will be deleted by the caller if appropriate.
		 */
		if (!status)  {
			*bogus = 1;
			for (bp_index = 0; bp_index < cluster_count; bp_index++)
				if (bplist[bp_index])
					libxfs_putbuf(bplist[bp_index]);
			free(bplist);
			return(0);
		}

		/*
		 * reset irec and counters
		 */
		ino_rec = first_irec;

		irec_offset = 0;
		cluster_offset = 0;
		bp_index = 0;
		icnt = 0;
		status = 0;
	}

	/*
	 * mark block as an inode block in the incore bitmap
	 */
	if (!is_inode_sparse(ino_rec, irec_offset))
		process_inode_agbno_state(mp, agno, agbno);
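
	/*
	 * now walk every inode in the chunk and repair/classify each one
	 */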
	for (;;)  {
		agino = irec_offset + ino_rec->ino_startnum;
		ino = XFS_AGINO_TO_INO(mp, agno, agino);

		if (is_inode_sparse(ino_rec, irec_offset))
			goto process_next;

		/* make inode pointer */
		dino = xfs_make_iptr(mp, bplist[bp_index], cluster_offset);

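		/*
		 * is_used == 3 is a sentinel; process_dinode() must set
		 * it to 0 or 1, which the ASSERT below checks.
		 */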
		is_used = 3;
		ino_dirty = 0;
		parent = 0;

		status = process_dinode(mp, dino, agno, agino,
				is_inode_free(ino_rec, irec_offset),
				&ino_dirty, &is_used, ino_discovery, check_dups,
				extra_attr_check, &isa_dir, &parent);

		ASSERT(is_used != 3);
		if (ino_dirty) {
			dirty = 1;
			libxfs_dinode_calc_crc(mp, dino);
		}

		/*
		 * XXX - if we want to try and keep
		 * track of whether we need to bang on
		 * the inode maps (instead of just
		 * blindly reconstructing them like
		 * we do now), this is where to start.
		 */
		if (is_used)  {
			if (is_inode_free(ino_rec, irec_offset))  {
				if (verbose || no_modify)  {
					do_warn(
	_("imap claims in-use inode %" PRIu64 " is free, "),
						ino);
				}

				if (verbose || !no_modify)
					do_warn(_("correcting imap\n"));
				else
					do_warn(_("would correct imap\n"));
			}
			set_inode_used(ino_rec, irec_offset);

			/*
			 * store the on-disk file type for comparing in
			 * phase 6.
			 */
			set_inode_ftype(ino_rec, irec_offset,
				libxfs_mode_to_ftype(be16_to_cpu(dino->di_mode)));

			/*
			 * store on-disk nlink count for comparing in phase 7
			 */
			set_inode_disk_nlinks(ino_rec, irec_offset,
				dino->di_version > 1
					? be32_to_cpu(dino->di_nlink)
					: be16_to_cpu(dino->di_onlink));

		} else  {
			set_inode_free(ino_rec, irec_offset);
		}

		/*
		 * if we lose the root inode, or it turns into
		 * a non-directory, that allows us to double-check
		 * later whether or not we need to reinitialize it.
		 */
		if (isa_dir)  {
			set_inode_isadir(ino_rec, irec_offset);
			/*
			 * we always set the parent but
			 * we may as well wait until
			 * phase 4 (no inode discovery)
			 * because the parent info will
			 * be solid then.
			 */
			if (!ino_discovery)  {
				ASSERT(parent != 0);
				set_inode_parent(ino_rec, irec_offset, parent);
				ASSERT(parent ==
					get_inode_parent(ino_rec, irec_offset));
			}
		} else  {
			clear_inode_isadir(ino_rec, irec_offset);
		}

		if (status)  {
			if (mp->m_sb.sb_rootino == ino) {
				need_root_inode = 1;

				if (!no_modify)  {
					do_warn(
	_("cleared root inode %" PRIu64 "\n"),
						ino);
				} else  {
					do_warn(
	_("would clear root inode %" PRIu64 "\n"),
						ino);
				}
			} else if (mp->m_sb.sb_rbmino == ino) {
				need_rbmino = 1;

				if (!no_modify)  {
					do_warn(
	_("cleared realtime bitmap inode %" PRIu64 "\n"),
						ino);
				} else  {
					do_warn(
	_("would clear realtime bitmap inode %" PRIu64 "\n"),
						ino);
				}
			} else if (mp->m_sb.sb_rsumino == ino) {
				need_rsumino = 1;

				if (!no_modify)  {
					do_warn(
	_("cleared realtime summary inode %" PRIu64 "\n"),
						ino);
				} else  {
					do_warn(
	_("would clear realtime summary inode %" PRIu64 "\n"),
						ino);
				}
			} else if (!no_modify)  {
				do_warn(_("cleared inode %" PRIu64 "\n"),
					ino);
			} else  {
				do_warn(_("would have cleared inode %" PRIu64 "\n"),
					ino);
			}
			clear_inode_was_rl(ino_rec, irec_offset);
		}

process_next:
		irec_offset++;
		ibuf_offset++;
		icnt++;
		cluster_offset++;

		if (icnt == mp->m_ialloc_inos &&
				irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * done! - finished up irec and block simultaneously
			 */
			for (bp_index = 0; bp_index < cluster_count; bp_index++) {
				if (!bplist[bp_index])
					continue;

				pftrace("put/writebuf %p (%llu) in AG %d",
					bplist[bp_index], (long long)
					XFS_BUF_ADDR(bplist[bp_index]), agno);

				if (dirty && !no_modify)
					libxfs_writebuf(bplist[bp_index], 0);
				else
					libxfs_putbuf(bplist[bp_index]);
			}
			free(bplist);
			break;
		} else if (ibuf_offset == mp->m_sb.sb_inopblock)  {
			/*
			 * mark block as an inode block in the incore bitmap
			 * and reset inode buffer offset counter
			 */
			ibuf_offset = 0;
			agbno++;

			if (!is_inode_sparse(ino_rec, irec_offset))
				process_inode_agbno_state(mp, agno, agbno);
		} else if (irec_offset == XFS_INODES_PER_CHUNK)  {
			/*
			 * get new irec (multiple chunks per block fs)
			 */
			ino_rec = next_ino_rec(ino_rec);
			ASSERT(ino_rec->ino_startnum == agino + 1);
			irec_offset = 0;
		}
		if (cluster_offset == inodes_per_cluster) {
			bp_index++;
			cluster_offset = 0;
		}
	}
	return(0);
}

/*
 * check all inodes mentioned in the ag's incore inode maps.
 * the map may be incomplete.  If so, we'll catch the missing
 * inodes (hopefully) when we traverse the directory tree.
 * ino_discovery is set to 1 if directory inodes should be
 * processed for internal consistency, parent setting and
 * discovery of unknown inodes.  this only happens
 * in phase 3.  check_dups is set to 1 if we're looking for
 * inodes that reference duplicate blocks so we can trash
 * the inode right then and there.  this is set only in
 * phase 4 after we've run through and set the bitmap once.
 */
void
process_aginodes(
	xfs_mount_t		*mp,
	prefetch_args_t		*pf_args,
	xfs_agnumber_t		agno,
	int			ino_discovery,
	int			check_dups,
	int			extra_attr_check)
{
	int			num_inos, bogus;
	ino_tree_node_t		*ino_rec, *first_ino_rec, *prev_ino_rec;
#ifdef XR_PF_TRACE
	int			count;
#endif
	first_ino_rec = ino_rec = findfirst_inode_rec(agno);

	while (ino_rec != NULL)  {
		/*
		 * paranoia - step through inode records until we step
		 * through a full allocation of inodes.  this could
		 * be an issue in big-block filesystems where a block
		 * can hold more than one inode chunk.  make sure to
		 * grab the record corresponding to the beginning of
		 * the next block before we call the processing routines.
		 */
		num_inos = XFS_INODES_PER_CHUNK;
		while (num_inos < mp->m_ialloc_inos && ino_rec != NULL)  {
			/*
			 * inode chunks will always be aligned and sized
			 * correctly
			 */
			if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
				num_inos += XFS_INODES_PER_CHUNK;
		}

		ASSERT(num_inos == mp->m_ialloc_inos);

		if (pf_args) {
			sem_post(&pf_args->ra_count);
#ifdef XR_PF_TRACE
			sem_getvalue(&pf_args->ra_count, &count);
			pftrace("processing inode chunk %p in AG %d (sem count = %d)",
				first_ino_rec, agno, count);
#endif
		}

		if (process_inode_chunk(mp, agno, num_inos, first_ino_rec,
				ino_discovery, check_dups, extra_attr_check,
				&bogus))  {
			/* XXX - i/o error, we've got a problem */
			abort();
		}

		if (!bogus)
			first_ino_rec = ino_rec = next_ino_rec(ino_rec);
		else  {
			/*
			 * inodes pointed to by this record are
			 * completely bogus, blow the records for
			 * this chunk out.
			 * the inode block(s) will get reclaimed
			 * in phase 4 when the block map is
			 * reconstructed after inodes claiming
			 * duplicate blocks are deleted.
			 */
			num_inos = 0;
			ino_rec = first_ino_rec;
			while (num_inos < mp->m_ialloc_inos &&
					ino_rec != NULL)  {
				prev_ino_rec = ino_rec;

				if ((ino_rec = next_ino_rec(ino_rec)) != NULL)
					num_inos += XFS_INODES_PER_CHUNK;

				get_inode_rec(mp, agno, prev_ino_rec);
				free_inode_rec(agno, prev_ino_rec);
			}

			first_ino_rec = ino_rec;
		}
		PROG_RPT_INC(prog_rpt_done[agno], num_inos);
	}
}

/*
 * verify the uncertain inode list for an ag.
 * Good inodes get moved into the good inode tree.
 * This routine destroys the entire uncertain inode tree
 * for the ag as a side-effect.
 */
void
check_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	ino_tree_node_t		*irec;
	ino_tree_node_t		*nrec;
	xfs_agino_t		start;
	xfs_agino_t		i;
	xfs_agino_t		agino;
	int			got_some;

	nrec = NULL;
	got_some = 0;

	clear_uncertain_ino_cache(agno);

	if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL)
		return;

	/*
	 * the trick here is to find a contiguous range
	 * of inodes, make sure that it doesn't overlap
	 * with a known to exist chunk, and then make
	 * sure it is a number of entire chunks.
	 * we check on-disk once we have an idea of what's
	 * going on just to double-check.
	 *
	 * process the uncertain inode record list and look
	 * on disk to see if the referenced inodes are good
	 */

	do_warn(_("found inodes not in the inode allocation tree\n"));

	do  {
		/*
		 * check every confirmed inode (one that we really
		 * suspect to be an inode)
		 */
		for (i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			if (!is_inode_confirmed(irec, i))
				continue;

			agino = i + irec->ino_startnum;

			if (verify_aginum(mp, agno, agino))
				continue;

			if (nrec != NULL && nrec->ino_startnum <= agino &&
					agino < nrec->ino_startnum +
					XFS_INODES_PER_CHUNK)
				continue;

			if ((nrec = find_inode_rec(mp, agno, agino)) == NULL)
				if (!verify_aginum(mp, agno, agino))
					if (verify_aginode_chunk(mp, agno,
							agino, &start))
						got_some = 1;
		}

		get_uncertain_inode_rec(mp, agno, irec);
		free_inode_rec(agno, irec);

		irec = findfirst_uncertain_inode_rec(agno);
	} while (irec != NULL);

	if (got_some)
		do_warn(_("found inodes not in the inode allocation tree\n"));

	return;
}

/*
 * verify and process the uncertain inodes for an ag.
 * this is different from check_uncertain_aginodes() in that we can't
 * just move the good inodes into the good inode tree and let
 * process_aginodes() deal with them because this gets called
 * after process_aginodes() has been run on the ag inode tree.
 * So we have to process the inodes as well as verify since
 * we don't want to rerun process_aginodes() on a tree that has
 * mostly been processed.
 *
 * Note that if this routine does process some inodes, it can
 * add uncertain inodes to any ag which would require that
 * the routine be called again to process those newly-added
 * uncertain inodes.
 *
 * returns 0 if no inodes were processed and 1 if inodes
 * were processed (and it is possible that new uncertain
 * inodes were discovered).
 *
 * as a side-effect, this routine tears down the uncertain
 * inode tree for the ag.
 */
int
process_uncertain_aginodes(xfs_mount_t *mp, xfs_agnumber_t agno)
{
	ino_tree_node_t		*irec;
	ino_tree_node_t		*nrec;
	xfs_agino_t		agino;
	int			i;
	int			bogus;
	int			cnt;
	int			got_some;

#ifdef XR_INODE_TRACE
	fprintf(stderr, "in process_uncertain_aginodes, agno = %d\n", agno);
#endif

	got_some = 0;

	clear_uncertain_ino_cache(agno);

	if ((irec = findfirst_uncertain_inode_rec(agno)) == NULL)
		return(0);

	nrec = NULL;

	do  {
		/*
		 * check every confirmed inode
		 */
		for (cnt = i = 0; i < XFS_INODES_PER_CHUNK; i++)  {
			if (!is_inode_confirmed(irec, i))
				continue;
			cnt++;
			agino = i + irec->ino_startnum;
#ifdef XR_INODE_TRACE
			fprintf(stderr, "ag inode = %d (0x%x)\n", agino, agino);
#endif
			/*
			 * skip over inodes already processed (in the
			 * good tree), bad inode numbers, and inode numbers
			 * pointing to bogus inodes
			 */
			if (verify_aginum(mp, agno, agino))
				continue;

			if (nrec != NULL && nrec->ino_startnum <= agino &&
					agino < nrec->ino_startnum +
					XFS_INODES_PER_CHUNK)
				continue;

			if ((nrec = find_inode_rec(mp, agno, agino)) != NULL)
				continue;

			/*
			 * verify the chunk.  if good, it will be
			 * added to the good inode tree.
			 */
			if ((nrec = verify_aginode_chunk_irec(mp,
					agno, agino)) == NULL)
				continue;

			got_some = 1;

			/*
			 * process the inode record we just added
			 * to the good inode tree.  The inode
			 * processing may add more records to the
			 * uncertain inode lists.
			 */
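			/* ino_discovery == 1, check_dups == 0, extra_attr_check == 0 */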
			if (process_inode_chunk(mp, agno, mp->m_ialloc_inos,
					nrec, 1, 0, 0, &bogus))  {
				/* XXX - i/o error, we've got a problem */
				abort();
			}
		}

		ASSERT(cnt != 0);
		/*
		 * now return the uncertain inode record to the free pool
		 * and pull another one off the list for processing
		 */
		get_uncertain_inode_rec(mp, agno, irec);
		free_inode_rec(agno, irec);

		irec = findfirst_uncertain_inode_rec(agno);
	} while (irec != NULL);

	if (got_some)
		do_warn(_("found inodes not in the inode allocation tree\n"));

	return(1);
}