repair/scan.c
1 /*
2 * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32
33 #include <libxfs.h>
34 #include "avl.h"
35 #include "globals.h"
36 #include "agheader.h"
37 #include "incore.h"
38 #include "protos.h"
39 #include "err_protos.h"
40 #include "dinode.h"
41 #include "scan.h"
42 #include "versions.h"
43 #include "bmap.h"
44
45 extern int verify_set_agheader(xfs_mount_t *mp, xfs_buf_t *sbuf, xfs_sb_t *sb,
46 xfs_agf_t *agf, xfs_agi_t *agi, xfs_agnumber_t i);
47
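/*
 * running totals for the ag currently being scanned; reset at the
 * top of scan_ag() and accumulated by the freespace and inode
 * btree callbacks below.
 */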
48 static xfs_mount_t *mp = NULL;
49 static xfs_extlen_t bno_agffreeblks;
50 static xfs_extlen_t cnt_agffreeblks;
51 static xfs_extlen_t bno_agflongest;
52 static xfs_extlen_t cnt_agflongest;
53 static xfs_agino_t agicount;
54 static xfs_agino_t agifreecount;
55
56 void
57 set_mp(xfs_mount_t *mpp)
58 {
59 mp = mpp;
60 }
61
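/*
 * read a single block of a short-form (per-ag) btree at agno/root
 * and hand it to the per-tree callback; the callbacks recurse back
 * through scan_sbtree() for interior levels.
 */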
62 void
63 scan_sbtree(
64 xfs_agblock_t root,
65 int nlevels,
66 xfs_agnumber_t agno,
67 int suspect,
68 void (*func)(xfs_btree_sblock_t *block,
69 int level,
70 xfs_agblock_t bno,
71 xfs_agnumber_t agno,
72 int suspect,
73 int isroot),
74 int isroot)
75 {
76 xfs_buf_t *bp;
77
78 bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, root),
79 XFS_FSB_TO_BB(mp, 1), 0);
80 if (!bp) {
81 do_error("can't read btree block %d/%d\n", agno, root);
82 return;
83 }
84 (*func)((xfs_btree_sblock_t *)XFS_BUF_PTR(bp),
85 nlevels - 1, root, agno, suspect, isroot);
86 libxfs_putbuf(bp);
87 }
88
89 /*
90 * returns 1 on bad news (inode needs to be cleared), 0 on good
91 */
92 int
93 scan_lbtree(
94 xfs_dfsbno_t root,
95 int nlevels,
96 int (*func)(xfs_btree_lblock_t *block,
97 int level,
98 int type,
99 int whichfork,
100 xfs_dfsbno_t bno,
101 xfs_ino_t ino,
102 xfs_drfsbno_t *tot,
103 __uint64_t *nex,
104 blkmap_t **blkmapp,
105 bmap_cursor_t *bm_cursor,
106 int isroot,
107 int check_dups,
108 int *dirty),
109 int type,
110 int whichfork,
111 xfs_ino_t ino,
112 xfs_drfsbno_t *tot,
113 __uint64_t *nex,
114 blkmap_t **blkmapp,
115 bmap_cursor_t *bm_cursor,
116 int isroot,
117 int check_dups)
118 {
119 xfs_buf_t *bp;
120 int err;
121 int dirty = 0;
122
123 bp = libxfs_readbuf(mp->m_dev, XFS_FSB_TO_DADDR(mp, root),
124 XFS_FSB_TO_BB(mp, 1), 0);
125 if (!bp) {
126 do_error("can't read btree block %d/%d\n",
127 XFS_FSB_TO_AGNO(mp, root),
128 XFS_FSB_TO_AGBNO(mp, root));
129 return(1);
130 }
131 err = (*func)((xfs_btree_lblock_t *)XFS_BUF_PTR(bp), nlevels - 1,
132 type, whichfork, root, ino, tot, nex, blkmapp,
133 bm_cursor, isroot, check_dups, &dirty);
134
135 ASSERT(dirty == 0 || (dirty && !no_modify));
136
137 if (dirty && !no_modify)
138 libxfs_writebuf(bp, 0);
139 else
140 libxfs_putbuf(bp);
141
142 return(err);
143 }
144
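/*
 * scan_lbtree() callback for inode bmap btree blocks.  checks the
 * block header, sibling pointers and block-state map (or, when
 * check_dups is set, the duplicate extent lists), then processes the
 * extent records of a leaf block or recurses into the children of an
 * interior block.  returns 1 if the owning inode should be cleared,
 * 0 if the block is ok.
 */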
145 int
146 scanfunc_bmap(
147 xfs_btree_lblock_t *ablock,
148 int level,
149 int type,
150 int whichfork,
151 xfs_dfsbno_t bno,
152 xfs_ino_t ino,
153 xfs_drfsbno_t *tot,
154 __uint64_t *nex,
155 blkmap_t **blkmapp,
156 bmap_cursor_t *bm_cursor,
157 int isroot,
158 int check_dups,
159 int *dirty)
160 {
161 xfs_bmbt_block_t *block = (xfs_bmbt_block_t *)ablock;
162 int i;
163 int err;
164 xfs_bmbt_ptr_t *pp;
165 xfs_bmbt_key_t *pkey;
166 xfs_bmbt_rec_32_t *rp;
167 xfs_dfiloff_t first_key;
168 xfs_dfiloff_t last_key;
169 char *forkname;
170
171 if (whichfork == XFS_DATA_FORK)
172 forkname = "data";
173 else
174 forkname = "attr";
175
176 /*
177 * unlike the ag freeblock btrees, if anything looks wrong
178 * in an inode bmap tree, just bail. it's possible that
179 * we'll miss a case where the to-be-toasted inode and
180 * another inode are claiming the same block but that's
181 * highly unlikely.
182 */
183 if (INT_GET(block->bb_magic, ARCH_CONVERT) != XFS_BMAP_MAGIC) {
184 do_warn(
185 "bad magic # %#x in inode %llu (%s fork) bmbt block %llu\n",
186 INT_GET(block->bb_magic, ARCH_CONVERT), ino, forkname, bno);
187 return(1);
188 }
189 if (INT_GET(block->bb_level, ARCH_CONVERT) != level) {
190 do_warn(
191 "expected level %d got %d in inode %llu, (%s fork) bmbt block %llu\n",
192 level, INT_GET(block->bb_level, ARCH_CONVERT), ino, forkname, bno);
193 return(1);
194 }
195
196 if (check_dups == 0) {
197 /*
198 * check sibling pointers. if bad we have a conflict
199 * between the sibling pointers and the child pointers
200 * in the parent block. blow out the inode if that happens
201 */
202 if (bm_cursor->level[level].fsbno != NULLDFSBNO) {
203 /*
204 * this is not the first block on this level
205 * so the cursor for this level has recorded the
206 * values for this block's left-sibling.
207 */
208 if (bno != bm_cursor->level[level].right_fsbno) {
209 do_warn(
210 "bad fwd (right) sibling pointer (saw %llu parent block says %llu)\n",
211 bm_cursor->level[level].right_fsbno,
212 bno);
213 do_warn(
214 "\tin inode %llu (%s fork) bmap btree block %llu\n",
215 ino, forkname,
216 bm_cursor->level[level].fsbno);
217 return(1);
218 }
219 if (INT_GET(block->bb_leftsib, ARCH_CONVERT) !=
220 bm_cursor->level[level].fsbno) {
221 do_warn(
222 "bad back (left) sibling pointer (saw %llu parent block says %llu)\n",
223 INT_GET(block->bb_leftsib, ARCH_CONVERT),
224 bm_cursor->level[level].fsbno);
225 do_warn(
226 "\tin inode %llu (%s fork) bmap btree block %llu\n",
227 ino, forkname, bno);
228 return(1);
229 }
230 } else {
231 /*
232 * This is the first or only block on this level.
233 * Check that the left sibling pointer is NULL
234 */
235 if (INT_GET(block->bb_leftsib, ARCH_CONVERT) !=
236 NULLDFSBNO) {
237 do_warn(
238 "bad back (left) sibling pointer (saw %llu should be NULL (0))\n",
239 INT_GET(block->bb_leftsib, ARCH_CONVERT));
240 do_warn(
241 "\tin inode %llu (%s fork) bmap btree block %llu\n",
242 ino, forkname, bno);
243 return(1);
244 }
245 }
246
247 /*
248 * update cursor block pointers to reflect this block
249 */
250 bm_cursor->level[level].fsbno = bno;
251 bm_cursor->level[level].left_fsbno = INT_GET(block->bb_leftsib, ARCH_CONVERT);
252 bm_cursor->level[level].right_fsbno = INT_GET(block->bb_rightsib, ARCH_CONVERT);
253
254 switch (get_fsbno_state(mp, bno)) {
255 case XR_E_UNKNOWN:
256 case XR_E_FREE1:
257 case XR_E_FREE:
258 set_fsbno_state(mp, bno, XR_E_INUSE);
259 break;
260 case XR_E_FS_MAP:
261 case XR_E_INUSE:
262 /*
263 * we'll try and continue searching here since
264 * the block looks like it's been claimed by a file
265 * to store user data, by a directory to store directory
266 * data, or by the space allocation btrees, but since
267 * we made it here, the block probably
268 * contains btree data.
269 */
270 set_fsbno_state(mp, bno, XR_E_MULT);
271 do_warn(
272 "inode 0x%llx bmap block 0x%llx claimed, state is %d\n",
273 ino, (__uint64_t) bno,
274 get_fsbno_state(mp, bno));
275 break;
276 case XR_E_MULT:
277 case XR_E_INUSE_FS:
278 set_fsbno_state(mp, bno, XR_E_MULT);
279 do_warn(
280 "inode 0x%llx bmap block 0x%llx claimed, state is %d\n",
281 ino, (__uint64_t) bno,
282 get_fsbno_state(mp, bno));
283 /*
284 * if we made it to here, this is probably a bmap block
285 * that is being used by *another* file as a bmap block
286 * so the block will be valid. Both files should be
287 * trashed along with any other file that impinges on
288 * any blocks referenced by either file. So we
289 * continue searching down this btree to mark all
290 * blocks duplicate
291 */
292 break;
293 case XR_E_BAD_STATE:
294 default:
295 do_warn(
296 "bad state %d, inode 0x%llx bmap block 0x%llx\n",
297 get_fsbno_state(mp, bno),
298 ino, (__uint64_t) bno);
299 break;
300 }
301 } else {
302 /*
303 * attribute fork for realtime files is in the regular
304 * filesystem
305 */
306 if (type != XR_INO_RTDATA || whichfork != XFS_DATA_FORK) {
307 if (search_dup_extent(mp, XFS_FSB_TO_AGNO(mp, bno),
308 XFS_FSB_TO_AGBNO(mp, bno)))
309 return(1);
310 } else {
311 if (search_rt_dup_extent(mp, bno))
312 return(1);
313 }
314 }
315 (*tot)++;
316 if (level == 0) {
317 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_bmap_dmxr[0] ||
318 (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_bmap_dmnr[0])) {
319 do_warn("inode 0x%llx bad # of bmap records (%u, min - %u, max - %u)\n",
320 ino, INT_GET(block->bb_numrecs, ARCH_CONVERT),
321 mp->m_bmap_dmnr[0], mp->m_bmap_dmxr[0]);
322 return(1);
323 }
324 rp = (xfs_bmbt_rec_32_t *)
325 XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
326 block, 1, mp->m_bmap_dmxr[0]);
327 *nex += INT_GET(block->bb_numrecs, ARCH_CONVERT);
328 /*
329 * XXX - if we were going to fix up the btree record,
330 * we'd do it right here. For now, if there's a problem,
331 * we'll bail out and presumably clear the inode.
332 */
333 if (check_dups == 0) {
334 err = process_bmbt_reclist(mp, rp, INT_GET(block->bb_numrecs, ARCH_CONVERT),
335 type, ino, tot, blkmapp,
336 &first_key, &last_key,
337 whichfork);
338 if (err)
339 return(1);
340 /*
341 * check that key ordering is monotonically increasing.
342 * if the last_key value in the cursor is set to
343 * NULLDFILOFF, then we know this is the first block
344 * on the leaf level and we shouldn't check the
345 * last_key value.
346 */
347 if (first_key <= bm_cursor->level[level].last_key &&
348 bm_cursor->level[level].last_key !=
349 NULLDFILOFF) {
350 do_warn(
351 "out-of-order bmap key (file offset) in inode %llu, %s fork, fsbno %llu\n",
352 ino, forkname, bno);
353 return(1);
354 }
355 /*
356 * update cursor keys to reflect this block.
357 * don't have to check if last_key is > first_key
358 * since that gets checked by process_bmbt_reclist.
359 */
360 bm_cursor->level[level].first_key = first_key;
361 bm_cursor->level[level].last_key = last_key;
362
363 return(0);
364 } else
365 return(scan_bmbt_reclist(mp, rp, INT_GET(block->bb_numrecs, ARCH_CONVERT),
366 type, ino, tot, whichfork));
367 }
368 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_bmap_dmxr[1] ||
369 (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_bmap_dmnr[1])) {
370 do_warn("inode 0x%llx bad # of bmap records (%u, min - %u, max - %u)\n",
371 ino, INT_GET(block->bb_numrecs, ARCH_CONVERT),
372 mp->m_bmap_dmnr[1], mp->m_bmap_dmxr[1]);
373 return(1);
374 }
375 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1,
376 mp->m_bmap_dmxr[1]);
377 pkey = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1,
378 mp->m_bmap_dmxr[1]);
379
380 last_key = NULLDFILOFF;
381
382 for (i = 0, err = 0; i < INT_GET(block->bb_numrecs, ARCH_CONVERT); i++) {
383 /*
384 * XXX - if we were going to fix up the interior btree nodes,
385 * we'd do it right here. For now, if there's a problem,
386 * we'll bail out and presumably clear the inode.
387 */
388 if (!verify_dfsbno(mp, INT_GET(pp[i], ARCH_CONVERT))) {
389 do_warn("bad bmap btree ptr 0x%llx in ino %llu\n",
390 INT_GET(pp[i], ARCH_CONVERT), ino);
391 return(1);
392 }
393
394 err = scan_lbtree(INT_GET(pp[i], ARCH_CONVERT), level, scanfunc_bmap, type, whichfork,
395 ino, tot, nex, blkmapp, bm_cursor, 0,
396 check_dups);
397 if (err)
398 return(1);
399
400 /*
401 * fix key (offset) mismatches between the first key
402 * in the child block (as recorded in the cursor) and the
403 * key in the interior node referencing the child block.
404 *
405 * fixes cases where entries have been shifted between
406 * child blocks but the parent hasn't been updated. We
407 * don't have to worry about the key values in the cursor
408 * not being set since we only look at the key values of
409 * our child and those are guaranteed to be set by the
410 * call to scan_lbtree() above.
411 */
412 if (check_dups == 0 && INT_GET(pkey[i].br_startoff, ARCH_CONVERT) !=
413 bm_cursor->level[level-1].first_key) {
414 if (!no_modify) {
415 do_warn(
416 "correcting bt key (was %llu, now %llu) in inode %llu\n",
417 INT_GET(pkey[i].br_startoff, ARCH_CONVERT),
418 bm_cursor->level[level-1].first_key,
419 ino);
420 do_warn("\t\t%s fork, btree block %llu\n",
421 forkname, bno);
422 *dirty = 1;
423 INT_SET(pkey[i].br_startoff, ARCH_CONVERT, bm_cursor->level[level-1].first_key);
424 } else {
425 do_warn(
426 "bad btree key (is %llu, should be %llu) in inode %llu\n",
427 INT_GET(pkey[i].br_startoff, ARCH_CONVERT),
428 bm_cursor->level[level-1].first_key,
429 ino);
430 do_warn("\t\t%s fork, btree block %llu\n",
431 forkname, bno);
432 }
433 }
434 }
435
436 /*
437 * Check that the last child block's forward sibling pointer
438 * is NULL.
439 */
440 if (check_dups == 0 &&
441 bm_cursor->level[level - 1].right_fsbno != NULLDFSBNO) {
442 do_warn(
443 "bad fwd (right) sibling pointer (saw %llu should be NULLDFSBNO)\n",
444 bm_cursor->level[level - 1].right_fsbno);
445 do_warn(
446 "\tin inode %llu (%s fork) bmap btree block %llu\n",
447 ino, forkname,
448 bm_cursor->level[level].fsbno);
449 return(1);
450 }
451
452 /*
453 * update cursor keys to reflect this block
454 */
455 if (check_dups == 0) {
456 bm_cursor->level[level].first_key =
457 INT_GET(pkey[0].br_startoff, ARCH_CONVERT);
458 i = INT_GET(block->bb_numrecs, ARCH_CONVERT) - 1;
459 bm_cursor->level[level].last_key =
460 INT_GET(pkey[i].br_startoff, ARCH_CONVERT);
461 }
462
463 return(0);
464 }
465
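/*
 * scan_sbtree() callback for the by-block (bno) freespace btree.
 * leaf records are tallied into bno_agffreeblks/bno_agflongest and
 * the blocks they cover are marked XR_E_FREE1 in the block-state
 * map; interior blocks just recurse into their children.
 */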
466 void
467 scanfunc_bno(
468 xfs_btree_sblock_t *ablock,
469 int level,
470 xfs_agblock_t bno,
471 xfs_agnumber_t agno,
472 int suspect,
473 int isroot
474 )
475 {
476 xfs_agblock_t b;
477 xfs_alloc_block_t *block = (xfs_alloc_block_t *)ablock;
478 int i;
479 xfs_alloc_ptr_t *pp;
480 xfs_alloc_rec_t *rp;
481 int hdr_errors = 0;
482 int numrecs;
483 int state;
484
485 if (INT_GET(block->bb_magic, ARCH_CONVERT) != XFS_ABTB_MAGIC) {
486 do_warn("bad magic # %#x in btbno block %d/%d\n",
487 INT_GET(block->bb_magic, ARCH_CONVERT), agno, bno);
488 hdr_errors++;
489 if (suspect)
490 return;
491 }
492 if (INT_GET(block->bb_level, ARCH_CONVERT) != level) {
493 do_warn("expected level %d got %d in btbno block %d/%d\n",
494 level, INT_GET(block->bb_level, ARCH_CONVERT), agno, bno);
495 hdr_errors++;
496 if (suspect)
497 return;
498 }
499
500 /*
501 * check for btree blocks multiply claimed
502 */
503 state = get_agbno_state(mp, agno, bno);
504
505 switch (state) {
506 case XR_E_UNKNOWN:
507 set_agbno_state(mp, agno, bno, XR_E_FS_MAP);
508 break;
509 default:
510 set_agbno_state(mp, agno, bno, XR_E_MULT);
511 do_warn(
512 "bno freespace btree block claimed (state %d), agno %d, bno %d, suspect %d\n",
513 state, agno, bno, suspect);
514 return;
515 }
516
517 if (level == 0) {
518 numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
519
520 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_alloc_mxr[0]) {
521 numrecs = mp->m_alloc_mxr[0];
522 hdr_errors++;
523 }
524 if (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_alloc_mnr[0]) {
525 numrecs = mp->m_alloc_mnr[0];
526 hdr_errors++;
527 }
528
529 if (hdr_errors)
530 suspect++;
531
532 rp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, block,
533 1, mp->m_alloc_mxr[0]);
534 for (i = 0; i < numrecs; i++) {
535 if (INT_GET(rp[i].ar_blockcount, ARCH_CONVERT) == 0 ||
536 INT_GET(rp[i].ar_startblock, ARCH_CONVERT) == 0 ||
537 !verify_agbno(mp, agno, INT_GET(rp[i].ar_startblock, ARCH_CONVERT)) ||
538 INT_GET(rp[i].ar_blockcount, ARCH_CONVERT) > MAXEXTLEN)
539 continue;
540
541 bno_agffreeblks += INT_GET(rp[i].ar_blockcount, ARCH_CONVERT);
542 if (INT_GET(rp[i].ar_blockcount, ARCH_CONVERT) > bno_agflongest)
543 bno_agflongest = INT_GET(rp[i].ar_blockcount, ARCH_CONVERT);
544 for (b = INT_GET(rp[i].ar_startblock, ARCH_CONVERT);
545 b < INT_GET(rp[i].ar_startblock, ARCH_CONVERT) + INT_GET(rp[i].ar_blockcount, ARCH_CONVERT);
546 b++) {
547 if (get_agbno_state(mp, agno, b)
548 == XR_E_UNKNOWN)
549 set_agbno_state(mp, agno, b,
550 XR_E_FREE1);
551 else {
552 do_warn("block (%d,%d) multiply claimed by bno space tree, state - %d\n",
553 agno, b, get_agbno_state(mp, agno, b));
554 }
555 }
556 }
557 return;
558 }
559
560 /*
561 * interior record
562 */
563 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, block, 1,
564 mp->m_alloc_mxr[1]);
565
566 numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
567 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_alloc_mxr[1]) {
568 numrecs = mp->m_alloc_mxr[1];
569 hdr_errors++;
570 }
571 if (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_alloc_mnr[1]) {
572 numrecs = mp->m_alloc_mnr[1];
573 hdr_errors++;
574 }
575
576 /*
577 * don't pass bogus tree flag down further if this block
578 * looked ok. bail out if two levels in a row look bad.
579 */
580
581 if (suspect && !hdr_errors)
582 suspect = 0;
583
584 if (hdr_errors) {
585 if (suspect)
586 return;
587 else suspect++;
588 }
589
590 for (i = 0; i < numrecs; i++) {
591 /*
592 * XXX - put sibling detection right here.
593 * we know our sibling chain is good. So as we go,
594 * we check the entry before and after each entry.
595 * If either of the entries references a different block,
596 * check the sibling pointer. If there's a sibling
597 * pointer mismatch, try and extract as much data
598 * as possible.
599 */
600 if (INT_GET(pp[i], ARCH_CONVERT) != 0 && verify_agbno(mp, agno, INT_GET(pp[i], ARCH_CONVERT)))
601 scan_sbtree(INT_GET(pp[i], ARCH_CONVERT), level, agno, suspect,
602 scanfunc_bno, 0);
603 }
604 }
605
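/*
 * scan_sbtree() callback for the by-size (cnt) freespace btree.
 * leaf records are tallied into cnt_agffreeblks/cnt_agflongest;
 * blocks already seen by the bno tree (XR_E_FREE1) are promoted to
 * XR_E_FREE, so blocks present in only one of the two trees can be
 * caught later.
 */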
606 void
607 scanfunc_cnt(
608 xfs_btree_sblock_t *ablock,
609 int level,
610 xfs_agblock_t bno,
611 xfs_agnumber_t agno,
612 int suspect,
613 int isroot
614 )
615 {
616 xfs_alloc_block_t *block;
617 xfs_alloc_ptr_t *pp;
618 xfs_alloc_rec_t *rp;
619 xfs_agblock_t b;
620 int i;
621 int hdr_errors;
622 int numrecs;
623 int state;
624
625 block = (xfs_alloc_block_t *)ablock;
626 hdr_errors = 0;
627
628 if (INT_GET(block->bb_magic, ARCH_CONVERT) != XFS_ABTC_MAGIC) {
629 do_warn("bad magic # %#x in btcnt block %d/%d\n",
630 INT_GET(block->bb_magic, ARCH_CONVERT), agno, bno);
631 hdr_errors++;
632 if (suspect)
633 return;
634 }
635 if (INT_GET(block->bb_level, ARCH_CONVERT) != level) {
636 do_warn("expected level %d got %d in btcnt block %d/%d\n",
637 level, INT_GET(block->bb_level, ARCH_CONVERT), agno, bno);
638 hdr_errors++;
639 if (suspect)
640 return;
641 }
642
643 /*
644 * check for btree blocks multiply claimed
645 */
646 state = get_agbno_state(mp, agno, bno);
647
648 switch (state) {
649 case XR_E_UNKNOWN:
650 set_agbno_state(mp, agno, bno, XR_E_FS_MAP);
651 break;
652 default:
653 set_agbno_state(mp, agno, bno, XR_E_MULT);
654 do_warn(
655 "bcnt freespace btree block claimed (state %d), agno %d, bno %d, suspect %d\n",
656 state, agno, bno, suspect);
657 return;
658 }
659
660 if (level == 0) {
661 numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
662
663 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_alloc_mxr[0]) {
664 numrecs = mp->m_alloc_mxr[0];
665 hdr_errors++;
666 }
667 if (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_alloc_mnr[0]) {
668 numrecs = mp->m_alloc_mnr[0];
669 hdr_errors++;
670 }
671
672 if (hdr_errors)
673 suspect++;
674
675 rp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, block,
676 1, mp->m_alloc_mxr[0]);
677 for (i = 0; i < numrecs; i++) {
678 if (INT_GET(rp[i].ar_blockcount, ARCH_CONVERT) == 0 ||
679 INT_GET(rp[i].ar_startblock, ARCH_CONVERT) == 0 ||
680 !verify_agbno(mp, agno, INT_GET(rp[i].ar_startblock, ARCH_CONVERT)) ||
681 INT_GET(rp[i].ar_blockcount, ARCH_CONVERT) > MAXEXTLEN)
682 continue;
683
684 cnt_agffreeblks += INT_GET(rp[i].ar_blockcount, ARCH_CONVERT);
685 if (INT_GET(rp[i].ar_blockcount, ARCH_CONVERT) > cnt_agflongest)
686 cnt_agflongest = INT_GET(rp[i].ar_blockcount, ARCH_CONVERT);
687 for (b = INT_GET(rp[i].ar_startblock, ARCH_CONVERT);
688 b < INT_GET(rp[i].ar_startblock, ARCH_CONVERT) + INT_GET(rp[i].ar_blockcount, ARCH_CONVERT);
689 b++) {
690 state = get_agbno_state(mp, agno, b);
691 /*
692 * no warning messages -- we'll catch
693 * FREE1 blocks later
694 */
695 switch (state) {
696 case XR_E_FREE1:
697 set_agbno_state(mp, agno, b, XR_E_FREE);
698 break;
699 case XR_E_UNKNOWN:
700 set_agbno_state(mp, agno, b,
701 XR_E_FREE1);
702 break;
703 default:
704 do_warn(
705 "block (%d,%d) already used, state %d\n",
706 agno, b, state);
707 break;
708 }
709 }
710 }
711 return;
712 }
713
714 /*
715 * interior record
716 */
717 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_alloc, block, 1,
718 mp->m_alloc_mxr[1]);
719
720 numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
721 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_alloc_mxr[1]) {
722 numrecs = mp->m_alloc_mxr[1];
723 hdr_errors++;
724 }
725 if (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_alloc_mnr[1]) {
726 numrecs = mp->m_alloc_mnr[1];
727 hdr_errors++;
728 }
729
730 /*
731 * don't pass bogus tree flag down further if this block
732 * looked ok. bail out if two levels in a row look bad.
733 */
734
735 if (suspect && !hdr_errors)
736 suspect = 0;
737
738 if (hdr_errors) {
739 if (suspect)
740 return;
741 else suspect++;
742 }
743
744 for (i = 0; i < numrecs; i++)
745 if (INT_GET(pp[i], ARCH_CONVERT) != 0 && verify_agbno(mp, agno, INT_GET(pp[i], ARCH_CONVERT)))
746 scan_sbtree(INT_GET(pp[i], ARCH_CONVERT), level, agno,
747 suspect, scanfunc_cnt, 0);
748 }
749
750 /*
751 * this one walks the inode btrees sucking the info there into
752 * the incore avl tree. We try and rescue corrupted btree records
753 * to minimize our chances of losing inodes. Inode info from potentially
754 * corrupt sources could be bogus so rather than put the info straight
755 * into the tree, instead we put it on a list and try and verify the
756 * info in the next phase by examining what's on disk. At that point,
757 * we'll be able to figure out what's what and stick the corrected info
758 * into the tree. We do bail out at some point and give up on a subtree
759 * so as to avoid walking randomly all over the ag.
760 *
761 * Note that it's also ok if the free/inuse info is wrong; we can correct
762 * that when we examine the on-disk inode. The important thing is to
763 * get the start and alignment of the inode chunks right. Those chunks
764 * that we aren't sure about go into the uncertain list.
765 */
766 void
767 scanfunc_ino(
768 xfs_btree_sblock_t *ablock,
769 int level,
770 xfs_agblock_t bno,
771 xfs_agnumber_t agno,
772 int suspect,
773 int isroot
774 )
775 {
776 xfs_ino_t lino;
777 xfs_inobt_block_t *block;
778 int i;
779 xfs_agino_t ino;
780 xfs_agblock_t agbno;
781 int j;
782 int nfree;
783 int off;
784 int numrecs;
785 int state;
786 xfs_inobt_ptr_t *pp;
787 xfs_inobt_rec_t *rp;
788 ino_tree_node_t *ino_rec, *first_rec, *last_rec;
789 int hdr_errors;
790
791 block = (xfs_inobt_block_t *)ablock;
792 hdr_errors = 0;
793
794 if (INT_GET(block->bb_magic, ARCH_CONVERT) != XFS_IBT_MAGIC) {
795 do_warn("bad magic # %#x in inobt block %d/%d\n",
796 INT_GET(block->bb_magic, ARCH_CONVERT), agno, bno);
797 hdr_errors++;
798 bad_ino_btree = 1;
799 if (suspect)
800 return;
801 }
802 if (INT_GET(block->bb_level, ARCH_CONVERT) != level) {
803 do_warn("expected level %d got %d in inobt block %d/%d\n",
804 level, INT_GET(block->bb_level, ARCH_CONVERT), agno, bno);
805 hdr_errors++;
806 bad_ino_btree = 1;
807 if (suspect)
808 return;
809 }
810
811 /*
812 * check for btree blocks multiply claimed; any unknown/free state
813 * in the block-state bitmap is ok here.
814 */
815 state = get_agbno_state(mp, agno, bno);
816
817 switch (state) {
818 case XR_E_UNKNOWN:
819 case XR_E_FREE1:
820 case XR_E_FREE:
821 set_agbno_state(mp, agno, bno, XR_E_FS_MAP);
822 break;
823 default:
824 set_agbno_state(mp, agno, bno, XR_E_MULT);
825 do_warn(
826 "inode btree block claimed (state %d), agno %d, bno %d, suspect %d\n",
827 state, agno, bno, suspect);
828 }
829
830 numrecs = INT_GET(block->bb_numrecs, ARCH_CONVERT);
831
832 /*
833 * leaf record in btree
834 */
835 if (level == 0) {
836 /* check for trashed btree block */
837
838 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_inobt_mxr[0]) {
839 numrecs = mp->m_inobt_mxr[0];
840 hdr_errors++;
841 }
842 if (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_inobt_mnr[0]) {
843 numrecs = mp->m_inobt_mnr[0];
844 hdr_errors++;
845 }
846
847 if (hdr_errors) {
848 bad_ino_btree = 1;
849 do_warn("dubious inode btree block header %d/%d\n",
850 agno, bno);
851 suspect++;
852 }
853
854 rp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_inobt, block,
855 1, mp->m_inobt_mxr[0]);
856
857 /*
858 * step through the records, each record points to
859 * a chunk of inodes. The start of inode chunks should
860 * be block-aligned. Each inode btree rec should point
861 * to the start of a block of inodes or the start of a group
862 * of INODES_PER_CHUNK (64) inodes. off is the offset into
863 * the block. skip processing of bogus records.
864 */
865 for (i = 0; i < numrecs; i++) {
866 ino = INT_GET(rp[i].ir_startino, ARCH_CONVERT);
867 off = XFS_AGINO_TO_OFFSET(mp, ino);
868 agbno = XFS_AGINO_TO_AGBNO(mp, ino);
869 lino = XFS_AGINO_TO_INO(mp, agno, ino);
870 /*
871 * on chunks spanning multiple blocks, all chunks start
872 * at the beginning of a block. with multi-chunk
873 * blocks, all chunks must start on 64-inode boundaries
874 * since each block can hold N complete chunks. if
875 * fs has aligned inodes, all chunks must start
876 * at a fs_ino_alignment*N'th agbno. skip recs
877 * with badly aligned starting inodes.
878 */
879 if (ino == 0 ||
880 (inodes_per_block <= XFS_INODES_PER_CHUNK &&
881 off != 0) ||
882 (inodes_per_block > XFS_INODES_PER_CHUNK &&
883 off % XFS_INODES_PER_CHUNK != 0) ||
884 (fs_aligned_inodes &&
885 agbno % fs_ino_alignment != 0)) {
886 do_warn(
887 "badly aligned inode rec (starting inode = %llu)\n",
888 lino);
889 suspect++;
890 }
891
892 /*
893 * verify numeric validity of inode chunk first
894 * before inserting into a tree. don't have to
895 * worry about the overflow case because the
896 * starting ino number of a chunk can only get
897 * within 255 inodes of max (NULLAGINO). if it
898 * gets closer, the agino number will be illegal
899 * as the agbno will be too large.
900 */
901 if (verify_aginum(mp, agno, ino)) {
902 do_warn(
903 "bad starting inode # (%llu (0x%x 0x%x)) in ino rec, skipping rec\n",
904 lino, agno, ino);
905 suspect++;
906 continue;
907 }
908
909 if (verify_aginum(mp, agno,
910 ino + XFS_INODES_PER_CHUNK - 1)) {
911 do_warn(
912 "bad ending inode # (%llu (0x%x 0x%x)) in ino rec, skipping rec\n",
913 lino + XFS_INODES_PER_CHUNK - 1,
914 agno, ino + XFS_INODES_PER_CHUNK - 1);
915 suspect++;
916 continue;
917 }
918
919 /*
920 * set state of each block containing inodes
921 */
922 if (off == 0 && !suspect) {
923 for (j = 0;
924 j < XFS_INODES_PER_CHUNK;
925 j += mp->m_sb.sb_inopblock) {
926 agbno = XFS_AGINO_TO_AGBNO(mp, ino + j);
927 state = get_agbno_state(mp,
928 agno, agbno);
929
930 if (state == XR_E_UNKNOWN) {
931 set_agbno_state(mp, agno,
932 agbno, XR_E_INO);
933 } else if (state == XR_E_INUSE_FS &&
934 agno == 0 &&
935 ino + j >= first_prealloc_ino &&
936 ino + j < last_prealloc_ino) {
937 set_agbno_state(mp, agno,
938 agbno, XR_E_INO);
939 } else {
940 do_warn(
941 "inode chunk claims used block, inobt block - agno %d, bno %d, inopb %d\n",
942 agno, bno,
943 mp->m_sb.sb_inopblock);
944 suspect++;
945 /*
946 * XXX - maybe should mark
947 * block a duplicate
948 */
949 continue;
950 }
951 }
952 }
953 /*
954 * ensure only one avl entry per chunk
955 */
956 find_inode_rec_range(agno, ino,
957 ino + XFS_INODES_PER_CHUNK,
958 &first_rec,
959 &last_rec);
960 if (first_rec != NULL) {
961 /*
962 * this chunk overlaps with one (or more)
963 * already in the tree
964 */
965 do_warn(
966 "inode rec for ino %llu (%d/%d) overlaps existing rec (start %d/%d)\n",
967 lino, agno, ino,
968 agno, first_rec->ino_startnum);
969 suspect++;
970
971 /*
972 * if the 2 chunks start at the same place,
973 * then we don't have to put this one
974 * in the uncertain list. go to the next one.
975 */
976 if (first_rec->ino_startnum == ino)
977 continue;
978 }
979
980 agicount += XFS_INODES_PER_CHUNK;
981 agifreecount += INT_GET(rp[i].ir_freecount, ARCH_CONVERT);
982 nfree = 0;
983
984 /*
985 * now mark all the inodes as existing and free or used.
986 * if the tree is suspect, put them into the uncertain
987 * inode tree.
988 */
989 if (!suspect) {
990 if (XFS_INOBT_IS_FREE(&rp[i], 0, ARCH_CONVERT)) {
991 nfree++;
992 ino_rec = set_inode_free_alloc(agno,
993 ino);
994 } else {
995 ino_rec = set_inode_used_alloc(agno,
996 ino);
997 }
998 for (j = 1; j < XFS_INODES_PER_CHUNK; j++) {
999 if (XFS_INOBT_IS_FREE(&rp[i], j, ARCH_CONVERT)) {
1000 nfree++;
1001 set_inode_free(ino_rec, j);
1002 } else {
1003 set_inode_used(ino_rec, j);
1004 }
1005 }
1006 } else {
1007 for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
1008 if (XFS_INOBT_IS_FREE(&rp[i], j, ARCH_CONVERT)) {
1009 nfree++;
1010 add_aginode_uncertain(agno,
1011 ino + j, 1);
1012 } else {
1013 add_aginode_uncertain(agno,
1014 ino + j, 0);
1015 }
1016 }
1017 }
1018
1019 if (nfree != INT_GET(rp[i].ir_freecount, ARCH_CONVERT)) {
1020 do_warn(
1021 "ir_freecount/free mismatch, inode chunk %d/%d, freecount %d nfree %d\n",
1022 agno, ino, INT_GET(rp[i].ir_freecount, ARCH_CONVERT), nfree);
1023 }
1024 }
1025
1026 if (suspect)
1027 bad_ino_btree = 1;
1028
1029 return;
1030 }
1031
1032 /*
1033 * interior record, continue on
1034 */
1035 if (INT_GET(block->bb_numrecs, ARCH_CONVERT) > mp->m_inobt_mxr[1]) {
1036 numrecs = mp->m_inobt_mxr[1];
1037 hdr_errors++;
1038 }
1039 if (isroot == 0 && INT_GET(block->bb_numrecs, ARCH_CONVERT) < mp->m_inobt_mnr[1]) {
1040 numrecs = mp->m_inobt_mnr[1];
1041 hdr_errors++;
1042 }
1043
1044 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_inobt, block, 1,
1045 mp->m_inobt_mxr[1]);
1046
1047 /*
1048 * don't pass bogus tree flag down further if this block
1049 * looked ok. bail out if two levels in a row look bad.
1050 */
1051
1052 if (suspect && !hdr_errors)
1053 suspect = 0;
1054
1055 if (hdr_errors) {
1056 bad_ino_btree = 1;
1057 if (suspect)
1058 return;
1059 else suspect++;
1060 }
1061
1062 for (i = 0; i < numrecs; i++) {
1063 if (INT_GET(pp[i], ARCH_CONVERT) != 0 && verify_agbno(mp, agno, INT_GET(pp[i], ARCH_CONVERT)))
1064 scan_sbtree(INT_GET(pp[i], ARCH_CONVERT), level, agno, suspect,
1065 scanfunc_ino, 0);
1066 }
1067 }
1068
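/*
 * walk the agfl for this ag, marking each freelist block XR_E_FREE
 * in the block-state map and warning if the number of entries walked
 * doesn't match agf_flcount.
 */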
1069 void
1070 scan_freelist(
1071 xfs_agf_t *agf)
1072 {
1073 xfs_agfl_t *agfl;
1074 xfs_buf_t *agflbuf;
1075 xfs_agblock_t bno;
1076 int count;
1077 int i;
1078
1079 if (XFS_SB_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
1080 XFS_AGF_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
1081 XFS_AGI_BLOCK(mp) != XFS_AGFL_BLOCK(mp))
1082 set_agbno_state(mp, INT_GET(agf->agf_seqno, ARCH_CONVERT),
1083 XFS_AGFL_BLOCK(mp), XR_E_FS_MAP);
1084 if (INT_GET(agf->agf_flcount, ARCH_CONVERT) == 0)
1085 return;
1086 agflbuf = libxfs_readbuf(mp->m_dev,
1087 XFS_AG_DADDR(mp, INT_GET(agf->agf_seqno, ARCH_CONVERT),
1088 XFS_AGFL_DADDR), 1, 0);
1089 if (!agflbuf) {
1090 do_abort("can't read agfl block for ag %d\n",
1091 INT_GET(agf->agf_seqno, ARCH_CONVERT));
1092 return;
1093 }
1094 agfl = XFS_BUF_TO_AGFL(agflbuf);
1095 i = INT_GET(agf->agf_flfirst, ARCH_CONVERT);
1096 count = 0;
1097 for (;;) {
1098 bno = INT_GET(agfl->agfl_bno[i], ARCH_CONVERT);
1099 if (verify_agbno(mp, INT_GET(agf->agf_seqno,ARCH_CONVERT), bno))
1100 set_agbno_state(mp,
1101 INT_GET(agf->agf_seqno, ARCH_CONVERT),
1102 bno, XR_E_FREE);
1103 else
1104 do_warn("bad agbno %u in agfl, agno %d\n",
1105 bno, INT_GET(agf->agf_seqno, ARCH_CONVERT));
1106 count++;
1107 if (i == INT_GET(agf->agf_fllast, ARCH_CONVERT))
1108 break;
1109 if (++i == XFS_AGFL_SIZE)
1110 i = 0;
1111 }
1112 if (count != INT_GET(agf->agf_flcount, ARCH_CONVERT)) {
1113 do_warn("freeblk count %d != flcount %d in ag %d\n", count,
1114 INT_GET(agf->agf_flcount, ARCH_CONVERT),
1115 INT_GET(agf->agf_seqno, ARCH_CONVERT));
1116 }
1117 libxfs_putbuf(agflbuf);
1118 }
1119
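/*
 * top-level per-ag scan:  read this ag's superblock, agf and agi,
 * verify (and if allowed, repair) the headers, then scan the
 * freelist and the bno, cnt and inode btrees rooted in them.
 * modified headers are written back unless we're in no_modify mode.
 */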
1120 void
1121 scan_ag(
1122 xfs_agnumber_t agno)
1123 {
1124 xfs_agf_t *agf;
1125 xfs_buf_t *agfbuf;
1126 int agf_dirty;
1127 xfs_agi_t *agi;
1128 xfs_buf_t *agibuf;
1129 int agi_dirty;
1130 xfs_sb_t *sb;
1131 xfs_buf_t *sbbuf;
1132 int sb_dirty;
1133 int status;
1134
1135 cnt_agffreeblks = cnt_agflongest = 0;
1136 bno_agffreeblks = bno_agflongest = 0;
1137
1138 agi_dirty = agf_dirty = sb_dirty = 0;
1139
1140 agicount = agifreecount = 0;
1141
1142 sbbuf = libxfs_readbuf(mp->m_dev, XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
1143 1, 0);
1144 if (!sbbuf) {
1145 do_error("can't get root superblock for ag %d\n", agno);
1146 return;
1147 }
1148
1149 sb = (xfs_sb_t *)calloc(BBSIZE, 1);
1150 if (!sb) {
1151 do_error("can't allocate memory for superblock\n");
1152 libxfs_putbuf(sbbuf);
1153 return;
1154 }
1155 libxfs_xlate_sb(XFS_BUF_TO_SBP(sbbuf), sb, 1, ARCH_CONVERT,
1156 XFS_SB_ALL_BITS);
1157
1158 agfbuf = libxfs_readbuf(mp->m_dev,
1159 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR), 1, 0);
1160 if (!agfbuf) {
1161 do_error("can't read agf block for ag %d\n", agno);
1162 libxfs_putbuf(sbbuf);
1163 free(sb);
1164 return;
1165 }
1166 agf = XFS_BUF_TO_AGF(agfbuf);
1167
1168 agibuf = libxfs_readbuf(mp->m_dev,
1169 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR), 1, 0);
1170 if (!agibuf) {
1171 do_error("can't read agi block for ag %d\n", agno);
1172 libxfs_putbuf(agfbuf);
1173 libxfs_putbuf(sbbuf);
1174 free(sb);
1175 return;
1176 }
1177 agi = XFS_BUF_TO_AGI(agibuf);
1178
1179 /* fix up bad ag headers */
1180
1181 status = verify_set_agheader(mp, sbbuf, sb, agf, agi, agno);
1182
1183 if (status & XR_AG_SB_SEC) {
1184 if (!no_modify)
1185 sb_dirty = 1;
1186 /*
1187 * clear bad sector bit because we don't want
1188 * to skip further processing. we just want to
1189 * ensure that we write out the modified sb buffer.
1190 */
1191 status &= ~XR_AG_SB_SEC;
1192 }
1193 if (status & XR_AG_SB) {
1194 if (!no_modify)
1195 sb_dirty = 1;
1196 else
1197 do_warn("would ");
1198
1199 do_warn("reset bad sb for ag %d\n", agno);
1200 }
1201 if (status & XR_AG_AGF) {
1202 if (!no_modify)
1203 agf_dirty = 1;
1204 else
1205 do_warn("would ");
1206
1207 do_warn("reset bad agf for ag %d\n", agno);
1208 }
1209 if (status & XR_AG_AGI) {
1210 if (!no_modify)
1211 agi_dirty = 1;
1212 else
1213 do_warn("would ");
1214
1215 do_warn("reset bad agi for ag %d\n", agno);
1216 }
1217
1218 if (status && no_modify) {
1219 libxfs_putbuf(agibuf);
1220 libxfs_putbuf(agfbuf);
1221 libxfs_putbuf(sbbuf);
1222 free(sb);
1223
1224 do_warn("bad uncorrected agheader %d, skipping ag...\n", agno);
1225
1226 return;
1227 }
1228
1229 scan_freelist(agf);
1230
1231 if (INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT) != 0 &&
1232 verify_agbno(mp, agno, INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT)))
1233 scan_sbtree(INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT),
1234 INT_GET(agf->agf_levels[XFS_BTNUM_BNO], ARCH_CONVERT),
1235 agno, 0, scanfunc_bno, 1);
1236 else
1237 do_warn("bad agbno %u for btbno root, agno %d\n",
1238 INT_GET(agf->agf_roots[XFS_BTNUM_BNO], ARCH_CONVERT), agno);
1239
1240 if (INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT) != 0 &&
1241 verify_agbno(mp, agno, INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT)))
1242 scan_sbtree(INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT),
1243 INT_GET(agf->agf_levels[XFS_BTNUM_CNT], ARCH_CONVERT),
1244 agno, 0, scanfunc_cnt, 1);
1245 else
1246 do_warn("bad agbno %u for btbcnt root, agno %d\n",
1247 INT_GET(agf->agf_roots[XFS_BTNUM_CNT], ARCH_CONVERT), agno);
1248
1249 if (INT_GET(agi->agi_root, ARCH_CONVERT) != 0 && verify_agbno(mp, agno, INT_GET(agi->agi_root, ARCH_CONVERT)))
1250 scan_sbtree(INT_GET(agi->agi_root, ARCH_CONVERT), INT_GET(agi->agi_level, ARCH_CONVERT), agno, 0,
1251 scanfunc_ino, 1);
1252 else
1253 do_warn("bad agbno %u for inobt root, agno %d\n",
1254 INT_GET(agi->agi_root, ARCH_CONVERT), agno);
1255
1256 ASSERT(agi_dirty == 0 || (agi_dirty && !no_modify));
1257
1258 if (agi_dirty && !no_modify)
1259 libxfs_writebuf(agibuf, 0);
1260 else
1261 libxfs_putbuf(agibuf);
1262
1263 ASSERT(agf_dirty == 0 || (agf_dirty && !no_modify));
1264
1265 if (agf_dirty && !no_modify)
1266 libxfs_writebuf(agfbuf, 0);
1267 else
1268 libxfs_putbuf(agfbuf);
1269
1270 ASSERT(sb_dirty == 0 || (sb_dirty && !no_modify));
1271
1272 if (sb_dirty && !no_modify) {
1273 libxfs_xlate_sb(XFS_BUF_PTR(sbbuf), sb, -1, ARCH_CONVERT,
1274 XFS_SB_ALL_BITS);
1275 libxfs_writebuf(sbbuf, 0);
1276 } else
1277 libxfs_putbuf(sbbuf);
1278 free(sb);
1279 }