/*
 * repair/scan.c — from xfsprogs (thirdparty/xfsprogs-dev.git)
 * Commit context: "repair: per AG locks contend for cachelines"
 */
1 /*
2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <libxfs.h>
20 #include "avl.h"
21 #include "globals.h"
22 #include "agheader.h"
23 #include "incore.h"
24 #include "protos.h"
25 #include "err_protos.h"
26 #include "dinode.h"
27 #include "scan.h"
28 #include "versions.h"
29 #include "bmap.h"
30 #include "progress.h"
31 #include "threads.h"
32
33 static xfs_mount_t *mp = NULL;
34
35 /*
36 * Variables to validate AG header values against the manual count
37 * from the btree traversal.
38 */
39 struct aghdr_cnts {
40 xfs_agnumber_t agno;
41 xfs_extlen_t agffreeblks;
42 xfs_extlen_t agflongest;
43 __uint64_t agfbtreeblks;
44 __uint32_t agicount;
45 __uint32_t agifreecount;
46 __uint64_t fdblocks;
47 __uint64_t icount;
48 __uint64_t ifreecount;
49 };
50
/*
 * Install the global mount pointer used by all the scan routines in
 * this file, purging the libxfs buffer cache first so no buffers
 * belonging to a previously set mount can be reused.
 */
void
set_mp(xfs_mount_t *mpp)
{
	libxfs_bcache_purge();
	mp = mpp;
}
57
58 static void
59 scan_sbtree(
60 xfs_agblock_t root,
61 int nlevels,
62 xfs_agnumber_t agno,
63 int suspect,
64 void (*func)(struct xfs_btree_block *block,
65 int level,
66 xfs_agblock_t bno,
67 xfs_agnumber_t agno,
68 int suspect,
69 int isroot,
70 __uint32_t magic,
71 void *priv),
72 int isroot,
73 __uint32_t magic,
74 void *priv,
75 const struct xfs_buf_ops *ops)
76 {
77 xfs_buf_t *bp;
78
79 bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, root),
80 XFS_FSB_TO_BB(mp, 1), 0, ops);
81 if (!bp) {
82 do_error(_("can't read btree block %d/%d\n"), agno, root);
83 return;
84 }
85 (*func)(XFS_BUF_TO_BLOCK(bp), nlevels - 1, root, agno, suspect,
86 isroot, magic, priv);
87 libxfs_putbuf(bp);
88 }
89
90 /*
91 * returns 1 on bad news (inode needs to be cleared), 0 on good
92 */
93 int
94 scan_lbtree(
95 xfs_dfsbno_t root,
96 int nlevels,
97 int (*func)(struct xfs_btree_block *block,
98 int level,
99 int type,
100 int whichfork,
101 xfs_dfsbno_t bno,
102 xfs_ino_t ino,
103 xfs_drfsbno_t *tot,
104 __uint64_t *nex,
105 blkmap_t **blkmapp,
106 bmap_cursor_t *bm_cursor,
107 int isroot,
108 int check_dups,
109 int *dirty,
110 __uint64_t magic),
111 int type,
112 int whichfork,
113 xfs_ino_t ino,
114 xfs_drfsbno_t *tot,
115 __uint64_t *nex,
116 blkmap_t **blkmapp,
117 bmap_cursor_t *bm_cursor,
118 int isroot,
119 int check_dups,
120 __uint64_t magic,
121 const struct xfs_buf_ops *ops)
122 {
123 xfs_buf_t *bp;
124 int err;
125 int dirty = 0;
126
127 bp = libxfs_readbuf(mp->m_dev, XFS_FSB_TO_DADDR(mp, root),
128 XFS_FSB_TO_BB(mp, 1), 0, ops);
129 if (!bp) {
130 do_error(_("can't read btree block %d/%d\n"),
131 XFS_FSB_TO_AGNO(mp, root),
132 XFS_FSB_TO_AGBNO(mp, root));
133 return(1);
134 }
135 err = (*func)(XFS_BUF_TO_BLOCK(bp), nlevels - 1,
136 type, whichfork, root, ino, tot, nex, blkmapp,
137 bm_cursor, isroot, check_dups, &dirty,
138 magic);
139
140 ASSERT(dirty == 0 || (dirty && !no_modify));
141
142 if (dirty && !no_modify)
143 libxfs_writebuf(bp, 0);
144 else
145 libxfs_putbuf(bp);
146
147 return(err);
148 }
149
150 int
151 scan_bmapbt(
152 struct xfs_btree_block *block,
153 int level,
154 int type,
155 int whichfork,
156 xfs_dfsbno_t bno,
157 xfs_ino_t ino,
158 xfs_drfsbno_t *tot,
159 __uint64_t *nex,
160 blkmap_t **blkmapp,
161 bmap_cursor_t *bm_cursor,
162 int isroot,
163 int check_dups,
164 int *dirty,
165 __uint64_t magic)
166 {
167 int i;
168 int err;
169 xfs_bmbt_ptr_t *pp;
170 xfs_bmbt_key_t *pkey;
171 xfs_bmbt_rec_t *rp;
172 xfs_dfiloff_t first_key;
173 xfs_dfiloff_t last_key;
174 char *forkname = get_forkname(whichfork);
175 int numrecs;
176 xfs_agnumber_t agno;
177 xfs_agblock_t agbno;
178 int state;
179
180 /*
181 * unlike the ag freeblock btrees, if anything looks wrong
182 * in an inode bmap tree, just bail. it's possible that
183 * we'll miss a case where the to-be-toasted inode and
184 * another inode are claiming the same block but that's
185 * highly unlikely.
186 */
187 if (be32_to_cpu(block->bb_magic) != magic) {
188 do_warn(
189 _("bad magic # %#x in inode %" PRIu64 " (%s fork) bmbt block %" PRIu64 "\n"),
190 be32_to_cpu(block->bb_magic), ino, forkname, bno);
191 return(1);
192 }
193 if (be16_to_cpu(block->bb_level) != level) {
194 do_warn(
195 _("expected level %d got %d in inode %" PRIu64 ", (%s fork) bmbt block %" PRIu64 "\n"),
196 level, be16_to_cpu(block->bb_level),
197 ino, forkname, bno);
198 return(1);
199 }
200
201 if (magic == XFS_BMAP_CRC_MAGIC) {
202 /* verify owner */
203 if (be64_to_cpu(block->bb_u.l.bb_owner) != ino) {
204 do_warn(
205 _("expected owner inode %" PRIu64 ", got %llu, bmbt block %" PRIu64 "\n"),
206 ino, be64_to_cpu(block->bb_u.l.bb_owner), bno);
207 return(1);
208 }
209 }
210
211 if (check_dups == 0) {
212 /*
213 * check sibling pointers. if bad we have a conflict
214 * between the sibling pointers and the child pointers
215 * in the parent block. blow out the inode if that happens
216 */
217 if (bm_cursor->level[level].fsbno != NULLDFSBNO) {
218 /*
219 * this is not the first block on this level
220 * so the cursor for this level has recorded the
221 * values for this's block left-sibling.
222 */
223 if (bno != bm_cursor->level[level].right_fsbno) {
224 do_warn(
225 _("bad fwd (right) sibling pointer (saw %" PRIu64 " parent block says %" PRIu64 ")\n"
226 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
227 bm_cursor->level[level].right_fsbno,
228 bno, ino, forkname,
229 bm_cursor->level[level].fsbno);
230 return(1);
231 }
232 if (be64_to_cpu(block->bb_u.l.bb_leftsib) !=
233 bm_cursor->level[level].fsbno) {
234 do_warn(
235 _("bad back (left) sibling pointer (saw %llu parent block says %" PRIu64 ")\n"
236 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
237 (unsigned long long)
238 be64_to_cpu(block->bb_u.l.bb_leftsib),
239 bm_cursor->level[level].fsbno,
240 ino, forkname, bno);
241 return(1);
242 }
243 } else {
244 /*
245 * This is the first or only block on this level.
246 * Check that the left sibling pointer is NULL
247 */
248 if (be64_to_cpu(block->bb_u.l.bb_leftsib) != NULLDFSBNO) {
249 do_warn(
250 _("bad back (left) sibling pointer (saw %llu should be NULL (0))\n"
251 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
252 (unsigned long long)
253 be64_to_cpu(block->bb_u.l.bb_leftsib),
254 ino, forkname, bno);
255 return(1);
256 }
257 }
258
259 /*
260 * update cursor block pointers to reflect this block
261 */
262 bm_cursor->level[level].fsbno = bno;
263 bm_cursor->level[level].left_fsbno =
264 be64_to_cpu(block->bb_u.l.bb_leftsib);
265 bm_cursor->level[level].right_fsbno =
266 be64_to_cpu(block->bb_u.l.bb_rightsib);
267
268 agno = XFS_FSB_TO_AGNO(mp, bno);
269 agbno = XFS_FSB_TO_AGBNO(mp, bno);
270
271 pthread_mutex_lock(&ag_locks[agno].lock);
272 state = get_bmap(agno, agbno);
273 switch (state) {
274 case XR_E_UNKNOWN:
275 case XR_E_FREE1:
276 case XR_E_FREE:
277 set_bmap(agno, agbno, XR_E_INUSE);
278 break;
279 case XR_E_FS_MAP:
280 case XR_E_INUSE:
281 /*
282 * we'll try and continue searching here since
283 * the block looks like it's been claimed by file
284 * to store user data, a directory to store directory
285 * data, or the space allocation btrees but since
286 * we made it here, the block probably
287 * contains btree data.
288 */
289 set_bmap(agno, agbno, XR_E_MULT);
290 do_warn(
291 _("inode 0x%" PRIx64 "bmap block 0x%" PRIx64 " claimed, state is %d\n"),
292 ino, bno, state);
293 break;
294 case XR_E_MULT:
295 case XR_E_INUSE_FS:
296 set_bmap(agno, agbno, XR_E_MULT);
297 do_warn(
298 _("inode 0x%" PRIx64 " bmap block 0x%" PRIx64 " claimed, state is %d\n"),
299 ino, bno, state);
300 /*
301 * if we made it to here, this is probably a bmap block
302 * that is being used by *another* file as a bmap block
303 * so the block will be valid. Both files should be
304 * trashed along with any other file that impinges on
305 * any blocks referenced by either file. So we
306 * continue searching down this btree to mark all
307 * blocks duplicate
308 */
309 break;
310 case XR_E_BAD_STATE:
311 default:
312 do_warn(
313 _("bad state %d, inode %" PRIu64 " bmap block 0x%" PRIx64 "\n"),
314 state, ino, bno);
315 break;
316 }
317 pthread_mutex_unlock(&ag_locks[agno].lock);
318 } else {
319 /*
320 * attribute fork for realtime files is in the regular
321 * filesystem
322 */
323 if (type != XR_INO_RTDATA || whichfork != XFS_DATA_FORK) {
324 if (search_dup_extent(XFS_FSB_TO_AGNO(mp, bno),
325 XFS_FSB_TO_AGBNO(mp, bno),
326 XFS_FSB_TO_AGBNO(mp, bno) + 1))
327 return(1);
328 } else {
329 if (search_rt_dup_extent(mp, bno))
330 return(1);
331 }
332 }
333 (*tot)++;
334 numrecs = be16_to_cpu(block->bb_numrecs);
335
336 if (level == 0) {
337 if (numrecs > mp->m_bmap_dmxr[0] || (isroot == 0 && numrecs <
338 mp->m_bmap_dmnr[0])) {
339 do_warn(
340 _("inode %" PRIu64 " bad # of bmap records (%u, min - %u, max - %u)\n"),
341 ino, numrecs, mp->m_bmap_dmnr[0],
342 mp->m_bmap_dmxr[0]);
343 return(1);
344 }
345 rp = XFS_BMBT_REC_ADDR(mp, block, 1);
346 *nex += numrecs;
347 /*
348 * XXX - if we were going to fix up the btree record,
349 * we'd do it right here. For now, if there's a problem,
350 * we'll bail out and presumably clear the inode.
351 */
352 if (check_dups == 0) {
353 err = process_bmbt_reclist(mp, rp, &numrecs, type, ino,
354 tot, blkmapp, &first_key,
355 &last_key, whichfork);
356 if (err)
357 return 1;
358
359 /*
360 * check that key ordering is monotonically increasing.
361 * if the last_key value in the cursor is set to
362 * NULLDFILOFF, then we know this is the first block
363 * on the leaf level and we shouldn't check the
364 * last_key value.
365 */
366 if (first_key <= bm_cursor->level[level].last_key &&
367 bm_cursor->level[level].last_key !=
368 NULLDFILOFF) {
369 do_warn(
370 _("out-of-order bmap key (file offset) in inode %" PRIu64 ", %s fork, fsbno %" PRIu64 "\n"),
371 ino, forkname, bno);
372 return(1);
373 }
374 /*
375 * update cursor keys to reflect this block.
376 * don't have to check if last_key is > first_key
377 * since that gets checked by process_bmbt_reclist.
378 */
379 bm_cursor->level[level].first_key = first_key;
380 bm_cursor->level[level].last_key = last_key;
381
382 return 0;
383 } else {
384 return scan_bmbt_reclist(mp, rp, &numrecs, type, ino,
385 tot, whichfork);
386 }
387 }
388 if (numrecs > mp->m_bmap_dmxr[1] || (isroot == 0 && numrecs <
389 mp->m_bmap_dmnr[1])) {
390 do_warn(
391 _("inode %" PRIu64 " bad # of bmap records (%u, min - %u, max - %u)\n"),
392 ino, numrecs, mp->m_bmap_dmnr[1], mp->m_bmap_dmxr[1]);
393 return(1);
394 }
395 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
396 pkey = XFS_BMBT_KEY_ADDR(mp, block, 1);
397
398 last_key = NULLDFILOFF;
399
400 for (i = 0, err = 0; i < numrecs; i++) {
401 /*
402 * XXX - if we were going to fix up the interior btree nodes,
403 * we'd do it right here. For now, if there's a problem,
404 * we'll bail out and presumably clear the inode.
405 */
406 if (!verify_dfsbno(mp, be64_to_cpu(pp[i]))) {
407 do_warn(
408 _("bad bmap btree ptr 0x%llx in ino %" PRIu64 "\n"),
409 (unsigned long long) be64_to_cpu(pp[i]), ino);
410 return(1);
411 }
412
413 err = scan_lbtree(be64_to_cpu(pp[i]), level, scan_bmapbt,
414 type, whichfork, ino, tot, nex, blkmapp,
415 bm_cursor, 0, check_dups, magic,
416 &xfs_bmbt_buf_ops);
417 if (err)
418 return(1);
419
420 /*
421 * fix key (offset) mismatches between the first key
422 * in the child block (as recorded in the cursor) and the
423 * key in the interior node referencing the child block.
424 *
425 * fixes cases where entries have been shifted between
426 * child blocks but the parent hasn't been updated. We
427 * don't have to worry about the key values in the cursor
428 * not being set since we only look at the key values of
429 * our child and those are guaranteed to be set by the
430 * call to scan_lbtree() above.
431 */
432 if (check_dups == 0 && be64_to_cpu(pkey[i].br_startoff) !=
433 bm_cursor->level[level-1].first_key) {
434 if (!no_modify) {
435 do_warn(
436 _("correcting bt key (was %llu, now %" PRIu64 ") in inode %" PRIu64 "\n"
437 "\t\t%s fork, btree block %" PRIu64 "\n"),
438 (unsigned long long)
439 be64_to_cpu(pkey[i].br_startoff),
440 bm_cursor->level[level-1].first_key,
441 ino,
442 forkname, bno);
443 *dirty = 1;
444 pkey[i].br_startoff = cpu_to_be64(
445 bm_cursor->level[level-1].first_key);
446 } else {
447 do_warn(
448 _("bad btree key (is %llu, should be %" PRIu64 ") in inode %" PRIu64 "\n"
449 "\t\t%s fork, btree block %" PRIu64 "\n"),
450 (unsigned long long)
451 be64_to_cpu(pkey[i].br_startoff),
452 bm_cursor->level[level-1].first_key,
453 ino, forkname, bno);
454 }
455 }
456 }
457
458 /*
459 * If we're the last node at our level, check that the last child
460 * block's forward sibling pointer is NULL.
461 */
462 if (check_dups == 0 &&
463 bm_cursor->level[level].right_fsbno == NULLDFSBNO &&
464 bm_cursor->level[level - 1].right_fsbno != NULLDFSBNO) {
465 do_warn(
466 _("bad fwd (right) sibling pointer (saw %" PRIu64 " should be NULLDFSBNO)\n"
467 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
468 bm_cursor->level[level - 1].right_fsbno,
469 ino, forkname, bm_cursor->level[level - 1].fsbno);
470 return(1);
471 }
472
473 /*
474 * update cursor keys to reflect this block
475 */
476 if (check_dups == 0) {
477 bm_cursor->level[level].first_key =
478 be64_to_cpu(pkey[0].br_startoff);
479 bm_cursor->level[level].last_key =
480 be64_to_cpu(pkey[numrecs - 1].br_startoff);
481 }
482
483 return(0);
484 }
485
/*
 * Scan one block of a freespace btree (by-bno "bno" or by-count "cnt",
 * selected by the magic number).  Validates the header, records the block
 * in the incore block map, accumulates free-space counters into the
 * per-AG aghdr_cnts, and recurses into child blocks.  Header problems
 * increment hdr_errors and may mark the subtree suspect; two suspect
 * levels in a row abort the walk of that subtree.
 */
static void
scan_allocbt(
	struct xfs_btree_block	*block,
	int			level,
	xfs_agblock_t		bno,
	xfs_agnumber_t		agno,
	int			suspect,
	int			isroot,
	__uint32_t		magic,
	void			*priv)
{
	struct aghdr_cnts	*agcnts = priv;
	const char		*name;
	int			i;
	xfs_alloc_ptr_t		*pp;
	xfs_alloc_rec_t		*rp;
	int			hdr_errors = 0;
	int			numrecs;
	int			state;
	xfs_extlen_t		lastcount = 0;	/* for cnt-btree ordering check */
	xfs_agblock_t		lastblock = 0;	/* for bno-btree ordering check */

	/* pick the human-readable tree name used in all warnings below */
	switch (magic) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
		name = "bno";
		break;
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTC_MAGIC:
		name = "cnt";
		break;
	default:
		name = "(unknown)";
		assert(0);
		break;
	}

	if (be32_to_cpu(block->bb_magic) != magic) {
		do_warn(_("bad magic # %#x in bt%s block %d/%d\n"),
			be32_to_cpu(block->bb_magic), name, agno, bno);
		hdr_errors++;
		if (suspect)
			return;
	}

	/*
	 * All freespace btree blocks except the roots are freed for a
	 * fully used filesystem, thus they are counted towards the
	 * free data block counter.
	 */
	if (!isroot) {
		agcnts->agfbtreeblks++;
		agcnts->fdblocks++;
	}

	if (be16_to_cpu(block->bb_level) != level) {
		do_warn(_("expected level %d got %d in bt%s block %d/%d\n"),
			level, be16_to_cpu(block->bb_level), name, agno, bno);
		hdr_errors++;
		if (suspect)
			return;
	}

	/*
	 * check for btree blocks multiply claimed
	 */
	state = get_bmap(agno, bno);
	if (state != XR_E_UNKNOWN) {
		set_bmap(agno, bno, XR_E_MULT);
		do_warn(
_("%s freespace btree block claimed (state %d), agno %d, bno %d, suspect %d\n"),
			name, state, agno, bno, suspect);
		return;
	}
	set_bmap(agno, bno, XR_E_FS_MAP);

	numrecs = be16_to_cpu(block->bb_numrecs);

	if (level == 0) {
		/* clamp an out-of-range record count so iteration is safe */
		if (numrecs > mp->m_alloc_mxr[0]) {
			numrecs = mp->m_alloc_mxr[0];
			hdr_errors++;
		}
		if (isroot == 0 && numrecs < mp->m_alloc_mnr[0]) {
			numrecs = mp->m_alloc_mnr[0];
			hdr_errors++;
		}

		if (hdr_errors) {
			do_warn(
	_("bad btree nrecs (%u, min=%u, max=%u) in bt%s block %u/%u\n"),
				be16_to_cpu(block->bb_numrecs),
				mp->m_alloc_mnr[0], mp->m_alloc_mxr[0],
				name, agno, bno);
			suspect++;
		}

		rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
		for (i = 0; i < numrecs; i++) {
			xfs_agblock_t		b, end;
			xfs_extlen_t		len, blen;

			b = be32_to_cpu(rp[i].ar_startblock);
			len = be32_to_cpu(rp[i].ar_blockcount);
			end = b + len;	/* exclusive end of the free extent */

			if (b == 0 || !verify_agbno(mp, agno, b)) {
				do_warn(
	_("invalid start block %u in record %u of %s btree block %u/%u\n"),
					b, i, name, agno, bno);
				continue;
			}
			if (len == 0 || !verify_agbno(mp, agno, end - 1)) {
				do_warn(
	_("invalid length %u in record %u of %s btree block %u/%u\n"),
					len, i, name, agno, bno);
				continue;
			}

			/*
			 * bno btree records must be sorted by start block;
			 * cnt btree records by length.  Free-space totals are
			 * only accumulated from the cnt btree so the two
			 * trees don't double count.
			 */
			if (magic == XFS_ABTB_MAGIC ||
			    magic == XFS_ABTB_CRC_MAGIC) {
				if (b <= lastblock) {
					do_warn(_(
	"out-of-order bno btree record %d (%u %u) block %u/%u\n"),
						i, b, len, agno, bno);
				} else {
					lastblock = b;
				}
			} else {
				agcnts->fdblocks += len;
				agcnts->agffreeblks += len;
				if (len > agcnts->agflongest)
					agcnts->agflongest = len;
				if (len < lastcount) {
					do_warn(_(
	"out-of-order cnt btree record %d (%u %u) block %u/%u\n"),
						i, b, len, agno, bno);
				} else {
					lastcount = len;
				}
			}

			/*
			 * mark the extent: first tree to see a block sets
			 * FREE1, the other promotes it to FREE; anything
			 * else is a multiple claim.
			 */
			for ( ; b < end; b += blen) {
				state = get_bmap_ext(agno, b, end, &blen);
				switch (state) {
				case XR_E_UNKNOWN:
					set_bmap(agno, b, XR_E_FREE1);
					break;
				case XR_E_FREE1:
					/*
					 * no warning messages -- we'll catch
					 * FREE1 blocks later
					 */
					if (magic == XFS_ABTC_MAGIC ||
					    magic == XFS_ABTC_CRC_MAGIC) {
						set_bmap_ext(agno, b, blen,
							     XR_E_FREE);
						break;
					}
					/* deliberate fallthrough for the bno
					 * btree: FREE1 here means this tree
					 * already claimed the block once */
				default:
					do_warn(
	_("block (%d,%d-%d) multiply claimed by %s space tree, state - %d\n"),
						agno, b, b + blen - 1,
						name, state);
					break;
				}
			}
		}
		return;
	}

	/*
	 * interior record
	 */
	pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);

	if (numrecs > mp->m_alloc_mxr[1]) {
		numrecs = mp->m_alloc_mxr[1];
		hdr_errors++;
	}
	if (isroot == 0 && numrecs < mp->m_alloc_mnr[1]) {
		numrecs = mp->m_alloc_mnr[1];
		hdr_errors++;
	}

	/*
	 * don't pass bogus tree flag down further if this block
	 * looked ok.  bail out if two levels in a row look bad.
	 */
	if (hdr_errors) {
		do_warn(
	_("bad btree nrecs (%u, min=%u, max=%u) in bt%s block %u/%u\n"),
			be16_to_cpu(block->bb_numrecs),
			mp->m_alloc_mnr[1], mp->m_alloc_mxr[1],
			name, agno, bno);
		if (suspect)
			return;
		suspect++;
	} else if (suspect) {
		suspect = 0;
	}

	for (i = 0; i < numrecs; i++) {
		/* NB: this child bno intentionally shadows the parameter */
		xfs_agblock_t		bno = be32_to_cpu(pp[i]);

		/*
		 * XXX - put sibling detection right here.
		 * we know our sibling chain is good.  So as we go,
		 * we check the entry before and after each entry.
		 * If either of the entries references a different block,
		 * check the sibling pointer.  If there's a sibling
		 * pointer mismatch, try and extract as much data
		 * as possible.
		 */
		if (bno != 0 && verify_agbno(mp, agno, bno)) {
			switch (magic) {
			case XFS_ABTB_CRC_MAGIC:
			case XFS_ABTB_MAGIC:
				scan_sbtree(bno, level, agno, suspect,
					    scan_allocbt, 0, magic, priv,
					    &xfs_allocbt_buf_ops);
				break;
			case XFS_ABTC_CRC_MAGIC:
			case XFS_ABTC_MAGIC:
				scan_sbtree(bno, level, agno, suspect,
					    scan_allocbt, 0, magic, priv,
					    &xfs_allocbt_buf_ops);
				break;
			}
		}
	}
}
718
/*
 * Validate one inode btree record (a chunk of XFS_INODES_PER_CHUNK
 * inodes) and fold it into the incore inode trees.  A clean chunk goes
 * into the regular avl tree with per-inode free/used state; a chunk from
 * a suspect subtree goes into the uncertain-inode tree for re-verification
 * in a later phase.  Returns the (possibly incremented) suspect count.
 */
static int
scan_single_ino_chunk(
	xfs_agnumber_t	agno,
	xfs_inobt_rec_t	*rp,
	int		suspect)
{
	xfs_ino_t	lino;		/* full fs inode number of chunk start */
	xfs_agino_t	ino;		/* AG-relative inode number of chunk start */
	xfs_agblock_t	agbno;
	int		j;
	int		nfree;
	int		off;		/* inode offset within its block */
	int		state;
	ino_tree_node_t *ino_rec, *first_rec, *last_rec;

	ino = be32_to_cpu(rp->ir_startino);
	off = XFS_AGINO_TO_OFFSET(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, ino);
	lino = XFS_AGINO_TO_INO(mp, agno, ino);

	/*
	 * on multi-block block chunks, all chunks start
	 * at the beginning of the block.  with multi-chunk
	 * blocks, all chunks must start on 64-inode boundaries
	 * since each block can hold N complete chunks.  if
	 * fs has aligned inodes, all chunks must start
	 * at a fs_ino_alignment*N'th agbno.  skip recs
	 * with badly aligned starting inodes.
	 */
	if (ino == 0 ||
	    (inodes_per_block <= XFS_INODES_PER_CHUNK && off !=  0)  ||
	    (inodes_per_block > XFS_INODES_PER_CHUNK &&
	     off % XFS_INODES_PER_CHUNK != 0)  ||
	    (fs_aligned_inodes && agbno % fs_ino_alignment != 0))  {
		do_warn(
	_("badly aligned inode rec (starting inode = %" PRIu64 ")\n"),
			lino);
		suspect++;
	}

	/*
	 * verify numeric validity of inode chunk first
	 * before inserting into a tree.  don't have to
	 * worry about the overflow case because the
	 * starting ino number of a chunk can only get
	 * within 255 inodes of max (NULLAGINO).  if it
	 * gets closer, the agino number will be illegal
	 * as the agbno will be too large.
	 */
	if (verify_aginum(mp, agno, ino))  {
		do_warn(
_("bad starting inode # (%" PRIu64 " (0x%x 0x%x)) in ino rec, skipping rec\n"),
			lino, agno, ino);
		return ++suspect;
	}

	/*
	 * NOTE: %zx is correct for the end-inode expression below --
	 * XFS_INODES_PER_CHUNK is sizeof-derived, so the sum promotes
	 * to size_t.
	 */
	if (verify_aginum(mp, agno,
			ino + XFS_INODES_PER_CHUNK - 1))  {
		do_warn(
_("bad ending inode # (%" PRIu64 " (0x%x 0x%zx)) in ino rec, skipping rec\n"),
			lino + XFS_INODES_PER_CHUNK - 1,
			agno,
			ino + XFS_INODES_PER_CHUNK - 1);
		return ++suspect;
	}

	/*
	 * set state of each block containing inodes
	 */
	if (off == 0 && !suspect)  {
		for (j = 0;
		     j < XFS_INODES_PER_CHUNK;
		     j += mp->m_sb.sb_inopblock)  {
			agbno = XFS_AGINO_TO_AGBNO(mp, ino + j);

			state = get_bmap(agno, agbno);
			if (state == XR_E_UNKNOWN)  {
				set_bmap(agno, agbno, XR_E_INO);
			} else if (state == XR_E_INUSE_FS && agno == 0 &&
				   ino + j >= first_prealloc_ino &&
				   ino + j < last_prealloc_ino)  {
				/* AG 0 preallocated inodes (root etc.) may
				 * legitimately sit in fs-reserved blocks */
				set_bmap(agno, agbno, XR_E_INO);
			} else  {
				do_warn(
_("inode chunk claims used block, inobt block - agno %d, bno %d, inopb %d\n"),
					agno, agbno, mp->m_sb.sb_inopblock);
				/*
				 * XXX - maybe should mark
				 * block a duplicate
				 */
				return ++suspect;
			}
		}
	}

	/*
	 * ensure only one avl entry per chunk
	 */
	find_inode_rec_range(mp, agno, ino, ino + XFS_INODES_PER_CHUNK,
			     &first_rec, &last_rec);
	if (first_rec != NULL)  {
		/*
		 * this chunk overlaps with one (or more)
		 * already in the tree
		 */
		do_warn(
_("inode rec for ino %" PRIu64 " (%d/%d) overlaps existing rec (start %d/%d)\n"),
			lino, agno, ino, agno, first_rec->ino_startnum);
		suspect++;

		/*
		 * if the 2 chunks start at the same place,
		 * then we don't have to put this one
		 * in the uncertain list.  go to the next one.
		 */
		if (first_rec->ino_startnum == ino)
			return suspect;
	}

	nfree = 0;

	/*
	 * now mark all the inodes as existing and free or used.
	 * if the tree is suspect, put them into the uncertain
	 * inode tree.
	 */
	if (!suspect)  {
		if (XFS_INOBT_IS_FREE_DISK(rp, 0))  {
			nfree++;
			ino_rec = set_inode_free_alloc(mp, agno, ino);
		} else  {
			ino_rec = set_inode_used_alloc(mp, agno, ino);
		}
		for (j = 1; j < XFS_INODES_PER_CHUNK; j++)  {
			if (XFS_INOBT_IS_FREE_DISK(rp, j))  {
				nfree++;
				set_inode_free(ino_rec, j);
			} else  {
				set_inode_used(ino_rec, j);
			}
		}
	} else  {
		for (j = 0; j < XFS_INODES_PER_CHUNK; j++)  {
			if (XFS_INOBT_IS_FREE_DISK(rp, j))  {
				nfree++;
				add_aginode_uncertain(mp, agno, ino + j, 1);
			} else  {
				add_aginode_uncertain(mp, agno, ino + j, 0);
			}
		}
	}

	/* cross-check our free count against the on-disk record */
	if (nfree != be32_to_cpu(rp->ir_freecount)) {
		do_warn(_("ir_freecount/free mismatch, inode "
			"chunk %d/%u, freecount %d nfree %d\n"),
			agno, ino, be32_to_cpu(rp->ir_freecount), nfree);
	}

	return suspect;
}
879
880
881 /*
882 * this one walks the inode btrees sucking the info there into
883 * the incore avl tree. We try and rescue corrupted btree records
884 * to minimize our chances of losing inodes. Inode info from potentially
885 * corrupt sources could be bogus so rather than put the info straight
886 * into the tree, instead we put it on a list and try and verify the
887 * info in the next phase by examining what's on disk. At that point,
888 * we'll be able to figure out what's what and stick the corrected info
889 * into the tree. We do bail out at some point and give up on a subtree
890 * so as to avoid walking randomly all over the ag.
891 *
892 * Note that it's also ok if the free/inuse info wrong, we can correct
893 * that when we examine the on-disk inode. The important thing is to
894 * get the start and alignment of the inode chunks right. Those chunks
895 * that we aren't sure about go into the uncertain list.
896 */
/*
 * Scan one inode btree block: validate the header, claim the block in the
 * incore block map, then either process the chunk records (leaf) via
 * scan_single_ino_chunk() or recurse into children (interior).  Inode and
 * free-inode totals are accumulated into the per-AG aghdr_cnts; any
 * header trouble sets the global bad_ino_btree flag.
 */
static void
scan_inobt(
	struct xfs_btree_block	*block,
	int			level,
	xfs_agblock_t		bno,
	xfs_agnumber_t		agno,
	int			suspect,
	int			isroot,
	__uint32_t		magic,
	void			*priv)
{
	struct aghdr_cnts	*agcnts = priv;
	int			i;
	int			numrecs;
	int			state;
	xfs_inobt_ptr_t		*pp;
	xfs_inobt_rec_t		*rp;
	int			hdr_errors;

	hdr_errors = 0;

	if (be32_to_cpu(block->bb_magic) != magic) {
		do_warn(_("bad magic # %#x in inobt block %d/%d\n"),
			be32_to_cpu(block->bb_magic), agno, bno);
		hdr_errors++;
		bad_ino_btree = 1;
		if (suspect)
			return;
	}
	if (be16_to_cpu(block->bb_level) != level) {
		do_warn(_("expected level %d got %d in inobt block %d/%d\n"),
			level, be16_to_cpu(block->bb_level), agno, bno);
		hdr_errors++;
		bad_ino_btree = 1;
		if (suspect)
			return;
	}

	/*
	 * check for btree blocks multiply claimed, any unknown/free state
	 * is ok in the bitmap block.
	 */
	state = get_bmap(agno, bno);
	switch (state)  {
	case XR_E_UNKNOWN:
	case XR_E_FREE1:
	case XR_E_FREE:
		set_bmap(agno, bno, XR_E_FS_MAP);
		break;
	default:
		set_bmap(agno, bno, XR_E_MULT);
		do_warn(
_("inode btree block claimed (state %d), agno %d, bno %d, suspect %d\n"),
			state, agno, bno, suspect);
	}

	numrecs = be16_to_cpu(block->bb_numrecs);

	/*
	 * leaf record in btree
	 */
	if (level == 0) {
		/* check for trashed btree block */

		if (numrecs > mp->m_inobt_mxr[0])  {
			numrecs = mp->m_inobt_mxr[0];
			hdr_errors++;
		}
		if (isroot == 0 && numrecs < mp->m_inobt_mnr[0])  {
			numrecs = mp->m_inobt_mnr[0];
			hdr_errors++;
		}

		if (hdr_errors)  {
			bad_ino_btree = 1;
			do_warn(_("dubious inode btree block header %d/%d\n"),
				agno, bno);
			suspect++;
		}

		rp = XFS_INOBT_REC_ADDR(mp, block, 1);

		/*
		 * step through the records, each record points to
		 * a chunk of inodes.  The start of inode chunks should
		 * be block-aligned.  Each inode btree rec should point
		 * to the start of a block of inodes or the start of a group
		 * of INODES_PER_CHUNK (64) inodes.  off is the offset into
		 * the block.  skip processing of bogus records.
		 */
		for (i = 0; i < numrecs; i++) {
			agcnts->agicount += XFS_INODES_PER_CHUNK;
			agcnts->icount += XFS_INODES_PER_CHUNK;
			agcnts->agifreecount += be32_to_cpu(rp[i].ir_freecount);
			agcnts->ifreecount += be32_to_cpu(rp[i].ir_freecount);

			suspect = scan_single_ino_chunk(agno, &rp[i], suspect);
		}

		if (suspect)
			bad_ino_btree = 1;

		return;
	}

	/*
	 * interior record, continue on
	 */
	if (numrecs > mp->m_inobt_mxr[1])  {
		numrecs = mp->m_inobt_mxr[1];
		hdr_errors++;
	}
	if (isroot == 0 && numrecs < mp->m_inobt_mnr[1])  {
		numrecs = mp->m_inobt_mnr[1];
		hdr_errors++;
	}

	pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);

	/*
	 * don't pass bogus tree flag down further if this block
	 * looked ok.  bail out if two levels in a row look bad.
	 */

	if (suspect && !hdr_errors)
		suspect = 0;

	if (hdr_errors)  {
		bad_ino_btree = 1;
		if (suspect)
			return;
		else suspect++;
	}

	/* recurse into each valid, non-zero child pointer */
	for (i = 0; i < numrecs; i++)  {
		if (be32_to_cpu(pp[i]) != 0 && verify_agbno(mp, agno,
							be32_to_cpu(pp[i])))
			scan_sbtree(be32_to_cpu(pp[i]), level, agno,
					suspect, scan_inobt, 0, magic, priv,
					&xfs_inobt_buf_ops);
	}
}
1039
1040 static void
1041 scan_freelist(
1042 xfs_agf_t *agf,
1043 struct aghdr_cnts *agcnts)
1044 {
1045 xfs_buf_t *agflbuf;
1046 xfs_agnumber_t agno;
1047 xfs_agblock_t bno;
1048 int count;
1049 int i;
1050 __be32 *freelist;
1051
1052 agno = be32_to_cpu(agf->agf_seqno);
1053
1054 if (XFS_SB_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
1055 XFS_AGF_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
1056 XFS_AGI_BLOCK(mp) != XFS_AGFL_BLOCK(mp))
1057 set_bmap(agno, XFS_AGFL_BLOCK(mp), XR_E_FS_MAP);
1058
1059 if (be32_to_cpu(agf->agf_flcount) == 0)
1060 return;
1061
1062 agflbuf = libxfs_readbuf(mp->m_dev,
1063 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
1064 XFS_FSS_TO_BB(mp, 1), 0, &xfs_agfl_buf_ops);
1065 if (!agflbuf) {
1066 do_abort(_("can't read agfl block for ag %d\n"), agno);
1067 return;
1068 }
1069 freelist = XFS_BUF_TO_AGFL_BNO(mp, agflbuf);
1070 i = be32_to_cpu(agf->agf_flfirst);
1071
1072 if (no_modify) {
1073 /* agf values not fixed in verify_set_agf, so recheck */
1074 if (be32_to_cpu(agf->agf_flfirst) >= XFS_AGFL_SIZE(mp) ||
1075 be32_to_cpu(agf->agf_fllast) >= XFS_AGFL_SIZE(mp)) {
1076 do_warn(_("agf %d freelist blocks bad, skipping "
1077 "freelist scan\n"), i);
1078 return;
1079 }
1080 }
1081
1082 count = 0;
1083 for (;;) {
1084 bno = be32_to_cpu(freelist[i]);
1085 if (verify_agbno(mp, agno, bno))
1086 set_bmap(agno, bno, XR_E_FREE);
1087 else
1088 do_warn(_("bad agbno %u in agfl, agno %d\n"),
1089 bno, agno);
1090 count++;
1091 if (i == be32_to_cpu(agf->agf_fllast))
1092 break;
1093 if (++i == XFS_AGFL_SIZE(mp))
1094 i = 0;
1095 }
1096 if (count != be32_to_cpu(agf->agf_flcount)) {
1097 do_warn(_("freeblk count %d != flcount %d in ag %d\n"), count,
1098 be32_to_cpu(agf->agf_flcount), agno);
1099 }
1100
1101 agcnts->fdblocks += count;
1102
1103 libxfs_putbuf(agflbuf);
1104 }
1105
1106 static void
1107 validate_agf(
1108 struct xfs_agf *agf,
1109 xfs_agnumber_t agno,
1110 struct aghdr_cnts *agcnts)
1111 {
1112 xfs_agblock_t bno;
1113 __uint32_t magic;
1114
1115 bno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
1116 if (bno != 0 && verify_agbno(mp, agno, bno)) {
1117 magic = xfs_sb_version_hascrc(&mp->m_sb) ? XFS_ABTB_CRC_MAGIC
1118 : XFS_ABTB_MAGIC;
1119 scan_sbtree(bno, be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
1120 agno, 0, scan_allocbt, 1, magic, agcnts,
1121 &xfs_allocbt_buf_ops);
1122 } else {
1123 do_warn(_("bad agbno %u for btbno root, agno %d\n"),
1124 bno, agno);
1125 }
1126
1127 bno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
1128 if (bno != 0 && verify_agbno(mp, agno, bno)) {
1129 magic = xfs_sb_version_hascrc(&mp->m_sb) ? XFS_ABTC_CRC_MAGIC
1130 : XFS_ABTC_MAGIC;
1131 scan_sbtree(bno, be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
1132 agno, 0, scan_allocbt, 1, magic, agcnts,
1133 &xfs_allocbt_buf_ops);
1134 } else {
1135 do_warn(_("bad agbno %u for btbcnt root, agno %d\n"),
1136 bno, agno);
1137 }
1138
1139 if (be32_to_cpu(agf->agf_freeblks) != agcnts->agffreeblks) {
1140 do_warn(_("agf_freeblks %u, counted %u in ag %u\n"),
1141 be32_to_cpu(agf->agf_freeblks), agcnts->agffreeblks, agno);
1142 }
1143
1144 if (be32_to_cpu(agf->agf_longest) != agcnts->agflongest) {
1145 do_warn(_("agf_longest %u, counted %u in ag %u\n"),
1146 be32_to_cpu(agf->agf_longest), agcnts->agflongest, agno);
1147 }
1148
1149 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1150 be32_to_cpu(agf->agf_btreeblks) != agcnts->agfbtreeblks) {
1151 do_warn(_("agf_btreeblks %u, counted %" PRIu64 " in ag %u\n"),
1152 be32_to_cpu(agf->agf_btreeblks), agcnts->agfbtreeblks, agno);
1153 }
1154 }
1155
1156 static void
1157 validate_agi(
1158 struct xfs_agi *agi,
1159 xfs_agnumber_t agno,
1160 struct aghdr_cnts *agcnts)
1161 {
1162 xfs_agblock_t bno;
1163 int i;
1164 __uint32_t magic;
1165
1166 bno = be32_to_cpu(agi->agi_root);
1167 if (bno != 0 && verify_agbno(mp, agno, bno)) {
1168 magic = xfs_sb_version_hascrc(&mp->m_sb) ? XFS_IBT_CRC_MAGIC
1169 : XFS_IBT_MAGIC;
1170 scan_sbtree(bno, be32_to_cpu(agi->agi_level),
1171 agno, 0, scan_inobt, 1, magic, agcnts,
1172 &xfs_inobt_buf_ops);
1173 } else {
1174 do_warn(_("bad agbno %u for inobt root, agno %d\n"),
1175 be32_to_cpu(agi->agi_root), agno);
1176 }
1177
1178 if (be32_to_cpu(agi->agi_count) != agcnts->agicount) {
1179 do_warn(_("agi_count %u, counted %u in ag %u\n"),
1180 be32_to_cpu(agi->agi_count), agcnts->agicount, agno);
1181 }
1182
1183 if (be32_to_cpu(agi->agi_freecount) != agcnts->agifreecount) {
1184 do_warn(_("agi_freecount %u, counted %u in ag %u\n"),
1185 be32_to_cpu(agi->agi_freecount), agcnts->agifreecount, agno);
1186 }
1187
1188 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
1189 xfs_agino_t agino = be32_to_cpu(agi->agi_unlinked[i]);
1190
1191 if (agino != NULLAGINO) {
1192 do_warn(
1193 _("agi unlinked bucket %d is %u in ag %u (inode=%" PRIu64 ")\n"),
1194 i, agino, agno,
1195 XFS_AGINO_TO_INO(mp, agno, agino));
1196 }
1197 }
1198 }
1199
/*
 * Scan an AG for obvious corruption.
 *
 * Work-queue callback (one invocation per AG): reads this AG's
 * superblock copy, AGF and AGI, lets verify_set_agheader() check and
 * (when allowed) repair them, then walks the freelist and the free
 * space / inode btrees via scan_freelist()/validate_agf()/
 * validate_agi(), accumulating per-AG totals into *arg.
 *
 * Buffer ownership: each of the three buffers is either written back
 * (when its dirty flag is set and we are in modify mode) or released
 * unmodified; every early-exit path releases whatever was acquired
 * before it.
 */
static void
scan_ag(
	work_queue_t	*wq,
	xfs_agnumber_t	agno,
	void		*arg)
{
	struct aghdr_cnts *agcnts = arg;	/* per-AG counters to fill in */
	xfs_agf_t	*agf;
	xfs_buf_t	*agfbuf;
	int		agf_dirty = 0;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibuf;
	int		agi_dirty = 0;
	xfs_sb_t	*sb;
	xfs_buf_t	*sbbuf;
	int		sb_dirty = 0;
	int		status;

	/* this AG's copy of the superblock (sector 0 of the AG) */
	sbbuf = libxfs_readbuf(mp->m_dev, XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
				XFS_FSS_TO_BB(mp, 1), 0, &xfs_sb_buf_ops);
	if (!sbbuf) {
		/* NOTE(review): do_error() presumably aborts; the returns
		 * below it look like defensive dead code — confirm. */
		do_error(_("can't get root superblock for ag %d\n"), agno);
		return;
	}

	/* in-core, unpacked copy of the on-disk sb for verify/repair */
	sb = (xfs_sb_t *)calloc(BBSIZE, 1);
	if (!sb) {
		do_error(_("can't allocate memory for superblock\n"));
		libxfs_putbuf(sbbuf);
		return;
	}
	libxfs_sb_from_disk(sb, XFS_BUF_TO_SBP(sbbuf));

	agfbuf = libxfs_readbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &xfs_agf_buf_ops);
	if (!agfbuf)  {
		do_error(_("can't read agf block for ag %d\n"), agno);
		libxfs_putbuf(sbbuf);
		free(sb);
		return;
	}
	agf = XFS_BUF_TO_AGF(agfbuf);

	agibuf = libxfs_readbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &xfs_agi_buf_ops);
	if (!agibuf)  {
		do_error(_("can't read agi block for ag %d\n"), agno);
		libxfs_putbuf(agfbuf);
		libxfs_putbuf(sbbuf);
		free(sb);
		return;
	}
	agi = XFS_BUF_TO_AGI(agibuf);

	/* fix up bad ag headers */

	status = verify_set_agheader(mp, sbbuf, sb, agf, agi, agno);

	if (status & XR_AG_SB_SEC)  {
		if (!no_modify)
			sb_dirty = 1;
		/*
		 * clear bad sector bit because we don't want
		 * to skip further processing. we just want to
		 * ensure that we write out the modified sb buffer.
		 */
		status &= ~XR_AG_SB_SEC;
	}
	if (status & XR_AG_SB)  {
		if (!no_modify)  {
			do_warn(_("reset bad sb for ag %d\n"), agno);
			sb_dirty = 1;
		} else  {
			do_warn(_("would reset bad sb for ag %d\n"), agno);
		}
	}
	if (status & XR_AG_AGF)  {
		if (!no_modify)  {
			do_warn(_("reset bad agf for ag %d\n"), agno);
			agf_dirty = 1;
		} else  {
			do_warn(_("would reset bad agf for ag %d\n"), agno);
		}
	}
	if (status & XR_AG_AGI)  {
		if (!no_modify)  {
			do_warn(_("reset bad agi for ag %d\n"), agno);
			agi_dirty = 1;
		} else  {
			do_warn(_("would reset bad agi for ag %d\n"), agno);
		}
	}

	/*
	 * in no-modify mode an uncorrected header means the btree walks
	 * below can't be trusted, so release everything and skip this AG.
	 */
	if (status && no_modify)  {
		libxfs_putbuf(agibuf);
		libxfs_putbuf(agfbuf);
		libxfs_putbuf(sbbuf);
		free(sb);

		do_warn(_("bad uncorrected agheader %d, skipping ag...\n"),
			agno);

		return;
	}

	scan_freelist(agf, agcnts);

	validate_agf(agf, agno, agcnts);
	validate_agi(agi, agno, agcnts);

	ASSERT(agi_dirty == 0 || (agi_dirty && !no_modify));

	/* write back repaired headers; otherwise just drop the buffers */
	if (agi_dirty && !no_modify)
		libxfs_writebuf(agibuf, 0);
	else
		libxfs_putbuf(agibuf);

	ASSERT(agf_dirty == 0 || (agf_dirty && !no_modify));

	if (agf_dirty && !no_modify)
		libxfs_writebuf(agfbuf, 0);
	else
		libxfs_putbuf(agfbuf);

	ASSERT(sb_dirty == 0 || (sb_dirty && !no_modify));

	/*
	 * AG 0 holds the primary superblock: keep the in-core mount's
	 * copy in sync with what we write back to disk.
	 */
	if (sb_dirty && !no_modify) {
		if (agno == 0)
			memcpy(&mp->m_sb, sb, sizeof(xfs_sb_t));
		libxfs_sb_to_disk(XFS_BUF_TO_SBP(sbbuf), sb, XFS_SB_ALL_BITS);
		libxfs_writebuf(sbbuf, 0);
	} else
		libxfs_putbuf(sbbuf);
	free(sb);
	PROG_RPT_INC(prog_rpt_done[agno], 1);

#ifdef XR_INODE_TRACE
	/* NOTE(review): 'i' is not declared in this function — this
	 * would fail to compile if XR_INODE_TRACE were defined. */
	print_inode_list(i);
#endif
	return;
}
1346
1347 #define SCAN_THREADS 32
1348
1349 void
1350 scan_ags(
1351 struct xfs_mount *mp,
1352 int scan_threads)
1353 {
1354 struct aghdr_cnts *agcnts;
1355 __uint64_t fdblocks = 0;
1356 __uint64_t icount = 0;
1357 __uint64_t ifreecount = 0;
1358 xfs_agnumber_t i;
1359 work_queue_t wq;
1360
1361 agcnts = malloc(mp->m_sb.sb_agcount * sizeof(*agcnts));
1362 if (!agcnts) {
1363 do_abort(_("no memory for ag header counts\n"));
1364 return;
1365 }
1366 memset(agcnts, 0, mp->m_sb.sb_agcount * sizeof(*agcnts));
1367
1368 create_work_queue(&wq, mp, scan_threads);
1369
1370 for (i = 0; i < mp->m_sb.sb_agcount; i++)
1371 queue_work(&wq, scan_ag, i, &agcnts[i]);
1372
1373 destroy_work_queue(&wq);
1374
1375 /* tally up the counts */
1376 for (i = 0; i < mp->m_sb.sb_agcount; i++) {
1377 fdblocks += agcnts[i].fdblocks;
1378 icount += agcnts[i].icount;
1379 ifreecount += agcnts[i].ifreecount;
1380 }
1381
1382 free(agcnts);
1383
1384 /*
1385 * Validate that our manual counts match the superblock.
1386 */
1387 if (mp->m_sb.sb_icount != icount) {
1388 do_warn(_("sb_icount %" PRIu64 ", counted %" PRIu64 "\n"),
1389 mp->m_sb.sb_icount, icount);
1390 }
1391
1392 if (mp->m_sb.sb_ifree != ifreecount) {
1393 do_warn(_("sb_ifree %" PRIu64 ", counted %" PRIu64 "\n"),
1394 mp->m_sb.sb_ifree, ifreecount);
1395 }
1396
1397 if (mp->m_sb.sb_fdblocks != fdblocks) {
1398 do_warn(_("sb_fdblocks %" PRIu64 ", counted %" PRIu64 "\n"),
1399 mp->m_sb.sb_fdblocks, fdblocks);
1400 }
1401 }
1402