]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - repair/scan.c
repair: mark local functions static
[thirdparty/xfsprogs-dev.git] / repair / scan.c
1 /*
2 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <libxfs.h>
20 #include "avl.h"
21 #include "globals.h"
22 #include "agheader.h"
23 #include "incore.h"
24 #include "protos.h"
25 #include "err_protos.h"
26 #include "dinode.h"
27 #include "scan.h"
28 #include "versions.h"
29 #include "bmap.h"
30 #include "progress.h"
31 #include "threads.h"
32
33 extern int verify_set_agheader(xfs_mount_t *mp, xfs_buf_t *sbuf, xfs_sb_t *sb,
34 xfs_agf_t *agf, xfs_agi_t *agi, xfs_agnumber_t i);
35
36 static xfs_mount_t *mp = NULL;
37
/*
 * Variables to validate AG header values against the manual count
 * from the btree traversal.
 */
struct aghdr_cnts {
	xfs_agnumber_t	agno;		/* AG these counts belong to */
	xfs_extlen_t	agffreeblks;	/* free blocks counted from cnt btree records */
	xfs_extlen_t	agflongest;	/* longest free extent seen in this AG */
	__uint64_t	agfbtreeblks;	/* non-root freespace btree blocks in this AG */
	__uint32_t	agicount;	/* inodes counted from inobt records */
	__uint32_t	agifreecount;	/* free inodes counted from inobt records */
	__uint64_t	fdblocks;	/* free data blocks (extents + freelist + btree blocks) */
	__uint64_t	icount;		/* fs-wide inode count contribution */
	__uint64_t	ifreecount;	/* fs-wide free inode count contribution */
};
53
54 static void
55 scanfunc_allocbt(
56 struct xfs_btree_block *block,
57 int level,
58 xfs_agblock_t bno,
59 xfs_agnumber_t agno,
60 int suspect,
61 int isroot,
62 __uint32_t magic,
63 struct aghdr_cnts *agcnts);
64
/*
 * Remember the mount point for all the scan functions in this file and
 * purge the buffer cache so blocks cached from a previously scanned
 * filesystem cannot leak into this one.
 */
void
set_mp(xfs_mount_t *mpp)
{
	libxfs_bcache_purge();
	mp = mpp;
}
71
72 static void
73 scan_sbtree(
74 xfs_agblock_t root,
75 int nlevels,
76 xfs_agnumber_t agno,
77 int suspect,
78 void (*func)(struct xfs_btree_block *block,
79 int level,
80 xfs_agblock_t bno,
81 xfs_agnumber_t agno,
82 int suspect,
83 int isroot,
84 void *priv),
85 int isroot,
86 void *priv)
87 {
88 xfs_buf_t *bp;
89
90 bp = libxfs_readbuf(mp->m_dev, XFS_AGB_TO_DADDR(mp, agno, root),
91 XFS_FSB_TO_BB(mp, 1), 0);
92 if (!bp) {
93 do_error(_("can't read btree block %d/%d\n"), agno, root);
94 return;
95 }
96 (*func)(XFS_BUF_TO_BLOCK(bp), nlevels - 1, root, agno, suspect,
97 isroot, priv);
98 libxfs_putbuf(bp);
99 }
100
/*
 * Read one long-form (fs-block-addressed) btree block, hand it to the
 * supplied callback, and write the buffer back if the callback repaired
 * it (only legal when modifications are allowed).
 *
 * returns 1 on bad news (inode needs to be cleared), 0 on good
 */
int
scan_lbtree(
	xfs_dfsbno_t	root,
	int		nlevels,
	int		(*func)(struct xfs_btree_block	*block,
				int			level,
				int			type,
				int			whichfork,
				xfs_dfsbno_t		bno,
				xfs_ino_t		ino,
				xfs_drfsbno_t		*tot,
				__uint64_t		*nex,
				blkmap_t		**blkmapp,
				bmap_cursor_t		*bm_cursor,
				int			isroot,
				int			check_dups,
				int			*dirty),
	int		type,
	int		whichfork,
	xfs_ino_t	ino,
	xfs_drfsbno_t	*tot,
	__uint64_t	*nex,
	blkmap_t	**blkmapp,
	bmap_cursor_t	*bm_cursor,
	int		isroot,
	int		check_dups)
{
	xfs_buf_t	*bp;
	int		err;
	int		dirty = 0;

	bp = libxfs_readbuf(mp->m_dev, XFS_FSB_TO_DADDR(mp, root),
		XFS_FSB_TO_BB(mp, 1), 0);
	if (!bp) {
		do_error(_("can't read btree block %d/%d\n"),
			XFS_FSB_TO_AGNO(mp, root),
			XFS_FSB_TO_AGBNO(mp, root));
		return(1);
	}
	err = (*func)(XFS_BUF_TO_BLOCK(bp), nlevels - 1,
			type, whichfork, root, ino, tot, nex, blkmapp,
			bm_cursor, isroot, check_dups, &dirty);

	/* the callback may only dirty the buffer when repairs are allowed */
	ASSERT(dirty == 0 || (dirty && !no_modify));

	/* write back a repaired block; otherwise just release the buffer */
	if (dirty && !no_modify)
		libxfs_writebuf(bp, 0);
	else
		libxfs_putbuf(bp);

	return(err);
}
156
157 int
158 scanfunc_bmap(
159 struct xfs_btree_block *block,
160 int level,
161 int type,
162 int whichfork,
163 xfs_dfsbno_t bno,
164 xfs_ino_t ino,
165 xfs_drfsbno_t *tot,
166 __uint64_t *nex,
167 blkmap_t **blkmapp,
168 bmap_cursor_t *bm_cursor,
169 int isroot,
170 int check_dups,
171 int *dirty)
172 {
173 int i;
174 int err;
175 xfs_bmbt_ptr_t *pp;
176 xfs_bmbt_key_t *pkey;
177 xfs_bmbt_rec_t *rp;
178 xfs_dfiloff_t first_key;
179 xfs_dfiloff_t last_key;
180 char *forkname;
181 int numrecs;
182 xfs_agnumber_t agno;
183 xfs_agblock_t agbno;
184 int state;
185
186 if (whichfork == XFS_DATA_FORK)
187 forkname = _("data");
188 else
189 forkname = _("attr");
190
191 /*
192 * unlike the ag freeblock btrees, if anything looks wrong
193 * in an inode bmap tree, just bail. it's possible that
194 * we'll miss a case where the to-be-toasted inode and
195 * another inode are claiming the same block but that's
196 * highly unlikely.
197 */
198 if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC) {
199 do_warn(
200 _("bad magic # %#x in inode %" PRIu64 " (%s fork) bmbt block %" PRIu64 "\n"),
201 be32_to_cpu(block->bb_magic), ino, forkname, bno);
202 return(1);
203 }
204 if (be16_to_cpu(block->bb_level) != level) {
205 do_warn(
206 _("expected level %d got %d in inode %" PRIu64 ", (%s fork) bmbt block %" PRIu64 "\n"),
207 level, be16_to_cpu(block->bb_level),
208 ino, forkname, bno);
209 return(1);
210 }
211
212 if (check_dups == 0) {
213 /*
214 * check sibling pointers. if bad we have a conflict
215 * between the sibling pointers and the child pointers
216 * in the parent block. blow out the inode if that happens
217 */
218 if (bm_cursor->level[level].fsbno != NULLDFSBNO) {
219 /*
220 * this is not the first block on this level
221 * so the cursor for this level has recorded the
222 * values for this's block left-sibling.
223 */
224 if (bno != bm_cursor->level[level].right_fsbno) {
225 do_warn(
226 _("bad fwd (right) sibling pointer (saw %" PRIu64 " parent block says %" PRIu64 ")\n"
227 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
228 bm_cursor->level[level].right_fsbno,
229 bno, ino, forkname,
230 bm_cursor->level[level].fsbno);
231 return(1);
232 }
233 if (be64_to_cpu(block->bb_u.l.bb_leftsib) !=
234 bm_cursor->level[level].fsbno) {
235 do_warn(
236 _("bad back (left) sibling pointer (saw %llu parent block says %" PRIu64 ")\n"
237 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
238 (unsigned long long)
239 be64_to_cpu(block->bb_u.l.bb_leftsib),
240 bm_cursor->level[level].fsbno,
241 ino, forkname, bno);
242 return(1);
243 }
244 } else {
245 /*
246 * This is the first or only block on this level.
247 * Check that the left sibling pointer is NULL
248 */
249 if (be64_to_cpu(block->bb_u.l.bb_leftsib) != NULLDFSBNO) {
250 do_warn(
251 _("bad back (left) sibling pointer (saw %llu should be NULL (0))\n"
252 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
253 (unsigned long long)
254 be64_to_cpu(block->bb_u.l.bb_leftsib),
255 ino, forkname, bno);
256 return(1);
257 }
258 }
259
260 /*
261 * update cursor block pointers to reflect this block
262 */
263 bm_cursor->level[level].fsbno = bno;
264 bm_cursor->level[level].left_fsbno =
265 be64_to_cpu(block->bb_u.l.bb_leftsib);
266 bm_cursor->level[level].right_fsbno =
267 be64_to_cpu(block->bb_u.l.bb_rightsib);
268
269 agno = XFS_FSB_TO_AGNO(mp, bno);
270 agbno = XFS_FSB_TO_AGBNO(mp, bno);
271
272 pthread_mutex_lock(&ag_locks[agno]);
273 state = get_bmap(agno, agbno);
274 switch (state) {
275 case XR_E_UNKNOWN:
276 case XR_E_FREE1:
277 case XR_E_FREE:
278 set_bmap(agno, agbno, XR_E_INUSE);
279 break;
280 case XR_E_FS_MAP:
281 case XR_E_INUSE:
282 /*
283 * we'll try and continue searching here since
284 * the block looks like it's been claimed by file
285 * to store user data, a directory to store directory
286 * data, or the space allocation btrees but since
287 * we made it here, the block probably
288 * contains btree data.
289 */
290 set_bmap(agno, agbno, XR_E_MULT);
291 do_warn(
292 _("inode 0x%" PRIx64 "bmap block 0x%" PRIx64 " claimed, state is %d\n"),
293 ino, bno, state);
294 break;
295 case XR_E_MULT:
296 case XR_E_INUSE_FS:
297 set_bmap(agno, agbno, XR_E_MULT);
298 do_warn(
299 _("inode 0x%" PRIx64 " bmap block 0x%" PRIx64 " claimed, state is %d\n"),
300 ino, bno, state);
301 /*
302 * if we made it to here, this is probably a bmap block
303 * that is being used by *another* file as a bmap block
304 * so the block will be valid. Both files should be
305 * trashed along with any other file that impinges on
306 * any blocks referenced by either file. So we
307 * continue searching down this btree to mark all
308 * blocks duplicate
309 */
310 break;
311 case XR_E_BAD_STATE:
312 default:
313 do_warn(
314 _("bad state %d, inode %" PRIu64 " bmap block 0x%" PRIx64 "\n"),
315 state, ino, bno);
316 break;
317 }
318 pthread_mutex_unlock(&ag_locks[agno]);
319 } else {
320 /*
321 * attribute fork for realtime files is in the regular
322 * filesystem
323 */
324 if (type != XR_INO_RTDATA || whichfork != XFS_DATA_FORK) {
325 if (search_dup_extent(XFS_FSB_TO_AGNO(mp, bno),
326 XFS_FSB_TO_AGBNO(mp, bno),
327 XFS_FSB_TO_AGBNO(mp, bno) + 1))
328 return(1);
329 } else {
330 if (search_rt_dup_extent(mp, bno))
331 return(1);
332 }
333 }
334 (*tot)++;
335 numrecs = be16_to_cpu(block->bb_numrecs);
336
337 if (level == 0) {
338 if (numrecs > mp->m_bmap_dmxr[0] || (isroot == 0 && numrecs <
339 mp->m_bmap_dmnr[0])) {
340 do_warn(
341 _("inode %" PRIu64 " bad # of bmap records (%u, min - %u, max - %u)\n"),
342 ino, numrecs, mp->m_bmap_dmnr[0],
343 mp->m_bmap_dmxr[0]);
344 return(1);
345 }
346 rp = XFS_BMBT_REC_ADDR(mp, block, 1);
347 *nex += numrecs;
348 /*
349 * XXX - if we were going to fix up the btree record,
350 * we'd do it right here. For now, if there's a problem,
351 * we'll bail out and presumably clear the inode.
352 */
353 if (check_dups == 0) {
354 err = process_bmbt_reclist(mp, rp, numrecs,
355 type, ino, tot, blkmapp,
356 &first_key, &last_key,
357 whichfork);
358 if (err)
359 return(1);
360 /*
361 * check that key ordering is monotonically increasing.
362 * if the last_key value in the cursor is set to
363 * NULLDFILOFF, then we know this is the first block
364 * on the leaf level and we shouldn't check the
365 * last_key value.
366 */
367 if (first_key <= bm_cursor->level[level].last_key &&
368 bm_cursor->level[level].last_key !=
369 NULLDFILOFF) {
370 do_warn(
371 _("out-of-order bmap key (file offset) in inode %" PRIu64 ", %s fork, fsbno %" PRIu64 "\n"),
372 ino, forkname, bno);
373 return(1);
374 }
375 /*
376 * update cursor keys to reflect this block.
377 * don't have to check if last_key is > first_key
378 * since that gets checked by process_bmbt_reclist.
379 */
380 bm_cursor->level[level].first_key = first_key;
381 bm_cursor->level[level].last_key = last_key;
382
383 return(0);
384 } else
385 return(scan_bmbt_reclist(mp, rp, numrecs,
386 type, ino, tot, whichfork));
387 }
388 if (numrecs > mp->m_bmap_dmxr[1] || (isroot == 0 && numrecs <
389 mp->m_bmap_dmnr[1])) {
390 do_warn(
391 _("inode %" PRIu64 " bad # of bmap records (%u, min - %u, max - %u)\n"),
392 ino, numrecs, mp->m_bmap_dmnr[1], mp->m_bmap_dmxr[1]);
393 return(1);
394 }
395 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
396 pkey = XFS_BMBT_KEY_ADDR(mp, block, 1);
397
398 last_key = NULLDFILOFF;
399
400 for (i = 0, err = 0; i < numrecs; i++) {
401 /*
402 * XXX - if we were going to fix up the interior btree nodes,
403 * we'd do it right here. For now, if there's a problem,
404 * we'll bail out and presumably clear the inode.
405 */
406 if (!verify_dfsbno(mp, be64_to_cpu(pp[i]))) {
407 do_warn(
408 _("bad bmap btree ptr 0x%llx in ino %" PRIu64 "\n"),
409 (unsigned long long) be64_to_cpu(pp[i]), ino);
410 return(1);
411 }
412
413 err = scan_lbtree(be64_to_cpu(pp[i]), level, scanfunc_bmap,
414 type, whichfork, ino, tot, nex, blkmapp,
415 bm_cursor, 0, check_dups);
416 if (err)
417 return(1);
418
419 /*
420 * fix key (offset) mismatches between the first key
421 * in the child block (as recorded in the cursor) and the
422 * key in the interior node referencing the child block.
423 *
424 * fixes cases where entries have been shifted between
425 * child blocks but the parent hasn't been updated. We
426 * don't have to worry about the key values in the cursor
427 * not being set since we only look at the key values of
428 * our child and those are guaranteed to be set by the
429 * call to scan_lbtree() above.
430 */
431 if (check_dups == 0 && be64_to_cpu(pkey[i].br_startoff) !=
432 bm_cursor->level[level-1].first_key) {
433 if (!no_modify) {
434 do_warn(
435 _("correcting bt key (was %llu, now %" PRIu64 ") in inode %" PRIu64 "\n"
436 "\t\t%s fork, btree block %" PRIu64 "\n"),
437 (unsigned long long)
438 be64_to_cpu(pkey[i].br_startoff),
439 bm_cursor->level[level-1].first_key,
440 ino,
441 forkname, bno);
442 *dirty = 1;
443 pkey[i].br_startoff = cpu_to_be64(
444 bm_cursor->level[level-1].first_key);
445 } else {
446 do_warn(
447 _("bad btree key (is %llu, should be %" PRIu64 ") in inode %" PRIu64 "\n"
448 "\t\t%s fork, btree block %" PRIu64 "\n"),
449 (unsigned long long)
450 be64_to_cpu(pkey[i].br_startoff),
451 bm_cursor->level[level-1].first_key,
452 ino, forkname, bno);
453 }
454 }
455 }
456
457 /*
458 * If we're the last node at our level, check that the last child
459 * block's forward sibling pointer is NULL.
460 */
461 if (check_dups == 0 &&
462 bm_cursor->level[level].right_fsbno == NULLDFSBNO &&
463 bm_cursor->level[level - 1].right_fsbno != NULLDFSBNO) {
464 do_warn(
465 _("bad fwd (right) sibling pointer (saw %" PRIu64 " should be NULLDFSBNO)\n"
466 "\tin inode %" PRIu64 " (%s fork) bmap btree block %" PRIu64 "\n"),
467 bm_cursor->level[level - 1].right_fsbno,
468 ino, forkname, bm_cursor->level[level - 1].fsbno);
469 return(1);
470 }
471
472 /*
473 * update cursor keys to reflect this block
474 */
475 if (check_dups == 0) {
476 bm_cursor->level[level].first_key =
477 be64_to_cpu(pkey[0].br_startoff);
478 bm_cursor->level[level].last_key =
479 be64_to_cpu(pkey[numrecs - 1].br_startoff);
480 }
481
482 return(0);
483 }
484
485 static void
486 scanfunc_bno(
487 struct xfs_btree_block *block,
488 int level,
489 xfs_agblock_t bno,
490 xfs_agnumber_t agno,
491 int suspect,
492 int isroot,
493 void *agcnts)
494 {
495 return scanfunc_allocbt(block, level, bno, agno,
496 suspect, isroot, XFS_ABTB_MAGIC, agcnts);
497 }
498
499 static void
500 scanfunc_cnt(
501 struct xfs_btree_block *block,
502 int level,
503 xfs_agblock_t bno,
504 xfs_agnumber_t agno,
505 int suspect,
506 int isroot,
507 void *agcnts)
508 {
509 return scanfunc_allocbt(block, level, bno, agno,
510 suspect, isroot, XFS_ABTC_MAGIC, agcnts);
511 }
512
513 static void
514 scanfunc_allocbt(
515 struct xfs_btree_block *block,
516 int level,
517 xfs_agblock_t bno,
518 xfs_agnumber_t agno,
519 int suspect,
520 int isroot,
521 __uint32_t magic,
522 struct aghdr_cnts *agcnts)
523 {
524 const char *name;
525 int i;
526 xfs_alloc_ptr_t *pp;
527 xfs_alloc_rec_t *rp;
528 int hdr_errors = 0;
529 int numrecs;
530 int state;
531 xfs_extlen_t lastcount = 0;
532 xfs_agblock_t lastblock = 0;
533
534 assert(magic == XFS_ABTB_MAGIC || magic == XFS_ABTC_MAGIC);
535
536 name = (magic == XFS_ABTB_MAGIC) ? "bno" : "cnt";
537
538 if (be32_to_cpu(block->bb_magic) != magic) {
539 do_warn(_("bad magic # %#x in bt%s block %d/%d\n"),
540 be32_to_cpu(block->bb_magic), name, agno, bno);
541 hdr_errors++;
542 if (suspect)
543 return;
544 }
545
546 /*
547 * All freespace btree blocks except the roots are freed for a
548 * fully used filesystem, thus they are counted towards the
549 * free data block counter.
550 */
551 if (!isroot) {
552 agcnts->agfbtreeblks++;
553 agcnts->fdblocks++;
554 }
555
556 if (be16_to_cpu(block->bb_level) != level) {
557 do_warn(_("expected level %d got %d in bt%s block %d/%d\n"),
558 level, be16_to_cpu(block->bb_level), name, agno, bno);
559 hdr_errors++;
560 if (suspect)
561 return;
562 }
563
564 /*
565 * check for btree blocks multiply claimed
566 */
567 state = get_bmap(agno, bno);
568 if (state != XR_E_UNKNOWN) {
569 set_bmap(agno, bno, XR_E_MULT);
570 do_warn(
571 _("%s freespace btree block claimed (state %d), agno %d, bno %d, suspect %d\n"),
572 name, state, agno, bno, suspect);
573 return;
574 }
575 set_bmap(agno, bno, XR_E_FS_MAP);
576
577 numrecs = be16_to_cpu(block->bb_numrecs);
578
579 if (level == 0) {
580 if (numrecs > mp->m_alloc_mxr[0]) {
581 numrecs = mp->m_alloc_mxr[0];
582 hdr_errors++;
583 }
584 if (isroot == 0 && numrecs < mp->m_alloc_mnr[0]) {
585 numrecs = mp->m_alloc_mnr[0];
586 hdr_errors++;
587 }
588
589 if (hdr_errors) {
590 do_warn(
591 _("bad btree nrecs (%u, min=%u, max=%u) in bt%s block %u/%u\n"),
592 be16_to_cpu(block->bb_numrecs),
593 mp->m_alloc_mnr[0], mp->m_alloc_mxr[0],
594 name, agno, bno);
595 suspect++;
596 }
597
598 rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
599 for (i = 0; i < numrecs; i++) {
600 xfs_agblock_t b, end;
601 xfs_extlen_t len, blen;
602
603 b = be32_to_cpu(rp[i].ar_startblock);
604 len = be32_to_cpu(rp[i].ar_blockcount);
605 end = b + len;
606
607 if (b == 0 || !verify_agbno(mp, agno, b)) {
608 do_warn(
609 _("invalid start block %u in record %u of %s btree block %u/%u\n"),
610 b, i, name, agno, bno);
611 continue;
612 }
613 if (len == 0 || !verify_agbno(mp, agno, end - 1)) {
614 do_warn(
615 _("invalid length %u in record %u of %s btree block %u/%u\n"),
616 len, i, name, agno, bno);
617 continue;
618 }
619
620 if (magic == XFS_ABTB_MAGIC) {
621 if (b <= lastblock) {
622 do_warn(_(
623 "out-of-order bno btree record %d (%u %u) block %u/%u\n"),
624 i, b, len, agno, bno);
625 } else {
626 lastblock = b;
627 }
628 } else {
629 agcnts->fdblocks += len;
630 agcnts->agffreeblks += len;
631 if (len > agcnts->agflongest)
632 agcnts->agflongest = len;
633 if (len < lastcount) {
634 do_warn(_(
635 "out-of-order cnt btree record %d (%u %u) block %u/%u\n"),
636 i, b, len, agno, bno);
637 } else {
638 lastcount = len;
639 }
640 }
641
642 for ( ; b < end; b += blen) {
643 state = get_bmap_ext(agno, b, end, &blen);
644 switch (state) {
645 case XR_E_UNKNOWN:
646 set_bmap(agno, b, XR_E_FREE1);
647 break;
648 case XR_E_FREE1:
649 /*
650 * no warning messages -- we'll catch
651 * FREE1 blocks later
652 */
653 if (magic == XFS_ABTC_MAGIC) {
654 set_bmap_ext(agno, b, blen,
655 XR_E_FREE);
656 break;
657 }
658 default:
659 do_warn(
660 _("block (%d,%d-%d) multiply claimed by %s space tree, state - %d\n"),
661 agno, b, b + blen - 1,
662 name, state);
663 break;
664 }
665 }
666 }
667 return;
668 }
669
670 /*
671 * interior record
672 */
673 pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
674
675 if (numrecs > mp->m_alloc_mxr[1]) {
676 numrecs = mp->m_alloc_mxr[1];
677 hdr_errors++;
678 }
679 if (isroot == 0 && numrecs < mp->m_alloc_mnr[1]) {
680 numrecs = mp->m_alloc_mnr[1];
681 hdr_errors++;
682 }
683
684 /*
685 * don't pass bogus tree flag down further if this block
686 * looked ok. bail out if two levels in a row look bad.
687 */
688 if (hdr_errors) {
689 do_warn(
690 _("bad btree nrecs (%u, min=%u, max=%u) in bt%s block %u/%u\n"),
691 be16_to_cpu(block->bb_numrecs),
692 mp->m_alloc_mnr[1], mp->m_alloc_mxr[1],
693 name, agno, bno);
694 if (suspect)
695 return;
696 suspect++;
697 } else if (suspect) {
698 suspect = 0;
699 }
700
701 for (i = 0; i < numrecs; i++) {
702 xfs_agblock_t bno = be32_to_cpu(pp[i]);
703
704 /*
705 * XXX - put sibling detection right here.
706 * we know our sibling chain is good. So as we go,
707 * we check the entry before and after each entry.
708 * If either of the entries references a different block,
709 * check the sibling pointer. If there's a sibling
710 * pointer mismatch, try and extract as much data
711 * as possible.
712 */
713 if (bno != 0 && verify_agbno(mp, agno, bno)) {
714 scan_sbtree(bno, level, agno, suspect,
715 (magic == XFS_ABTB_MAGIC) ?
716 scanfunc_bno : scanfunc_cnt, 0,
717 (void *)agcnts);
718 }
719 }
720 }
721
722 static int
723 scan_single_ino_chunk(
724 xfs_agnumber_t agno,
725 xfs_inobt_rec_t *rp,
726 int suspect)
727 {
728 xfs_ino_t lino;
729 xfs_agino_t ino;
730 xfs_agblock_t agbno;
731 int j;
732 int nfree;
733 int off;
734 int state;
735 ino_tree_node_t *ino_rec, *first_rec, *last_rec;
736
737 ino = be32_to_cpu(rp->ir_startino);
738 off = XFS_AGINO_TO_OFFSET(mp, ino);
739 agbno = XFS_AGINO_TO_AGBNO(mp, ino);
740 lino = XFS_AGINO_TO_INO(mp, agno, ino);
741
742 /*
743 * on multi-block block chunks, all chunks start
744 * at the beginning of the block. with multi-chunk
745 * blocks, all chunks must start on 64-inode boundaries
746 * since each block can hold N complete chunks. if
747 * fs has aligned inodes, all chunks must start
748 * at a fs_ino_alignment*N'th agbno. skip recs
749 * with badly aligned starting inodes.
750 */
751 if (ino == 0 ||
752 (inodes_per_block <= XFS_INODES_PER_CHUNK && off != 0) ||
753 (inodes_per_block > XFS_INODES_PER_CHUNK &&
754 off % XFS_INODES_PER_CHUNK != 0) ||
755 (fs_aligned_inodes && agbno % fs_ino_alignment != 0)) {
756 do_warn(
757 _("badly aligned inode rec (starting inode = %" PRIu64 ")\n"),
758 lino);
759 suspect++;
760 }
761
762 /*
763 * verify numeric validity of inode chunk first
764 * before inserting into a tree. don't have to
765 * worry about the overflow case because the
766 * starting ino number of a chunk can only get
767 * within 255 inodes of max (NULLAGINO). if it
768 * gets closer, the agino number will be illegal
769 * as the agbno will be too large.
770 */
771 if (verify_aginum(mp, agno, ino)) {
772 do_warn(
773 _("bad starting inode # (%" PRIu64 " (0x%x 0x%x)) in ino rec, skipping rec\n"),
774 lino, agno, ino);
775 return ++suspect;
776 }
777
778 if (verify_aginum(mp, agno,
779 ino + XFS_INODES_PER_CHUNK - 1)) {
780 do_warn(
781 _("bad ending inode # (%" PRIu64 " (0x%x 0x%zx)) in ino rec, skipping rec\n"),
782 lino + XFS_INODES_PER_CHUNK - 1,
783 agno,
784 ino + XFS_INODES_PER_CHUNK - 1);
785 return ++suspect;
786 }
787
788 /*
789 * set state of each block containing inodes
790 */
791 if (off == 0 && !suspect) {
792 for (j = 0;
793 j < XFS_INODES_PER_CHUNK;
794 j += mp->m_sb.sb_inopblock) {
795 agbno = XFS_AGINO_TO_AGBNO(mp, ino + j);
796
797 state = get_bmap(agno, agbno);
798 if (state == XR_E_UNKNOWN) {
799 set_bmap(agno, agbno, XR_E_INO);
800 } else if (state == XR_E_INUSE_FS && agno == 0 &&
801 ino + j >= first_prealloc_ino &&
802 ino + j < last_prealloc_ino) {
803 set_bmap(agno, agbno, XR_E_INO);
804 } else {
805 do_warn(
806 _("inode chunk claims used block, inobt block - agno %d, bno %d, inopb %d\n"),
807 agno, agbno, mp->m_sb.sb_inopblock);
808 /*
809 * XXX - maybe should mark
810 * block a duplicate
811 */
812 return ++suspect;
813 }
814 }
815 }
816
817 /*
818 * ensure only one avl entry per chunk
819 */
820 find_inode_rec_range(mp, agno, ino, ino + XFS_INODES_PER_CHUNK,
821 &first_rec, &last_rec);
822 if (first_rec != NULL) {
823 /*
824 * this chunk overlaps with one (or more)
825 * already in the tree
826 */
827 do_warn(
828 _("inode rec for ino %" PRIu64 " (%d/%d) overlaps existing rec (start %d/%d)\n"),
829 lino, agno, ino, agno, first_rec->ino_startnum);
830 suspect++;
831
832 /*
833 * if the 2 chunks start at the same place,
834 * then we don't have to put this one
835 * in the uncertain list. go to the next one.
836 */
837 if (first_rec->ino_startnum == ino)
838 return suspect;
839 }
840
841 nfree = 0;
842
843 /*
844 * now mark all the inodes as existing and free or used.
845 * if the tree is suspect, put them into the uncertain
846 * inode tree.
847 */
848 if (!suspect) {
849 if (XFS_INOBT_IS_FREE_DISK(rp, 0)) {
850 nfree++;
851 ino_rec = set_inode_free_alloc(mp, agno, ino);
852 } else {
853 ino_rec = set_inode_used_alloc(mp, agno, ino);
854 }
855 for (j = 1; j < XFS_INODES_PER_CHUNK; j++) {
856 if (XFS_INOBT_IS_FREE_DISK(rp, j)) {
857 nfree++;
858 set_inode_free(ino_rec, j);
859 } else {
860 set_inode_used(ino_rec, j);
861 }
862 }
863 } else {
864 for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
865 if (XFS_INOBT_IS_FREE_DISK(rp, j)) {
866 nfree++;
867 add_aginode_uncertain(agno, ino + j, 1);
868 } else {
869 add_aginode_uncertain(agno, ino + j, 0);
870 }
871 }
872 }
873
874 if (nfree != be32_to_cpu(rp->ir_freecount)) {
875 do_warn(_("ir_freecount/free mismatch, inode "
876 "chunk %d/%u, freecount %d nfree %d\n"),
877 agno, ino, be32_to_cpu(rp->ir_freecount), nfree);
878 }
879
880 return suspect;
881 }
882
883
/*
 * this one walks the inode btrees sucking the info there into
 * the incore avl tree.  We try and rescue corrupted btree records
 * to minimize our chances of losing inodes.  Inode info from potentially
 * corrupt sources could be bogus so rather than put the info straight
 * into the tree, instead we put it on a list and try and verify the
 * info in the next phase by examining what's on disk.  At that point,
 * we'll be able to figure out what's what and stick the corrected info
 * into the tree.  We do bail out at some point and give up on a subtree
 * so as to avoid walking randomly all over the ag.
 *
 * Note that it's also ok if the free/inuse info wrong, we can correct
 * that when we examine the on-disk inode.  The important thing is to
 * get the start and alignment of the inode chunks right.  Those chunks
 * that we aren't sure about go into the uncertain list.
 */
static void
scanfunc_ino(
	struct xfs_btree_block	*block,
	int			level,
	xfs_agblock_t		bno,
	xfs_agnumber_t		agno,
	int			suspect,
	int			isroot,
	void			*priv)
{
	struct aghdr_cnts	*agcnts = priv;
	int			i;
	int			numrecs;
	int			state;
	xfs_inobt_ptr_t		*pp;
	xfs_inobt_rec_t		*rp;
	int			hdr_errors;

	hdr_errors = 0;

	if (be32_to_cpu(block->bb_magic) != XFS_IBT_MAGIC) {
		do_warn(_("bad magic # %#x in inobt block %d/%d\n"),
			be32_to_cpu(block->bb_magic), agno, bno);
		hdr_errors++;
		bad_ino_btree = 1;
		if (suspect)
			return;
	}
	if (be16_to_cpu(block->bb_level) != level) {
		do_warn(_("expected level %d got %d in inobt block %d/%d\n"),
			level, be16_to_cpu(block->bb_level), agno, bno);
		hdr_errors++;
		bad_ino_btree = 1;
		if (suspect)
			return;
	}

	/*
	 * check for btree blocks multiply claimed, any unknown/free state
	 * is ok in the bitmap block.
	 */
	state = get_bmap(agno, bno);
	switch (state) {
	case XR_E_UNKNOWN:
	case XR_E_FREE1:
	case XR_E_FREE:
		set_bmap(agno, bno, XR_E_FS_MAP);
		break;
	default:
		set_bmap(agno, bno, XR_E_MULT);
		do_warn(
_("inode btree block claimed (state %d), agno %d, bno %d, suspect %d\n"),
			state, agno, bno, suspect);
	}

	numrecs = be16_to_cpu(block->bb_numrecs);

	/*
	 * leaf record in btree
	 */
	if (level == 0) {
		/* check for trashed btree block */

		/* clamp a corrupt record count into the legal range */
		if (numrecs > mp->m_inobt_mxr[0]) {
			numrecs = mp->m_inobt_mxr[0];
			hdr_errors++;
		}
		if (isroot == 0 && numrecs < mp->m_inobt_mnr[0]) {
			numrecs = mp->m_inobt_mnr[0];
			hdr_errors++;
		}

		if (hdr_errors) {
			bad_ino_btree = 1;
			do_warn(_("dubious inode btree block header %d/%d\n"),
				agno, bno);
			suspect++;
		}

		rp = XFS_INOBT_REC_ADDR(mp, block, 1);

		/*
		 * step through the records, each record points to
		 * a chunk of inodes.  The start of inode chunks should
		 * be block-aligned.  Each inode btree rec should point
		 * to the start of a block of inodes or the start of a group
		 * of INODES_PER_CHUNK (64) inodes.  off is the offset into
		 * the block.  skip processing of bogus records.
		 */
		for (i = 0; i < numrecs; i++) {
			/* accumulate per-AG and fs-wide inode counts */
			agcnts->agicount += XFS_INODES_PER_CHUNK;
			agcnts->icount += XFS_INODES_PER_CHUNK;
			agcnts->agifreecount += be32_to_cpu(rp[i].ir_freecount);
			agcnts->ifreecount += be32_to_cpu(rp[i].ir_freecount);

			suspect = scan_single_ino_chunk(agno, &rp[i], suspect);
		}

		if (suspect)
			bad_ino_btree = 1;

		return;
	}

	/*
	 * interior record, continue on
	 */
	if (numrecs > mp->m_inobt_mxr[1]) {
		numrecs = mp->m_inobt_mxr[1];
		hdr_errors++;
	}
	if (isroot == 0 && numrecs < mp->m_inobt_mnr[1]) {
		numrecs = mp->m_inobt_mnr[1];
		hdr_errors++;
	}

	pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);

	/*
	 * don't pass bogus tree flag down further if this block
	 * looked ok.  bail out if two levels in a row look bad.
	 */

	if (suspect && !hdr_errors)
		suspect = 0;

	if (hdr_errors) {
		bad_ino_btree = 1;
		if (suspect)
			return;
		else suspect++;
	}

	/* recurse into each valid, non-zero child block pointer */
	for (i = 0; i < numrecs; i++) {
		if (be32_to_cpu(pp[i]) != 0 && verify_agbno(mp, agno,
							be32_to_cpu(pp[i])))
			scan_sbtree(be32_to_cpu(pp[i]), level, agno,
					suspect, scanfunc_ino, 0, priv);
	}
}
1040
/*
 * Walk this AG's free block list (AGFL): mark every listed block free in
 * the incore block map, count the entries, cross-check the count against
 * agf_flcount and add the freelist blocks to the running free-space
 * total in *agcnts.
 */
static void
scan_freelist(
	xfs_agf_t	*agf,
	struct aghdr_cnts *agcnts)
{
	xfs_agfl_t	*agfl;
	xfs_buf_t	*agflbuf;
	xfs_agnumber_t	agno;
	xfs_agblock_t	bno;
	int		count;
	int		i;

	agno = be32_to_cpu(agf->agf_seqno);

	/*
	 * the AGFL block itself is fs metadata unless it coincides with
	 * one of the other AG header blocks
	 */
	if (XFS_SB_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
	    XFS_AGF_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
	    XFS_AGI_BLOCK(mp) != XFS_AGFL_BLOCK(mp))
		set_bmap(agno, XFS_AGFL_BLOCK(mp), XR_E_FS_MAP);

	/* empty freelist -- nothing to walk */
	if (be32_to_cpu(agf->agf_flcount) == 0)
		return;

	agflbuf = libxfs_readbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0);
	if (!agflbuf) {
		do_abort(_("can't read agfl block for ag %d\n"), agno);
		return;
	}
	agfl = XFS_BUF_TO_AGFL(agflbuf);
	i = be32_to_cpu(agf->agf_flfirst);
	count = 0;
	/*
	 * walk the circular list from flfirst to fllast, wrapping at the
	 * end of the array.
	 * NOTE(review): if agf_fllast is out of [0, XFS_AGFL_SIZE) this
	 * loop never terminates -- presumably the AGF fields were
	 * validated before we get here; confirm against the caller.
	 */
	for (;;) {
		bno = be32_to_cpu(agfl->agfl_bno[i]);
		if (verify_agbno(mp, agno, bno))
			set_bmap(agno, bno, XR_E_FREE);
		else
			do_warn(_("bad agbno %u in agfl, agno %d\n"),
				bno, agno);
		count++;
		if (i == be32_to_cpu(agf->agf_fllast))
			break;
		if (++i == XFS_AGFL_SIZE(mp))
			i = 0;
	}
	if (count != be32_to_cpu(agf->agf_flcount)) {
		do_warn(_("freeblk count %d != flcount %d in ag %d\n"), count,
			be32_to_cpu(agf->agf_flcount), agno);
	}

	/* freelist blocks count as free data blocks */
	agcnts->fdblocks += count;

	libxfs_putbuf(agflbuf);
}
1095
1096 static void
1097 validate_agf(
1098 struct xfs_agf *agf,
1099 xfs_agnumber_t agno,
1100 struct aghdr_cnts *agcnts)
1101 {
1102 xfs_agblock_t bno;
1103
1104 bno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
1105 if (bno != 0 && verify_agbno(mp, agno, bno)) {
1106 scan_sbtree(bno, be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
1107 agno, 0, scanfunc_bno, 1, agcnts);
1108 } else {
1109 do_warn(_("bad agbno %u for btbno root, agno %d\n"),
1110 bno, agno);
1111 }
1112
1113 bno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
1114 if (bno != 0 && verify_agbno(mp, agno, bno)) {
1115 scan_sbtree(bno, be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
1116 agno, 0, scanfunc_cnt, 1, agcnts);
1117 } else {
1118 do_warn(_("bad agbno %u for btbcnt root, agno %d\n"),
1119 bno, agno);
1120 }
1121
1122 if (be32_to_cpu(agf->agf_freeblks) != agcnts->agffreeblks) {
1123 do_warn(_("agf_freeblks %u, counted %u in ag %u\n"),
1124 be32_to_cpu(agf->agf_freeblks), agcnts->agffreeblks, agno);
1125 }
1126
1127 if (be32_to_cpu(agf->agf_longest) != agcnts->agflongest) {
1128 do_warn(_("agf_longest %u, counted %u in ag %u\n"),
1129 be32_to_cpu(agf->agf_longest), agcnts->agflongest, agno);
1130 }
1131
1132 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1133 be32_to_cpu(agf->agf_btreeblks) != agcnts->agfbtreeblks) {
1134 do_warn(_("agf_btreeblks %u, counted %" PRIu64 " in ag %u\n"),
1135 be32_to_cpu(agf->agf_btreeblks), agcnts->agfbtreeblks, agno);
1136 }
1137 }
1138
1139 static void
1140 validate_agi(
1141 struct xfs_agi *agi,
1142 xfs_agnumber_t agno,
1143 struct aghdr_cnts *agcnts)
1144 {
1145 xfs_agblock_t bno;
1146 int i;
1147
1148 bno = be32_to_cpu(agi->agi_root);
1149 if (bno != 0 && verify_agbno(mp, agno, bno)) {
1150 scan_sbtree(bno, be32_to_cpu(agi->agi_level),
1151 agno, 0, scanfunc_ino, 1, agcnts);
1152 } else {
1153 do_warn(_("bad agbno %u for inobt root, agno %d\n"),
1154 be32_to_cpu(agi->agi_root), agno);
1155 }
1156
1157 if (be32_to_cpu(agi->agi_count) != agcnts->agicount) {
1158 do_warn(_("agi_count %u, counted %u in ag %u\n"),
1159 be32_to_cpu(agi->agi_count), agcnts->agicount, agno);
1160 }
1161
1162 if (be32_to_cpu(agi->agi_freecount) != agcnts->agifreecount) {
1163 do_warn(_("agi_freecount %u, counted %u in ag %u\n"),
1164 be32_to_cpu(agi->agi_freecount), agcnts->agifreecount, agno);
1165 }
1166
1167 for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
1168 xfs_agino_t agino = be32_to_cpu(agi->agi_unlinked[i]);
1169
1170 if (agino != NULLAGINO) {
1171 do_warn(
1172 _("agi unlinked bucket %d is %u in ag %u (inode=%" PRIu64 ")\n"),
1173 i, agino, agno,
1174 XFS_AGINO_TO_INO(mp, agno, agino));
1175 }
1176 }
1177 }
1178
1179 /*
1180 * Scan an AG for obvious corruption.
1181 */
1182 static void
1183 scan_ag(
1184 work_queue_t *wq,
1185 xfs_agnumber_t agno,
1186 void *arg)
1187 {
1188 struct aghdr_cnts *agcnts = arg;
1189 xfs_agf_t *agf;
1190 xfs_buf_t *agfbuf;
1191 int agf_dirty = 0;
1192 xfs_agi_t *agi;
1193 xfs_buf_t *agibuf;
1194 int agi_dirty = 0;
1195 xfs_sb_t *sb;
1196 xfs_buf_t *sbbuf;
1197 int sb_dirty = 0;
1198 int status;
1199
1200 sbbuf = libxfs_readbuf(mp->m_dev, XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
1201 XFS_FSS_TO_BB(mp, 1), 0);
1202 if (!sbbuf) {
1203 do_error(_("can't get root superblock for ag %d\n"), agno);
1204 return;
1205 }
1206
1207 sb = (xfs_sb_t *)calloc(BBSIZE, 1);
1208 if (!sb) {
1209 do_error(_("can't allocate memory for superblock\n"));
1210 libxfs_putbuf(sbbuf);
1211 return;
1212 }
1213 libxfs_sb_from_disk(sb, XFS_BUF_TO_SBP(sbbuf));
1214
1215 agfbuf = libxfs_readbuf(mp->m_dev,
1216 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1217 XFS_FSS_TO_BB(mp, 1), 0);
1218 if (!agfbuf) {
1219 do_error(_("can't read agf block for ag %d\n"), agno);
1220 libxfs_putbuf(sbbuf);
1221 free(sb);
1222 return;
1223 }
1224 agf = XFS_BUF_TO_AGF(agfbuf);
1225
1226 agibuf = libxfs_readbuf(mp->m_dev,
1227 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
1228 XFS_FSS_TO_BB(mp, 1), 0);
1229 if (!agibuf) {
1230 do_error(_("can't read agi block for ag %d\n"), agno);
1231 libxfs_putbuf(agfbuf);
1232 libxfs_putbuf(sbbuf);
1233 free(sb);
1234 return;
1235 }
1236 agi = XFS_BUF_TO_AGI(agibuf);
1237
1238 /* fix up bad ag headers */
1239
1240 status = verify_set_agheader(mp, sbbuf, sb, agf, agi, agno);
1241
1242 if (status & XR_AG_SB_SEC) {
1243 if (!no_modify)
1244 sb_dirty = 1;
1245 /*
1246 * clear bad sector bit because we don't want
1247 * to skip further processing. we just want to
1248 * ensure that we write out the modified sb buffer.
1249 */
1250 status &= ~XR_AG_SB_SEC;
1251 }
1252 if (status & XR_AG_SB) {
1253 if (!no_modify) {
1254 do_warn(_("reset bad sb for ag %d\n"), agno);
1255 sb_dirty = 1;
1256 } else {
1257 do_warn(_("would reset bad sb for ag %d\n"), agno);
1258 }
1259 }
1260 if (status & XR_AG_AGF) {
1261 if (!no_modify) {
1262 do_warn(_("reset bad agf for ag %d\n"), agno);
1263 agf_dirty = 1;
1264 } else {
1265 do_warn(_("would reset bad agf for ag %d\n"), agno);
1266 }
1267 }
1268 if (status & XR_AG_AGI) {
1269 if (!no_modify) {
1270 do_warn(_("reset bad agi for ag %d\n"), agno);
1271 agi_dirty = 1;
1272 } else {
1273 do_warn(_("would reset bad agi for ag %d\n"), agno);
1274 }
1275 }
1276
1277 if (status && no_modify) {
1278 libxfs_putbuf(agibuf);
1279 libxfs_putbuf(agfbuf);
1280 libxfs_putbuf(sbbuf);
1281 free(sb);
1282
1283 do_warn(_("bad uncorrected agheader %d, skipping ag...\n"),
1284 agno);
1285
1286 return;
1287 }
1288
1289 scan_freelist(agf, agcnts);
1290
1291 validate_agf(agf, agno, agcnts);
1292 validate_agi(agi, agno, agcnts);
1293
1294 ASSERT(agi_dirty == 0 || (agi_dirty && !no_modify));
1295
1296 if (agi_dirty && !no_modify)
1297 libxfs_writebuf(agibuf, 0);
1298 else
1299 libxfs_putbuf(agibuf);
1300
1301 ASSERT(agf_dirty == 0 || (agf_dirty && !no_modify));
1302
1303 if (agf_dirty && !no_modify)
1304 libxfs_writebuf(agfbuf, 0);
1305 else
1306 libxfs_putbuf(agfbuf);
1307
1308 ASSERT(sb_dirty == 0 || (sb_dirty && !no_modify));
1309
1310 if (sb_dirty && !no_modify) {
1311 if (agno == 0)
1312 memcpy(&mp->m_sb, sb, sizeof(xfs_sb_t));
1313 libxfs_sb_to_disk(XFS_BUF_TO_SBP(sbbuf), sb, XFS_SB_ALL_BITS);
1314 libxfs_writebuf(sbbuf, 0);
1315 } else
1316 libxfs_putbuf(sbbuf);
1317 free(sb);
1318 PROG_RPT_INC(prog_rpt_done[agno], 1);
1319
1320 #ifdef XR_INODE_TRACE
1321 print_inode_list(i);
1322 #endif
1323 return;
1324 }
1325
1326 #define SCAN_THREADS 32
1327
1328 void
1329 scan_ags(
1330 struct xfs_mount *mp,
1331 int scan_threads)
1332 {
1333 struct aghdr_cnts *agcnts;
1334 __uint64_t fdblocks = 0;
1335 __uint64_t icount = 0;
1336 __uint64_t ifreecount = 0;
1337 xfs_agnumber_t i;
1338 work_queue_t wq;
1339
1340 agcnts = malloc(mp->m_sb.sb_agcount * sizeof(*agcnts));
1341 if (!agcnts) {
1342 do_abort(_("no memory for ag header counts\n"));
1343 return;
1344 }
1345 memset(agcnts, 0, mp->m_sb.sb_agcount * sizeof(*agcnts));
1346
1347 create_work_queue(&wq, mp, scan_threads);
1348
1349 for (i = 0; i < mp->m_sb.sb_agcount; i++)
1350 queue_work(&wq, scan_ag, i, &agcnts[i]);
1351
1352 destroy_work_queue(&wq);
1353
1354 /* tally up the counts */
1355 for (i = 0; i < mp->m_sb.sb_agcount; i++) {
1356 fdblocks += agcnts[i].fdblocks;
1357 icount += agcnts[i].icount;
1358 ifreecount += agcnts[i].ifreecount;
1359 }
1360
1361 /*
1362 * Validate that our manual counts match the superblock.
1363 */
1364 if (mp->m_sb.sb_icount != icount) {
1365 do_warn(_("sb_icount %" PRIu64 ", counted %" PRIu64 "\n"),
1366 mp->m_sb.sb_icount, icount);
1367 }
1368
1369 if (mp->m_sb.sb_ifree != ifreecount) {
1370 do_warn(_("sb_ifree %" PRIu64 ", counted %" PRIu64 "\n"),
1371 mp->m_sb.sb_ifree, ifreecount);
1372 }
1373
1374 if (mp->m_sb.sb_fdblocks != fdblocks) {
1375 do_warn(_("sb_fdblocks %" PRIu64 ", counted %" PRIu64 "\n"),
1376 mp->m_sb.sb_fdblocks, fdblocks);
1377 }
1378 }
1379