/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * Walk all the blocks in the AGFL.  The fn function can return any negative
 * error code or XFS_BTREE_QUERY_RANGE_ABORT.
 */
int
xfs_scrub_walk_agfl(
	struct xfs_scrub_context	*sc,
	int				(*fn)(struct xfs_scrub_context *,
					      xfs_agblock_t bno, void *),
	void				*priv)
{
	struct xfs_agf			*agf;
	__be32				*agfl_bno;
	struct xfs_mount		*mp = sc->mp;
	unsigned int			flfirst;
	unsigned int			fllast;
	int				i;
	int				error;

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
	flfirst = be32_to_cpu(agf->agf_flfirst);
	fllast = be32_to_cpu(agf->agf_fllast);

	/* Nothing to walk in an empty AGFL. */
	if (agf->agf_flcount == cpu_to_be32(0))
		return 0;

	/* first to last is a consecutive list. */
	if (fllast >= flfirst) {
		for (i = flfirst; i <= fllast; i++) {
			error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
			if (error)
				return error;
			if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
				return error;
		}

		return 0;
	}
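
	/*
	 * Otherwise the active entries wrap past the end of the AGFL
	 * array, so walk the list in two legs: flfirst to the end of
	 * the array, then index zero through fllast.
	 */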

	/* first to the end */
	for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	/* the start to last. */
	for (i = 0; i <= fllast; i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	return 0;
}

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_superblock_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	struct xfs_mount		*mp = sc->mp;
	xfs_agnumber_t			agno = sc->sm->sm_agno;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xfs_scrub_ag_init(sc, agno, &sc->sa);
	if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xfs_scrub_superblock(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	struct xfs_dsb			*sb;
	xfs_agnumber_t			agno;
	uint32_t			v2_ok;
	__be32				features_mask;
	int				error;
	__be16				vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
		  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
		  XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
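		/* fall through */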
	default:
		break;
	}
	if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked, i.e. flagged as stale but harmless rather than
	 * corrupt.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xfs_scrub_block_set_corrupt(sc, bp);

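		/*
		 * sb_features2 is duplicated at sb_bad_features2 because
		 * an old struct-padding bug caused some kernels to write
		 * the field four bytes off; the two copies are kept in
		 * sync and should match.
		 */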
		if (sb->sb_features2 != sb->sb_bad_features2)
			xfs_scrub_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xfs_scrub_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xfs_scrub_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xfs_scrub_block_set_corrupt(sc, bp);

	xfs_scrub_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xfs_scrub_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xfs_scrub_agf_xref_freeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t			blocks = 0;
	int				error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xfs_scrub_agf_record_bno_lengths, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xfs_scrub_agf_xref_cntbt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			agbno;
	xfs_extlen_t			blocks;
	int				have;
	int				error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

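	/*
	 * The cntbt is sorted by extent length, so after the LE lookup
	 * of the maximum possible length above, the cursor points at
	 * the longest free extent in the AG.
	 */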
	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agf_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_agf_xref_freeblks(sc);
	xfs_scrub_agf_xref_cntbt(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xfs_scrub_agf(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agblock_t			agfl_first;
	xfs_agblock_t			agfl_last;
	xfs_agblock_t			agfl_count;
	xfs_agblock_t			fl_count;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
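	/*
	 * The AGFL is circular: e.g. with XFS_AGFL_SIZE(mp) == 118,
	 * flfirst == 116 and fllast == 1 describe the four active
	 * entries 116, 117, 0, 1 (118 - 116 + 1 + 1).  A zero flcount
	 * means there is no active run at all, so the length check is
	 * skipped.
	 */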
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	xfs_scrub_agf_xref(sc);
out:
	return error;
}

/* AGFL */

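/* In-memory scratchpad for checking that the AGFL entries are unique. */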
struct xfs_scrub_agfl_info {
	unsigned int		sz_entries;
	unsigned int		nr_entries;
	xfs_agblock_t		*entries;
};

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_block_xref(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	void				*priv)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_agfl_info	*sai = priv;
	xfs_agnumber_t			agno = sc->sa.agno;

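	/*
	 * Record each block for the duplicate scan in xfs_scrub_agfl;
	 * an out-of-range block, or more entries than agf_flcount
	 * promised, means the AGFL is corrupt.
	 */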
	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_block_xref(sc, agbno);

	return 0;
}

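/* Compare two AGFL block numbers so that sort() can order the entries. */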
static int
xfs_scrub_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xfs_scrub_agfl(
	struct xfs_scrub_context	*sc)
{
	struct xfs_scrub_agfl_info	sai = { 0 };
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	unsigned int			agflcount;
	unsigned int			i;
	int				error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;

	xfs_scrub_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	sai.sz_entries = agflcount;
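	/*
	 * KM_NOFS keeps this allocation from recursing into the
	 * filesystem via memory reclaim, since we hold locked AG header
	 * buffers here.
	 */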
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xfs_scrub_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agi_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xfs_scrub_agi(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agi			*agi;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agino_t			agino;
	xfs_agino_t			first_agino;
	xfs_agino_t			last_agino;
	xfs_agino_t			icount;
	int				i;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
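	/*
	 * Each bucket heads a chain of unlinked-but-still-open inodes;
	 * every head must be NULLAGINO or a valid inode in this AG.
	 */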
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	xfs_scrub_agi_xref(sc);
out:
	return error;
}