]> git.ipfire.org Git - people/ms/linux.git/blame - fs/xfs/scrub/agheader.c
xfs: shorten struct xfs_scrub_context to struct xfs_scrub
[people/ms/linux.git] / fs / xfs / scrub / agheader.c
CommitLineData
0b61f8a4 1// SPDX-License-Identifier: GPL-2.0+
21fb4cb1
DW
2/*
3 * Copyright (C) 2017 Oracle. All Rights Reserved.
21fb4cb1 4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
21fb4cb1
DW
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_defer.h"
13#include "xfs_btree.h"
14#include "xfs_bit.h"
15#include "xfs_log_format.h"
16#include "xfs_trans.h"
17#include "xfs_sb.h"
18#include "xfs_inode.h"
ab9d5dc5 19#include "xfs_alloc.h"
a12890ae 20#include "xfs_ialloc.h"
d852657c 21#include "xfs_rmap.h"
21fb4cb1
DW
22#include "scrub/xfs_scrub.h"
23#include "scrub/scrub.h"
24#include "scrub/common.h"
25#include "scrub/trace.h"
26
21fb4cb1
DW
27/* Superblock */
28
166d7641
DW
/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	/* If the primary check already found corruption, don't pile on. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	/* Attach AG headers and btree cursors for cross-referencing. */
	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	/*
	 * The superblock must show up as allocated space, must not be part
	 * of an inode chunk, must be owned by the filesystem (OWN_FS), and
	 * must not be marked shared in the refcount btree.
	 */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
58
21fb4cb1
DW
/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 *
 * Returns 0 or a negative errno; corruption findings are reported via
 * sc->sm->sm_flags rather than the return value.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	/* sb 0 was validated at mount time; nothing to do here. */
	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	/* Label drift between supers is cosmetic, hence preen. */
	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		/* sb_bad_features2 is expected to mirror sb_features2. */
		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}
ab9d5dc5
DW
349
350/* AGF */
351
52dc4b44
DW
/*
 * Tally freespace record lengths.  Callback for xfs_alloc_query_all();
 * @priv points at the running xfs_extlen_t total.  Always returns 0 so
 * the walk visits every record.
 */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}
364
/*
 * Check agf_freeblks against the bnobt: the sum of all free extent
 * lengths must match the AGF's free block counter.
 */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t		blocks = 0;
	int			error;

	/* No bnobt cursor means we can't cross-reference; not an error. */
	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
384
e1134b12
DW
385/* Cross reference the AGF with the cntbt (freespace by length btree) */
386static inline void
c517b3aa 387xchk_agf_xref_cntbt(
1d8a748a 388 struct xfs_scrub *sc)
e1134b12
DW
389{
390 struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
391 xfs_agblock_t agbno;
392 xfs_extlen_t blocks;
393 int have;
394 int error;
395
396 if (!sc->sa.cnt_cur)
397 return;
398
399 /* Any freespace at all? */
400 error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
c517b3aa 401 if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
e1134b12
DW
402 return;
403 if (!have) {
404 if (agf->agf_freeblks != be32_to_cpu(0))
c517b3aa 405 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
e1134b12
DW
406 return;
407 }
408
409 /* Check agf_longest */
410 error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
c517b3aa 411 if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
e1134b12
DW
412 return;
413 if (!have || blocks != be32_to_cpu(agf->agf_longest))
c517b3aa 414 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
e1134b12
DW
415}
416
d852657c
DW
/*
 * Check the btree block counts in the AGF against the btrees.
 *
 * Verifies agf_rmap_blocks against the rmapbt size, then sums the block
 * counts of the rmapbt, bnobt, and cntbt to verify agf_btreeblks.
 */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		/*
		 * NOTE(review): each btree contributes (blocks - 1) to
		 * agf_btreeblks — presumably the counter excludes one block
		 * per tree (the root); confirm against the AGF definition.
		 */
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
462
f6d5fc21
DW
/*
 * Check agf_refcount_blocks against tree size: the block count of the
 * refcount btree must match the AGF's counter exactly.
 */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t		blocks;
	int			error;

	/* No refcountbt cursor (e.g. no reflink feature); skip. */
	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
481
166d7641
DW
/* Cross-reference the AGF with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	/* Primary check already flagged corruption; skip cross-referencing. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	/* AG headers were read by xchk_agf; only the cursors are needed. */
	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	/* AGF block must be allocated, FS-owned, not inodes, not shared. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}
513
ab9d5dc5
DW
/*
 * Scrub the AGF.  Checks the AG length, the roots and levels of all the
 * free space (and, if present, rmap/refcount) btrees, and the AGFL
 * circular-list counters, then cross-references against the btrees.
 * Returns 0 or a negative errno; corruption is reported via sm_flags.
 */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* rmapbt root/level only exist if the feature is enabled. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* refcountbt root/level only exist if reflink is enabled. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/*
	 * Check the AGFL counters.  The AGFL is a circular list, so the
	 * occupied length either runs first..last directly or wraps around
	 * the end of the list.
	 */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}
597
598/* AGFL */
599
/* Walk context for checking the blocks listed in the AGFL. */
struct xchk_agfl_info {
	struct xfs_owner_info	oinfo;		/* rmap owner (OWN_AG) for xrefs */
	unsigned int		sz_entries;	/* capacity of @entries */
	unsigned int		nr_entries;	/* entries collected so far */
	xfs_agblock_t		*entries;	/* AGFL block numbers seen */
	struct xfs_scrub	*sc;		/* scrub context */
};
607
166d7641
DW
/*
 * Cross-reference one AGFL block with the other btrees: it must be
 * allocated, not part of an inode chunk, owned by @oinfo's owner, and
 * not shared.
 */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	struct xfs_owner_info	*oinfo)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);
}
623
ab9d5dc5
DW
624/* Scrub an AGFL block. */
625STATIC int
c517b3aa 626xchk_agfl_block(
9f3a080e 627 struct xfs_mount *mp,
ab9d5dc5
DW
628 xfs_agblock_t agbno,
629 void *priv)
630{
c517b3aa 631 struct xchk_agfl_info *sai = priv;
1d8a748a 632 struct xfs_scrub *sc = sai->sc;
ab9d5dc5
DW
633 xfs_agnumber_t agno = sc->sa.agno;
634
d44b47fd
DW
635 if (xfs_verify_agbno(mp, agno, agbno) &&
636 sai->nr_entries < sai->sz_entries)
637 sai->entries[sai->nr_entries++] = agbno;
638 else
c517b3aa 639 xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
ab9d5dc5 640
c517b3aa 641 xchk_agfl_block_xref(sc, agbno, priv);
166d7641 642
9f3a080e
DW
643 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
644 return XFS_BTREE_QUERY_RANGE_ABORT;
645
ab9d5dc5
DW
646 return 0;
647}
648
d44b47fd 649static int
c517b3aa 650xchk_agblock_cmp(
d44b47fd
DW
651 const void *pa,
652 const void *pb)
653{
654 const xfs_agblock_t *a = pa;
655 const xfs_agblock_t *b = pb;
656
657 return (int)*a - (int)*b;
658}
659
166d7641
DW
/* Cross-reference the AGFL header block with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	/* Primary check already flagged corruption; skip cross-referencing. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	/* AGFL block must be allocated, FS-owned, not inodes, not shared. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}
690
ab9d5dc5
DW
/*
 * Scrub the AGFL.  Walks every block on the free list, verifying each
 * block number, cross-referencing it with the btrees, checking the list
 * length against agf_flcount, and finally sorting the collected entries
 * to detect duplicates.  Returns 0 or a negative errno.
 */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	/* Can't interpret the AGFL without a good AGF. */
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	/* ABORT means corruption was flagged mid-walk, not a real error. */
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}
a12890ae
DW
765
766/* AGI */
767
2e6f2756
DW
/*
 * Check agi_count/agi_freecount against the inode btree's own tally of
 * allocated and free inodes.
 */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	/* No inobt cursor means we can't cross-reference; not an error. */
	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
788
166d7641
DW
/* Cross-reference the AGI with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_owner_info	oinfo;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;
	int			error;

	/* Primary check already flagged corruption; skip cross-referencing. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xchk_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	/* AGI block must be allocated, FS-owned, not inodes, not shared. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
817
a12890ae
DW
/*
 * Scrub the AGI.  Checks the AG length, the inobt (and optional finobt)
 * roots and levels, the inode counters, the newest/last-dir inode
 * pointers, the unlinked-inode hash buckets, and the padding field, then
 * cross-references against the btrees.  Returns 0 or a negative errno.
 */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* finobt root/level only exist if the feature is enabled. */
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/*
	 * Check inode counters: the allocated count cannot exceed the
	 * number of inode numbers this AG can hold, and the free count
	 * cannot exceed the allocated count.
	 */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Padding must be zeroed on disk. */
	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}