// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
21fb4cb1 | 19 | |
/* Superblock */
166d7641 DW |
22 | /* Cross-reference with the other btrees. */ |
23 | STATIC void | |
c517b3aa | 24 | xchk_superblock_xref( |
1d8a748a | 25 | struct xfs_scrub *sc, |
032d91f9 | 26 | struct xfs_buf *bp) |
166d7641 | 27 | { |
032d91f9 DW |
28 | struct xfs_mount *mp = sc->mp; |
29 | xfs_agnumber_t agno = sc->sm->sm_agno; | |
30 | xfs_agblock_t agbno; | |
31 | int error; | |
52dc4b44 | 32 | |
166d7641 DW |
33 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
34 | return; | |
52dc4b44 DW |
35 | |
36 | agbno = XFS_SB_BLOCK(mp); | |
37 | ||
c517b3aa DW |
38 | error = xchk_ag_init(sc, agno, &sc->sa); |
39 | if (!xchk_xref_process_error(sc, agno, agbno, &error)) | |
52dc4b44 DW |
40 | return; |
41 | ||
c517b3aa DW |
42 | xchk_xref_is_used_space(sc, agbno, 1); |
43 | xchk_xref_is_not_inode_chunk(sc, agbno, 1); | |
7280feda | 44 | xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); |
c517b3aa | 45 | xchk_xref_is_not_shared(sc, agbno, 1); |
52dc4b44 DW |
46 | |
47 | /* scrub teardown will take care of sc->sa for us */ | |
166d7641 DW |
48 | } |
49 | ||
21fb4cb1 DW |
50 | /* |
51 | * Scrub the filesystem superblock. | |
52 | * | |
53 | * Note: We do /not/ attempt to check AG 0's superblock. Mount is | |
54 | * responsible for validating all the geometry information in sb 0, so | |
55 | * if the filesystem is capable of initiating online scrub, then clearly | |
56 | * sb 0 is ok and we can use its information to check everything else. | |
57 | */ | |
58 | int | |
c517b3aa | 59 | xchk_superblock( |
1d8a748a | 60 | struct xfs_scrub *sc) |
21fb4cb1 | 61 | { |
032d91f9 DW |
62 | struct xfs_mount *mp = sc->mp; |
63 | struct xfs_buf *bp; | |
64 | struct xfs_dsb *sb; | |
65 | xfs_agnumber_t agno; | |
66 | uint32_t v2_ok; | |
67 | __be32 features_mask; | |
68 | int error; | |
69 | __be16 vernum_mask; | |
21fb4cb1 DW |
70 | |
71 | agno = sc->sm->sm_agno; | |
72 | if (agno == 0) | |
73 | return 0; | |
74 | ||
689e11c8 | 75 | error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp); |
e5b37faa DW |
76 | /* |
77 | * The superblock verifier can return several different error codes | |
78 | * if it thinks the superblock doesn't look right. For a mount these | |
79 | * would all get bounced back to userspace, but if we're here then the | |
80 | * fs mounted successfully, which means that this secondary superblock | |
81 | * is simply incorrect. Treat all these codes the same way we treat | |
82 | * any corruption. | |
83 | */ | |
84 | switch (error) { | |
85 | case -EINVAL: /* also -EWRONGFS */ | |
86 | case -ENOSYS: | |
87 | case -EFBIG: | |
88 | error = -EFSCORRUPTED; | |
89 | default: | |
90 | break; | |
91 | } | |
c517b3aa | 92 | if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error)) |
21fb4cb1 DW |
93 | return error; |
94 | ||
95 | sb = XFS_BUF_TO_SBP(bp); | |
96 | ||
97 | /* | |
98 | * Verify the geometries match. Fields that are permanently | |
99 | * set by mkfs are checked; fields that can be updated later | |
100 | * (and are not propagated to backup superblocks) are preen | |
101 | * checked. | |
102 | */ | |
103 | if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize)) | |
c517b3aa | 104 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
105 | |
106 | if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks)) | |
c517b3aa | 107 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
108 | |
109 | if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks)) | |
c517b3aa | 110 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
111 | |
112 | if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents)) | |
c517b3aa | 113 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
114 | |
115 | if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid)) | |
c517b3aa | 116 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
117 | |
118 | if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart)) | |
c517b3aa | 119 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
120 | |
121 | if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino)) | |
c517b3aa | 122 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
123 | |
124 | if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino)) | |
c517b3aa | 125 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
126 | |
127 | if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino)) | |
c517b3aa | 128 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
129 | |
130 | if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize)) | |
c517b3aa | 131 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
132 | |
133 | if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks)) | |
c517b3aa | 134 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
135 | |
136 | if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount)) | |
c517b3aa | 137 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
138 | |
139 | if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks)) | |
c517b3aa | 140 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
141 | |
142 | if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks)) | |
c517b3aa | 143 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
144 | |
145 | /* Check sb_versionnum bits that are set at mkfs time. */ | |
146 | vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS | | |
147 | XFS_SB_VERSION_NUMBITS | | |
148 | XFS_SB_VERSION_ALIGNBIT | | |
149 | XFS_SB_VERSION_DALIGNBIT | | |
150 | XFS_SB_VERSION_SHAREDBIT | | |
151 | XFS_SB_VERSION_LOGV2BIT | | |
152 | XFS_SB_VERSION_SECTORBIT | | |
153 | XFS_SB_VERSION_EXTFLGBIT | | |
154 | XFS_SB_VERSION_DIRV2BIT); | |
155 | if ((sb->sb_versionnum & vernum_mask) != | |
156 | (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask)) | |
c517b3aa | 157 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
158 | |
159 | /* Check sb_versionnum bits that can be set after mkfs time. */ | |
160 | vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT | | |
161 | XFS_SB_VERSION_NLINKBIT | | |
162 | XFS_SB_VERSION_QUOTABIT); | |
163 | if ((sb->sb_versionnum & vernum_mask) != | |
164 | (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask)) | |
c517b3aa | 165 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
166 | |
167 | if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize)) | |
c517b3aa | 168 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
169 | |
170 | if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize)) | |
c517b3aa | 171 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
172 | |
173 | if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock)) | |
c517b3aa | 174 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
175 | |
176 | if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname))) | |
c517b3aa | 177 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
178 | |
179 | if (sb->sb_blocklog != mp->m_sb.sb_blocklog) | |
c517b3aa | 180 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
181 | |
182 | if (sb->sb_sectlog != mp->m_sb.sb_sectlog) | |
c517b3aa | 183 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
184 | |
185 | if (sb->sb_inodelog != mp->m_sb.sb_inodelog) | |
c517b3aa | 186 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
187 | |
188 | if (sb->sb_inopblog != mp->m_sb.sb_inopblog) | |
c517b3aa | 189 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
190 | |
191 | if (sb->sb_agblklog != mp->m_sb.sb_agblklog) | |
c517b3aa | 192 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
193 | |
194 | if (sb->sb_rextslog != mp->m_sb.sb_rextslog) | |
c517b3aa | 195 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
196 | |
197 | if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct) | |
c517b3aa | 198 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
199 | |
200 | /* | |
201 | * Skip the summary counters since we track them in memory anyway. | |
202 | * sb_icount, sb_ifree, sb_fdblocks, sb_frexents | |
203 | */ | |
204 | ||
205 | if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino)) | |
c517b3aa | 206 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
207 | |
208 | if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino)) | |
c517b3aa | 209 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
210 | |
211 | /* | |
212 | * Skip the quota flags since repair will force quotacheck. | |
213 | * sb_qflags | |
214 | */ | |
215 | ||
216 | if (sb->sb_flags != mp->m_sb.sb_flags) | |
c517b3aa | 217 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
218 | |
219 | if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn) | |
c517b3aa | 220 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
221 | |
222 | if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt)) | |
c517b3aa | 223 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
224 | |
225 | if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit)) | |
c517b3aa | 226 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
227 | |
228 | if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width)) | |
c517b3aa | 229 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
230 | |
231 | if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog) | |
c517b3aa | 232 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
233 | |
234 | if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog) | |
c517b3aa | 235 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
236 | |
237 | if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize)) | |
c517b3aa | 238 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
239 | |
240 | if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit)) | |
c517b3aa | 241 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
242 | |
243 | /* Do we see any invalid bits in sb_features2? */ | |
244 | if (!xfs_sb_version_hasmorebits(&mp->m_sb)) { | |
245 | if (sb->sb_features2 != 0) | |
c517b3aa | 246 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
247 | } else { |
248 | v2_ok = XFS_SB_VERSION2_OKBITS; | |
249 | if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5) | |
250 | v2_ok |= XFS_SB_VERSION2_CRCBIT; | |
251 | ||
252 | if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok))) | |
c517b3aa | 253 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
254 | |
255 | if (sb->sb_features2 != sb->sb_bad_features2) | |
c517b3aa | 256 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
257 | } |
258 | ||
259 | /* Check sb_features2 flags that are set at mkfs time. */ | |
260 | features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT | | |
261 | XFS_SB_VERSION2_PROJID32BIT | | |
262 | XFS_SB_VERSION2_CRCBIT | | |
263 | XFS_SB_VERSION2_FTYPE); | |
264 | if ((sb->sb_features2 & features_mask) != | |
265 | (cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) | |
c517b3aa | 266 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
267 | |
268 | /* Check sb_features2 flags that can be set after mkfs time. */ | |
269 | features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT); | |
270 | if ((sb->sb_features2 & features_mask) != | |
271 | (cpu_to_be32(mp->m_sb.sb_features2) & features_mask)) | |
c517b3aa | 272 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
273 | |
274 | if (!xfs_sb_version_hascrc(&mp->m_sb)) { | |
275 | /* all v5 fields must be zero */ | |
276 | if (memchr_inv(&sb->sb_features_compat, 0, | |
277 | sizeof(struct xfs_dsb) - | |
278 | offsetof(struct xfs_dsb, sb_features_compat))) | |
c517b3aa | 279 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
280 | } else { |
281 | /* Check compat flags; all are set at mkfs time. */ | |
282 | features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN); | |
283 | if ((sb->sb_features_compat & features_mask) != | |
284 | (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask)) | |
c517b3aa | 285 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
286 | |
287 | /* Check ro compat flags; all are set at mkfs time. */ | |
288 | features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN | | |
289 | XFS_SB_FEAT_RO_COMPAT_FINOBT | | |
290 | XFS_SB_FEAT_RO_COMPAT_RMAPBT | | |
291 | XFS_SB_FEAT_RO_COMPAT_REFLINK); | |
292 | if ((sb->sb_features_ro_compat & features_mask) != | |
293 | (cpu_to_be32(mp->m_sb.sb_features_ro_compat) & | |
294 | features_mask)) | |
c517b3aa | 295 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
296 | |
297 | /* Check incompat flags; all are set at mkfs time. */ | |
298 | features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN | | |
299 | XFS_SB_FEAT_INCOMPAT_FTYPE | | |
300 | XFS_SB_FEAT_INCOMPAT_SPINODES | | |
301 | XFS_SB_FEAT_INCOMPAT_META_UUID); | |
302 | if ((sb->sb_features_incompat & features_mask) != | |
303 | (cpu_to_be32(mp->m_sb.sb_features_incompat) & | |
304 | features_mask)) | |
c517b3aa | 305 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
306 | |
307 | /* Check log incompat flags; all are set at mkfs time. */ | |
308 | features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN); | |
309 | if ((sb->sb_features_log_incompat & features_mask) != | |
310 | (cpu_to_be32(mp->m_sb.sb_features_log_incompat) & | |
311 | features_mask)) | |
c517b3aa | 312 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
313 | |
314 | /* Don't care about sb_crc */ | |
315 | ||
316 | if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align)) | |
c517b3aa | 317 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
318 | |
319 | if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino)) | |
c517b3aa | 320 | xchk_block_set_preen(sc, bp); |
21fb4cb1 DW |
321 | |
322 | /* Don't care about sb_lsn */ | |
323 | } | |
324 | ||
325 | if (xfs_sb_version_hasmetauuid(&mp->m_sb)) { | |
326 | /* The metadata UUID must be the same for all supers */ | |
327 | if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid)) | |
c517b3aa | 328 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 DW |
329 | } |
330 | ||
331 | /* Everything else must be zero. */ | |
332 | if (memchr_inv(sb + 1, 0, | |
333 | BBTOB(bp->b_length) - sizeof(struct xfs_dsb))) | |
c517b3aa | 334 | xchk_block_set_corrupt(sc, bp); |
21fb4cb1 | 335 | |
c517b3aa | 336 | xchk_superblock_xref(sc, bp); |
166d7641 | 337 | |
21fb4cb1 DW |
338 | return error; |
339 | } | |
/* AGF */
52dc4b44 DW |
343 | /* Tally freespace record lengths. */ |
344 | STATIC int | |
c517b3aa | 345 | xchk_agf_record_bno_lengths( |
52dc4b44 DW |
346 | struct xfs_btree_cur *cur, |
347 | struct xfs_alloc_rec_incore *rec, | |
348 | void *priv) | |
349 | { | |
350 | xfs_extlen_t *blocks = priv; | |
351 | ||
352 | (*blocks) += rec->ar_blockcount; | |
353 | return 0; | |
354 | } | |
355 | ||
356 | /* Check agf_freeblks */ | |
357 | static inline void | |
c517b3aa | 358 | xchk_agf_xref_freeblks( |
1d8a748a | 359 | struct xfs_scrub *sc) |
52dc4b44 | 360 | { |
032d91f9 DW |
361 | struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); |
362 | xfs_extlen_t blocks = 0; | |
363 | int error; | |
52dc4b44 DW |
364 | |
365 | if (!sc->sa.bno_cur) | |
366 | return; | |
367 | ||
368 | error = xfs_alloc_query_all(sc->sa.bno_cur, | |
c517b3aa DW |
369 | xchk_agf_record_bno_lengths, &blocks); |
370 | if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) | |
52dc4b44 DW |
371 | return; |
372 | if (blocks != be32_to_cpu(agf->agf_freeblks)) | |
c517b3aa | 373 | xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); |
52dc4b44 DW |
374 | } |
375 | ||
e1134b12 DW |
376 | /* Cross reference the AGF with the cntbt (freespace by length btree) */ |
377 | static inline void | |
c517b3aa | 378 | xchk_agf_xref_cntbt( |
1d8a748a | 379 | struct xfs_scrub *sc) |
e1134b12 | 380 | { |
032d91f9 DW |
381 | struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); |
382 | xfs_agblock_t agbno; | |
383 | xfs_extlen_t blocks; | |
384 | int have; | |
385 | int error; | |
e1134b12 DW |
386 | |
387 | if (!sc->sa.cnt_cur) | |
388 | return; | |
389 | ||
390 | /* Any freespace at all? */ | |
391 | error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have); | |
c517b3aa | 392 | if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) |
e1134b12 DW |
393 | return; |
394 | if (!have) { | |
3d129e1b | 395 | if (agf->agf_freeblks != cpu_to_be32(0)) |
c517b3aa | 396 | xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); |
e1134b12 DW |
397 | return; |
398 | } | |
399 | ||
400 | /* Check agf_longest */ | |
401 | error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have); | |
c517b3aa | 402 | if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) |
e1134b12 DW |
403 | return; |
404 | if (!have || blocks != be32_to_cpu(agf->agf_longest)) | |
c517b3aa | 405 | xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); |
e1134b12 DW |
406 | } |
407 | ||
d852657c DW |
408 | /* Check the btree block counts in the AGF against the btrees. */ |
409 | STATIC void | |
c517b3aa | 410 | xchk_agf_xref_btreeblks( |
1d8a748a | 411 | struct xfs_scrub *sc) |
d852657c | 412 | { |
032d91f9 DW |
413 | struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); |
414 | struct xfs_mount *mp = sc->mp; | |
415 | xfs_agblock_t blocks; | |
416 | xfs_agblock_t btreeblks; | |
417 | int error; | |
d852657c DW |
418 | |
419 | /* Check agf_rmap_blocks; set up for agf_btreeblks check */ | |
420 | if (sc->sa.rmap_cur) { | |
421 | error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks); | |
c517b3aa | 422 | if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) |
d852657c DW |
423 | return; |
424 | btreeblks = blocks - 1; | |
425 | if (blocks != be32_to_cpu(agf->agf_rmap_blocks)) | |
c517b3aa | 426 | xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); |
d852657c DW |
427 | } else { |
428 | btreeblks = 0; | |
429 | } | |
430 | ||
431 | /* | |
432 | * No rmap cursor; we can't xref if we have the rmapbt feature. | |
433 | * We also can't do it if we're missing the free space btree cursors. | |
434 | */ | |
435 | if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) || | |
436 | !sc->sa.bno_cur || !sc->sa.cnt_cur) | |
437 | return; | |
438 | ||
439 | /* Check agf_btreeblks */ | |
440 | error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks); | |
c517b3aa | 441 | if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) |
d852657c DW |
442 | return; |
443 | btreeblks += blocks - 1; | |
444 | ||
445 | error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks); | |
c517b3aa | 446 | if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur)) |
d852657c DW |
447 | return; |
448 | btreeblks += blocks - 1; | |
449 | ||
450 | if (btreeblks != be32_to_cpu(agf->agf_btreeblks)) | |
c517b3aa | 451 | xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); |
d852657c DW |
452 | } |
453 | ||
f6d5fc21 DW |
454 | /* Check agf_refcount_blocks against tree size */ |
455 | static inline void | |
c517b3aa | 456 | xchk_agf_xref_refcblks( |
1d8a748a | 457 | struct xfs_scrub *sc) |
f6d5fc21 | 458 | { |
032d91f9 DW |
459 | struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); |
460 | xfs_agblock_t blocks; | |
461 | int error; | |
f6d5fc21 DW |
462 | |
463 | if (!sc->sa.refc_cur) | |
464 | return; | |
465 | ||
466 | error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks); | |
c517b3aa | 467 | if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) |
f6d5fc21 DW |
468 | return; |
469 | if (blocks != be32_to_cpu(agf->agf_refcount_blocks)) | |
c517b3aa | 470 | xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); |
f6d5fc21 DW |
471 | } |
472 | ||
166d7641 DW |
473 | /* Cross-reference with the other btrees. */ |
474 | STATIC void | |
c517b3aa | 475 | xchk_agf_xref( |
1d8a748a | 476 | struct xfs_scrub *sc) |
166d7641 | 477 | { |
032d91f9 DW |
478 | struct xfs_mount *mp = sc->mp; |
479 | xfs_agblock_t agbno; | |
480 | int error; | |
52dc4b44 | 481 | |
166d7641 DW |
482 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
483 | return; | |
52dc4b44 DW |
484 | |
485 | agbno = XFS_AGF_BLOCK(mp); | |
486 | ||
c517b3aa | 487 | error = xchk_ag_btcur_init(sc, &sc->sa); |
52dc4b44 DW |
488 | if (error) |
489 | return; | |
490 | ||
c517b3aa DW |
491 | xchk_xref_is_used_space(sc, agbno, 1); |
492 | xchk_agf_xref_freeblks(sc); | |
493 | xchk_agf_xref_cntbt(sc); | |
494 | xchk_xref_is_not_inode_chunk(sc, agbno, 1); | |
7280feda | 495 | xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); |
c517b3aa DW |
496 | xchk_agf_xref_btreeblks(sc); |
497 | xchk_xref_is_not_shared(sc, agbno, 1); | |
498 | xchk_agf_xref_refcblks(sc); | |
52dc4b44 DW |
499 | |
500 | /* scrub teardown will take care of sc->sa for us */ | |
166d7641 DW |
501 | } |
502 | ||
ab9d5dc5 DW |
503 | /* Scrub the AGF. */ |
504 | int | |
c517b3aa | 505 | xchk_agf( |
1d8a748a | 506 | struct xfs_scrub *sc) |
ab9d5dc5 | 507 | { |
032d91f9 DW |
508 | struct xfs_mount *mp = sc->mp; |
509 | struct xfs_agf *agf; | |
47cd97b5 | 510 | struct xfs_perag *pag; |
032d91f9 DW |
511 | xfs_agnumber_t agno; |
512 | xfs_agblock_t agbno; | |
513 | xfs_agblock_t eoag; | |
514 | xfs_agblock_t agfl_first; | |
515 | xfs_agblock_t agfl_last; | |
516 | xfs_agblock_t agfl_count; | |
517 | xfs_agblock_t fl_count; | |
518 | int level; | |
519 | int error = 0; | |
ab9d5dc5 DW |
520 | |
521 | agno = sc->sa.agno = sc->sm->sm_agno; | |
c517b3aa | 522 | error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp, |
ab9d5dc5 | 523 | &sc->sa.agf_bp, &sc->sa.agfl_bp); |
c517b3aa | 524 | if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error)) |
ab9d5dc5 | 525 | goto out; |
c517b3aa | 526 | xchk_buffer_recheck(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
527 | |
528 | agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); | |
529 | ||
530 | /* Check the AG length */ | |
531 | eoag = be32_to_cpu(agf->agf_length); | |
532 | if (eoag != xfs_ag_block_count(mp, agno)) | |
c517b3aa | 533 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
534 | |
535 | /* Check the AGF btree roots and levels */ | |
536 | agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]); | |
537 | if (!xfs_verify_agbno(mp, agno, agbno)) | |
c517b3aa | 538 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
539 | |
540 | agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]); | |
541 | if (!xfs_verify_agbno(mp, agno, agbno)) | |
c517b3aa | 542 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
543 | |
544 | level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]); | |
545 | if (level <= 0 || level > XFS_BTREE_MAXLEVELS) | |
c517b3aa | 546 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
547 | |
548 | level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]); | |
549 | if (level <= 0 || level > XFS_BTREE_MAXLEVELS) | |
c517b3aa | 550 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
551 | |
552 | if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { | |
553 | agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]); | |
554 | if (!xfs_verify_agbno(mp, agno, agbno)) | |
c517b3aa | 555 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
556 | |
557 | level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]); | |
558 | if (level <= 0 || level > XFS_BTREE_MAXLEVELS) | |
c517b3aa | 559 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
560 | } |
561 | ||
562 | if (xfs_sb_version_hasreflink(&mp->m_sb)) { | |
563 | agbno = be32_to_cpu(agf->agf_refcount_root); | |
564 | if (!xfs_verify_agbno(mp, agno, agbno)) | |
c517b3aa | 565 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
566 | |
567 | level = be32_to_cpu(agf->agf_refcount_level); | |
568 | if (level <= 0 || level > XFS_BTREE_MAXLEVELS) | |
c517b3aa | 569 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 DW |
570 | } |
571 | ||
572 | /* Check the AGFL counters */ | |
573 | agfl_first = be32_to_cpu(agf->agf_flfirst); | |
574 | agfl_last = be32_to_cpu(agf->agf_fllast); | |
575 | agfl_count = be32_to_cpu(agf->agf_flcount); | |
576 | if (agfl_last > agfl_first) | |
577 | fl_count = agfl_last - agfl_first + 1; | |
578 | else | |
a78ee256 | 579 | fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1; |
ab9d5dc5 | 580 | if (agfl_count != 0 && fl_count != agfl_count) |
c517b3aa | 581 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
ab9d5dc5 | 582 | |
47cd97b5 DW |
583 | /* Do the incore counters match? */ |
584 | pag = xfs_perag_get(mp, agno); | |
585 | if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks)) | |
586 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); | |
587 | if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount)) | |
588 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); | |
589 | if (pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks)) | |
590 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); | |
591 | xfs_perag_put(pag); | |
592 | ||
c517b3aa | 593 | xchk_agf_xref(sc); |
ab9d5dc5 DW |
594 | out: |
595 | return error; | |
596 | } | |
/* AGFL */
c517b3aa | 600 | struct xchk_agfl_info { |
032d91f9 DW |
601 | unsigned int sz_entries; |
602 | unsigned int nr_entries; | |
603 | xfs_agblock_t *entries; | |
1d8a748a | 604 | struct xfs_scrub *sc; |
d44b47fd DW |
605 | }; |
606 | ||
166d7641 DW |
607 | /* Cross-reference with the other btrees. */ |
608 | STATIC void | |
c517b3aa | 609 | xchk_agfl_block_xref( |
1d8a748a | 610 | struct xfs_scrub *sc, |
7280feda | 611 | xfs_agblock_t agbno) |
166d7641 DW |
612 | { |
613 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) | |
614 | return; | |
52dc4b44 | 615 | |
c517b3aa DW |
616 | xchk_xref_is_used_space(sc, agbno, 1); |
617 | xchk_xref_is_not_inode_chunk(sc, agbno, 1); | |
7280feda | 618 | xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG); |
c517b3aa | 619 | xchk_xref_is_not_shared(sc, agbno, 1); |
166d7641 DW |
620 | } |
621 | ||
ab9d5dc5 DW |
622 | /* Scrub an AGFL block. */ |
623 | STATIC int | |
c517b3aa | 624 | xchk_agfl_block( |
032d91f9 DW |
625 | struct xfs_mount *mp, |
626 | xfs_agblock_t agbno, | |
627 | void *priv) | |
ab9d5dc5 | 628 | { |
032d91f9 | 629 | struct xchk_agfl_info *sai = priv; |
1d8a748a | 630 | struct xfs_scrub *sc = sai->sc; |
032d91f9 | 631 | xfs_agnumber_t agno = sc->sa.agno; |
ab9d5dc5 | 632 | |
d44b47fd DW |
633 | if (xfs_verify_agbno(mp, agno, agbno) && |
634 | sai->nr_entries < sai->sz_entries) | |
635 | sai->entries[sai->nr_entries++] = agbno; | |
636 | else | |
c517b3aa | 637 | xchk_block_set_corrupt(sc, sc->sa.agfl_bp); |
ab9d5dc5 | 638 | |
7280feda | 639 | xchk_agfl_block_xref(sc, agbno); |
166d7641 | 640 | |
9f3a080e | 641 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
5bb46e3e | 642 | return XFS_ITER_ABORT; |
9f3a080e | 643 | |
ab9d5dc5 DW |
644 | return 0; |
645 | } | |
646 | ||
d44b47fd | 647 | static int |
c517b3aa | 648 | xchk_agblock_cmp( |
d44b47fd DW |
649 | const void *pa, |
650 | const void *pb) | |
651 | { | |
652 | const xfs_agblock_t *a = pa; | |
653 | const xfs_agblock_t *b = pb; | |
654 | ||
655 | return (int)*a - (int)*b; | |
656 | } | |
657 | ||
166d7641 DW |
658 | /* Cross-reference with the other btrees. */ |
659 | STATIC void | |
c517b3aa | 660 | xchk_agfl_xref( |
1d8a748a | 661 | struct xfs_scrub *sc) |
166d7641 | 662 | { |
032d91f9 DW |
663 | struct xfs_mount *mp = sc->mp; |
664 | xfs_agblock_t agbno; | |
665 | int error; | |
52dc4b44 | 666 | |
166d7641 DW |
667 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
668 | return; | |
52dc4b44 DW |
669 | |
670 | agbno = XFS_AGFL_BLOCK(mp); | |
671 | ||
c517b3aa | 672 | error = xchk_ag_btcur_init(sc, &sc->sa); |
52dc4b44 DW |
673 | if (error) |
674 | return; | |
675 | ||
c517b3aa DW |
676 | xchk_xref_is_used_space(sc, agbno, 1); |
677 | xchk_xref_is_not_inode_chunk(sc, agbno, 1); | |
7280feda | 678 | xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); |
c517b3aa | 679 | xchk_xref_is_not_shared(sc, agbno, 1); |
52dc4b44 DW |
680 | |
681 | /* | |
682 | * Scrub teardown will take care of sc->sa for us. Leave sc->sa | |
683 | * active so that the agfl block xref can use it too. | |
684 | */ | |
166d7641 DW |
685 | } |
686 | ||
ab9d5dc5 DW |
687 | /* Scrub the AGFL. */ |
688 | int | |
c517b3aa | 689 | xchk_agfl( |
1d8a748a | 690 | struct xfs_scrub *sc) |
ab9d5dc5 | 691 | { |
032d91f9 DW |
692 | struct xchk_agfl_info sai; |
693 | struct xfs_agf *agf; | |
694 | xfs_agnumber_t agno; | |
695 | unsigned int agflcount; | |
696 | unsigned int i; | |
697 | int error; | |
ab9d5dc5 DW |
698 | |
699 | agno = sc->sa.agno = sc->sm->sm_agno; | |
c517b3aa | 700 | error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp, |
ab9d5dc5 | 701 | &sc->sa.agf_bp, &sc->sa.agfl_bp); |
c517b3aa | 702 | if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error)) |
ab9d5dc5 DW |
703 | goto out; |
704 | if (!sc->sa.agf_bp) | |
705 | return -EFSCORRUPTED; | |
c517b3aa | 706 | xchk_buffer_recheck(sc, sc->sa.agfl_bp); |
ab9d5dc5 | 707 | |
c517b3aa | 708 | xchk_agfl_xref(sc); |
166d7641 DW |
709 | |
710 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) | |
711 | goto out; | |
712 | ||
d44b47fd DW |
713 | /* Allocate buffer to ensure uniqueness of AGFL entries. */ |
714 | agf = XFS_BUF_TO_AGF(sc->sa.agf_bp); | |
715 | agflcount = be32_to_cpu(agf->agf_flcount); | |
a78ee256 | 716 | if (agflcount > xfs_agfl_size(sc->mp)) { |
c517b3aa | 717 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
d44b47fd DW |
718 | goto out; |
719 | } | |
86516eff | 720 | memset(&sai, 0, sizeof(sai)); |
9f3a080e | 721 | sai.sc = sc; |
d44b47fd | 722 | sai.sz_entries = agflcount; |
631fc955 DW |
723 | sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, |
724 | KM_MAYFAIL); | |
d44b47fd DW |
725 | if (!sai.entries) { |
726 | error = -ENOMEM; | |
727 | goto out; | |
728 | } | |
729 | ||
ab9d5dc5 | 730 | /* Check the blocks in the AGFL. */ |
9f3a080e | 731 | error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp), |
c517b3aa | 732 | sc->sa.agfl_bp, xchk_agfl_block, &sai); |
5bb46e3e | 733 | if (error == XFS_ITER_ABORT) { |
9f3a080e DW |
734 | error = 0; |
735 | goto out_free; | |
736 | } | |
d44b47fd DW |
737 | if (error) |
738 | goto out_free; | |
739 | ||
740 | if (agflcount != sai.nr_entries) { | |
c517b3aa | 741 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
d44b47fd DW |
742 | goto out_free; |
743 | } | |
744 | ||
745 | /* Sort entries, check for duplicates. */ | |
746 | sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]), | |
c517b3aa | 747 | xchk_agblock_cmp, NULL); |
d44b47fd DW |
748 | for (i = 1; i < sai.nr_entries; i++) { |
749 | if (sai.entries[i] == sai.entries[i - 1]) { | |
c517b3aa | 750 | xchk_block_set_corrupt(sc, sc->sa.agf_bp); |
d44b47fd DW |
751 | break; |
752 | } | |
753 | } | |
754 | ||
755 | out_free: | |
756 | kmem_free(sai.entries); | |
ab9d5dc5 DW |
757 | out: |
758 | return error; | |
759 | } | |
/* AGI */
763 | /* Check agi_count/agi_freecount */ |
764 | static inline void | |
c517b3aa | 765 | xchk_agi_xref_icounts( |
1d8a748a | 766 | struct xfs_scrub *sc) |
2e6f2756 | 767 | { |
032d91f9 DW |
768 | struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp); |
769 | xfs_agino_t icount; | |
770 | xfs_agino_t freecount; | |
771 | int error; | |
2e6f2756 DW |
772 | |
773 | if (!sc->sa.ino_cur) | |
774 | return; | |
775 | ||
776 | error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount); | |
c517b3aa | 777 | if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur)) |
2e6f2756 DW |
778 | return; |
779 | if (be32_to_cpu(agi->agi_count) != icount || | |
780 | be32_to_cpu(agi->agi_freecount) != freecount) | |
c517b3aa | 781 | xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp); |
2e6f2756 DW |
782 | } |
783 | ||
166d7641 DW |
784 | /* Cross-reference with the other btrees. */ |
785 | STATIC void | |
c517b3aa | 786 | xchk_agi_xref( |
1d8a748a | 787 | struct xfs_scrub *sc) |
166d7641 | 788 | { |
032d91f9 DW |
789 | struct xfs_mount *mp = sc->mp; |
790 | xfs_agblock_t agbno; | |
791 | int error; | |
52dc4b44 | 792 | |
166d7641 DW |
793 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
794 | return; | |
52dc4b44 DW |
795 | |
796 | agbno = XFS_AGI_BLOCK(mp); | |
797 | ||
c517b3aa | 798 | error = xchk_ag_btcur_init(sc, &sc->sa); |
52dc4b44 DW |
799 | if (error) |
800 | return; | |
801 | ||
c517b3aa DW |
802 | xchk_xref_is_used_space(sc, agbno, 1); |
803 | xchk_xref_is_not_inode_chunk(sc, agbno, 1); | |
804 | xchk_agi_xref_icounts(sc); | |
7280feda | 805 | xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); |
c517b3aa | 806 | xchk_xref_is_not_shared(sc, agbno, 1); |
52dc4b44 DW |
807 | |
808 | /* scrub teardown will take care of sc->sa for us */ | |
166d7641 DW |
809 | } |
810 | ||
/*
 * Scrub the AGI.
 *
 * Reads the AG headers, then checks every field of the ondisk AGI against
 * computed limits: AG length, inobt/finobt root blocks and levels, inode
 * counters, inode pointers, the unlinked buckets, padding, and the incore
 * perag counters.  Each bad field marks the AGI buffer corrupt; only I/O
 * or teardown problems are returned as an error code.
 */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* The free-inode btree fields only exist on finobt filesystems. */
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/*
	 * Check inode counters: the total count cannot exceed the number
	 * of inodes this AG can hold, and free inodes cannot outnumber
	 * allocated ones.
	 */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers; NULLAGINO is allowed here. */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Padding must be zeroed. */
	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	xfs_perag_put(pag);

	/* Cross-reference with the other per-AG metadata. */
	xchk_agi_xref(sc);
out:
	return error;
}