// SPDX-License-Identifier: GPL-2.0+
/*
 * Online scrub of the XFS AG headers: superblock, AGF, AGFL, and AGI.
 * (Header reconstructed from a garbled blame/table export of this file.)
 */
0b61f8a4 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
21fb4cb1 DW |
2 | /* |
3 | * Copyright (C) 2017 Oracle. All Rights Reserved. | |
21fb4cb1 | 4 | * Author: Darrick J. Wong <darrick.wong@oracle.com> |
21fb4cb1 DW |
5 | */ |
6 | #include "xfs.h" | |
7 | #include "xfs_fs.h" | |
8 | #include "xfs_shared.h" | |
9 | #include "xfs_format.h" | |
10 | #include "xfs_trans_resv.h" | |
11 | #include "xfs_mount.h" | |
21fb4cb1 | 12 | #include "xfs_btree.h" |
21fb4cb1 | 13 | #include "xfs_sb.h" |
ab9d5dc5 | 14 | #include "xfs_alloc.h" |
a12890ae | 15 | #include "xfs_ialloc.h" |
d852657c | 16 | #include "xfs_rmap.h" |
9bbafc71 | 17 | #include "xfs_ag.h" |
21fb4cb1 DW |
18 | #include "scrub/scrub.h" |
19 | #include "scrub/common.h" | |
21fb4cb1 | 20 | |
21fb4cb1 DW |
21 | /* Superblock */ |
22 | ||
166d7641 DW |
23 | /* Cross-reference with the other btrees. */ |
24 | STATIC void | |
c517b3aa | 25 | xchk_superblock_xref( |
1d8a748a | 26 | struct xfs_scrub *sc, |
032d91f9 | 27 | struct xfs_buf *bp) |
166d7641 | 28 | { |
032d91f9 DW |
29 | struct xfs_mount *mp = sc->mp; |
30 | xfs_agnumber_t agno = sc->sm->sm_agno; | |
31 | xfs_agblock_t agbno; | |
32 | int error; | |
52dc4b44 | 33 | |
166d7641 DW |
34 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
35 | return; | |
52dc4b44 DW |
36 | |
37 | agbno = XFS_SB_BLOCK(mp); | |
38 | ||
48c6615c | 39 | error = xchk_ag_init_existing(sc, agno, &sc->sa); |
c517b3aa | 40 | if (!xchk_xref_process_error(sc, agno, agbno, &error)) |
52dc4b44 DW |
41 | return; |
42 | ||
c517b3aa DW |
43 | xchk_xref_is_used_space(sc, agbno, 1); |
44 | xchk_xref_is_not_inode_chunk(sc, agbno, 1); | |
7280feda | 45 | xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); |
c517b3aa | 46 | xchk_xref_is_not_shared(sc, agbno, 1); |
52dc4b44 DW |
47 | |
48 | /* scrub teardown will take care of sc->sa for us */ | |
166d7641 DW |
49 | } |
50 | ||
/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 *
 * Each on-disk field of this secondary sb is compared against the incore
 * copy (mp->m_sb, taken from sb 0): mismatches in mkfs-time geometry are
 * flagged corrupt, while fields that may legitimately lag behind the
 * primary are only flagged for preening.
 *
 * Returns 0, -ENOENT if the AG vanished underneath us, or a negative
 * errno from reading the secondary superblock.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	/*
	 * Grab an active reference to the perag structure.  If we can't get
	 * it, we're racing with something that's tearing down the AG, so
	 * signal that the AG no longer exists.
	 */
	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		goto out_pag;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		/* MOREBITS unset: features2 must be entirely unused. */
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (xfs_sb_is_v5(&mp->m_sb))
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		/* sb_bad_features2 mirrors sb_features2; mismatch is preenable. */
		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_preen(sc, bp);

	if (!xfs_has_crc(mp)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* compat features must match */
		if (sb->sb_features_compat !=
				cpu_to_be32(mp->m_sb.sb_features_compat))
			xchk_block_set_corrupt(sc, bp);

		/* ro compat features must match */
		if (sb->sb_features_ro_compat !=
				cpu_to_be32(mp->m_sb.sb_features_ro_compat))
			xchk_block_set_corrupt(sc, bp);

		/*
		 * NEEDSREPAIR is ignored on a secondary super, so we should
		 * clear it when we find it, though it's not a corruption.
		 */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
				sb->sb_features_incompat) & features_mask)
			xchk_block_set_preen(sc, bp);

		/* all other incompat features must match */
		if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
				sb->sb_features_incompat) & ~features_mask)
			xchk_block_set_corrupt(sc, bp);

		/*
		 * log incompat features protect newer log record types from
		 * older log recovery code.  Log recovery doesn't check the
		 * secondary supers, so we can clear these if needed.
		 */
		if (sb->sb_features_log_incompat)
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_has_metauuid(mp)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);
out_pag:
	xfs_perag_put(pag);
	return error;
}
ab9d5dc5 DW |
352 | |
353 | /* AGF */ | |
354 | ||
52dc4b44 DW |
355 | /* Tally freespace record lengths. */ |
356 | STATIC int | |
c517b3aa | 357 | xchk_agf_record_bno_lengths( |
52dc4b44 | 358 | struct xfs_btree_cur *cur, |
159eb69d | 359 | const struct xfs_alloc_rec_incore *rec, |
52dc4b44 DW |
360 | void *priv) |
361 | { | |
362 | xfs_extlen_t *blocks = priv; | |
363 | ||
364 | (*blocks) += rec->ar_blockcount; | |
365 | return 0; | |
366 | } | |
367 | ||
368 | /* Check agf_freeblks */ | |
369 | static inline void | |
c517b3aa | 370 | xchk_agf_xref_freeblks( |
1d8a748a | 371 | struct xfs_scrub *sc) |
52dc4b44 | 372 | { |
9798f615 | 373 | struct xfs_agf *agf = sc->sa.agf_bp->b_addr; |
032d91f9 DW |
374 | xfs_extlen_t blocks = 0; |
375 | int error; | |
52dc4b44 DW |
376 | |
377 | if (!sc->sa.bno_cur) | |
378 | return; | |
379 | ||
380 | error = xfs_alloc_query_all(sc->sa.bno_cur, | |
c517b3aa DW |
381 | xchk_agf_record_bno_lengths, &blocks); |
382 | if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur)) | |
52dc4b44 DW |
383 | return; |
384 | if (blocks != be32_to_cpu(agf->agf_freeblks)) | |
c517b3aa | 385 | xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp); |
52dc4b44 DW |
386 | } |
387 | ||
/*
 * Cross reference the AGF with the cntbt (freespace by length btree).
 *
 * If the cntbt has no records at all, agf_freeblks had better be zero.
 * Otherwise the largest record in the cntbt (found via a <= lookup of the
 * maximal key) must match agf_longest.
 */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		/* Empty cntbt but nonzero free count is a contradiction. */
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
419 | ||
/*
 * Check the btree block counts in the AGF against the btrees.
 *
 * Counts the blocks in the rmapbt, bnobt, and cntbt; each btree's count
 * is reduced by one (the root block is excluded from agf_btreeblks —
 * NOTE(review): inferred from the arithmetic, confirm against the ondisk
 * format docs) and the sum is compared with agf_btreeblks.  Also verifies
 * agf_rmap_blocks along the way.
 */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_has_lazysbcount(sc->mp))
		return;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
469 | ||
/*
 * Check agf_refcount_blocks against tree size: the number of blocks in
 * the refcount btree must match the counter recorded in the AGF.
 */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	/* No refcountbt cursor (e.g. no reflink) -- nothing to check. */
	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
488 | ||
/*
 * Cross-reference the AGF with the other btrees.
 *
 * Initializes the per-AG btree cursors, then checks the AGF block's own
 * space/ownership status and the AGF counters (freeblks, longest,
 * btreeblks, refcount_blocks) against the btrees themselves.
 */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	/* Skip the xref pass if the AGF itself already failed scrub. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	/* Cursors must exist before any of the checks below can run. */
	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}
515 | ||
/*
 * Scrub the AGF.
 *
 * Validates the AG length, every btree root pointer and level recorded in
 * the AGF, the circular AGFL first/last/count bookkeeping, and the incore
 * perag counters, then hands off to xchk_agf_xref() for cross-referencing.
 * Returns 0 or a negative errno from reading the AG headers.
 */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != pag->block_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(pag, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > mp->m_alloc_maxlevels)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* rmapbt root/level only exist when the feature is enabled. */
	if (xfs_has_rmapbt(mp)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > mp->m_rmap_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Same for the refcount btree, gated on reflink. */
	if (xfs_has_reflink(mp)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(pag, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > mp->m_refc_maxlevels)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		/* The AGFL is a circular list; handle index wraparound. */
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (xfs_has_lazysbcount(sc->mp) &&
	    pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}
608 | ||
609 | /* AGFL */ | |
610 | ||
/* Walk context used to collect and de-duplicate AGFL entries. */
struct xchk_agfl_info {
	unsigned int		sz_entries;	/* capacity of @entries */
	unsigned int		nr_entries;	/* entries collected so far */
	xfs_agblock_t		*entries;	/* AGFL block numbers seen */
	struct xfs_scrub	*sc;		/* scrub context for callbacks */
};
617 | ||
/*
 * Cross-reference one AGFL block with the other btrees: it must be in-use
 * space, outside any inode chunk, owned by the AG (rmap owner AG), and
 * not shared.
 */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	/* Skip xref once the scrub has already flagged corruption. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}
632 | ||
/*
 * Scrub an AGFL block.
 *
 * xfs_agfl_walk() callback: record a valid AGFL block number into the
 * collection buffer, or mark the AGFL corrupt if the block number is out
 * of range or the buffer is already full.  Returns -ECANCELED to stop the
 * walk early once corruption has been flagged.
 */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;

	if (xfs_verify_agbno(sc->sa.pag, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	/* Abort the walk; the caller translates -ECANCELED back to 0. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}
656 | ||
d44b47fd | 657 | static int |
c517b3aa | 658 | xchk_agblock_cmp( |
d44b47fd DW |
659 | const void *pa, |
660 | const void *pb) | |
661 | { | |
662 | const xfs_agblock_t *a = pa; | |
663 | const xfs_agblock_t *b = pb; | |
664 | ||
665 | return (int)*a - (int)*b; | |
666 | } | |
667 | ||
/*
 * Cross-reference the AGFL header block with the other btrees.
 *
 * Unlike the other xref helpers, this one must leave the btree cursors
 * set up because the subsequent per-block AGFL walk reuses them.
 */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}
693 | ||
/*
 * Scrub the AGFL.
 *
 * Reads the AG headers, cross-references the AGFL block, then walks every
 * AGFL entry: each must be a valid AG block, the number of entries must
 * match agf_flcount, and (after sorting) no block may appear twice.
 * Returns 0, -EFSCORRUPTED if the AGF could not be read at all, -ENOMEM,
 * or a negative errno from the walk.
 */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	/* Can't check the AGFL without its owning AGF. */
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	/* -ECANCELED means the walk stopped after flagging corruption. */
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}
a12890ae DW |
765 | |
766 | /* AGI */ | |
767 | ||
/*
 * Check agi_count/agi_freecount: count the inodes and free inodes in the
 * inobt and compare both totals with the counters stored in the AGI.
 */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	/* No inobt cursor means we can't cross-reference. */
	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
788 | ||
/*
 * Check agi_[fi]blocks against tree size: with the inobtcounts feature,
 * the AGI records the block counts of the inobt (agi_iblocks) and finobt
 * (agi_fblocks); verify both against the actual btrees.
 */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	/* These counters only exist with the inobtcounts feature. */
	if (!xfs_has_inobtcounts(sc->mp))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}
817 | ||
166d7641 DW |
818 | /* Cross-reference with the other btrees. */ |
819 | STATIC void | |
c517b3aa | 820 | xchk_agi_xref( |
1d8a748a | 821 | struct xfs_scrub *sc) |
166d7641 | 822 | { |
032d91f9 DW |
823 | struct xfs_mount *mp = sc->mp; |
824 | xfs_agblock_t agbno; | |
52dc4b44 | 825 | |
166d7641 DW |
826 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
827 | return; | |
52dc4b44 DW |
828 | |
829 | agbno = XFS_AGI_BLOCK(mp); | |
830 | ||
f53acfac | 831 | xchk_ag_btcur_init(sc, &sc->sa); |
52dc4b44 | 832 | |
c517b3aa DW |
833 | xchk_xref_is_used_space(sc, agbno, 1); |
834 | xchk_xref_is_not_inode_chunk(sc, agbno, 1); | |
835 | xchk_agi_xref_icounts(sc); | |
7280feda | 836 | xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); |
c517b3aa | 837 | xchk_xref_is_not_shared(sc, agbno, 1); |
1dbbff02 | 838 | xchk_agi_xref_fiblocks(sc); |
52dc4b44 DW |
839 | |
840 | /* scrub teardown will take care of sc->sa for us */ | |
166d7641 DW |
841 | } |
842 | ||
a12890ae DW |
843 | /* Scrub the AGI. */ |
844 | int | |
c517b3aa | 845 | xchk_agi( |
1d8a748a | 846 | struct xfs_scrub *sc) |
a12890ae | 847 | { |
032d91f9 DW |
848 | struct xfs_mount *mp = sc->mp; |
849 | struct xfs_agi *agi; | |
47cd97b5 | 850 | struct xfs_perag *pag; |
f4585e82 | 851 | struct xfs_ino_geometry *igeo = M_IGEO(sc->mp); |
de9d2a78 | 852 | xfs_agnumber_t agno = sc->sm->sm_agno; |
032d91f9 DW |
853 | xfs_agblock_t agbno; |
854 | xfs_agblock_t eoag; | |
855 | xfs_agino_t agino; | |
856 | xfs_agino_t first_agino; | |
857 | xfs_agino_t last_agino; | |
858 | xfs_agino_t icount; | |
859 | int i; | |
860 | int level; | |
861 | int error = 0; | |
a12890ae | 862 | |
de9d2a78 | 863 | error = xchk_ag_read_headers(sc, agno, &sc->sa); |
c517b3aa | 864 | if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error)) |
a12890ae | 865 | goto out; |
c517b3aa | 866 | xchk_buffer_recheck(sc, sc->sa.agi_bp); |
a12890ae | 867 | |
370c782b | 868 | agi = sc->sa.agi_bp->b_addr; |
48c6615c | 869 | pag = sc->sa.pag; |
a12890ae DW |
870 | |
871 | /* Check the AG length */ | |
872 | eoag = be32_to_cpu(agi->agi_length); | |
0800169e | 873 | if (eoag != pag->block_count) |
c517b3aa | 874 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
875 | |
876 | /* Check btree roots and levels */ | |
877 | agbno = be32_to_cpu(agi->agi_root); | |
0800169e | 878 | if (!xfs_verify_agbno(pag, agbno)) |
c517b3aa | 879 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
880 | |
881 | level = be32_to_cpu(agi->agi_level); | |
f4585e82 | 882 | if (level <= 0 || level > igeo->inobt_maxlevels) |
c517b3aa | 883 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae | 884 | |
38c26bfd | 885 | if (xfs_has_finobt(mp)) { |
a12890ae | 886 | agbno = be32_to_cpu(agi->agi_free_root); |
0800169e | 887 | if (!xfs_verify_agbno(pag, agbno)) |
c517b3aa | 888 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
889 | |
890 | level = be32_to_cpu(agi->agi_free_level); | |
f4585e82 | 891 | if (level <= 0 || level > igeo->inobt_maxlevels) |
c517b3aa | 892 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
893 | } |
894 | ||
895 | /* Check inode counters */ | |
86210fbe | 896 | xfs_agino_range(mp, agno, &first_agino, &last_agino); |
a12890ae DW |
897 | icount = be32_to_cpu(agi->agi_count); |
898 | if (icount > last_agino - first_agino + 1 || | |
899 | icount < be32_to_cpu(agi->agi_freecount)) | |
c517b3aa | 900 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
901 | |
902 | /* Check inode pointers */ | |
903 | agino = be32_to_cpu(agi->agi_newino); | |
2d6ca832 | 904 | if (!xfs_verify_agino_or_null(pag, agino)) |
c517b3aa | 905 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
906 | |
907 | agino = be32_to_cpu(agi->agi_dirino); | |
2d6ca832 | 908 | if (!xfs_verify_agino_or_null(pag, agino)) |
c517b3aa | 909 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
910 | |
911 | /* Check unlinked inode buckets */ | |
912 | for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) { | |
913 | agino = be32_to_cpu(agi->agi_unlinked[i]); | |
2d6ca832 | 914 | if (!xfs_verify_agino_or_null(pag, agino)) |
c517b3aa | 915 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae DW |
916 | } |
917 | ||
918 | if (agi->agi_pad32 != cpu_to_be32(0)) | |
c517b3aa | 919 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); |
a12890ae | 920 | |
47cd97b5 | 921 | /* Do the incore counters match? */ |
47cd97b5 DW |
922 | if (pag->pagi_count != be32_to_cpu(agi->agi_count)) |
923 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); | |
924 | if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount)) | |
925 | xchk_block_set_corrupt(sc, sc->sa.agi_bp); | |
47cd97b5 | 926 | |
c517b3aa | 927 | xchk_agi_xref(sc); |
a12890ae DW |
928 | out: |
929 | return error; | |
930 | } |