repair/phase4.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "libxfs.h"
#include "threads.h"
#include "prefetch.h"
#include "avl.h"
#include "globals.h"
#include "agheader.h"
#include "incore.h"
#include "protos.h"
#include "err_protos.h"
#include "dinode.h"
#include "bmap.h"
#include "versions.h"
#include "dir2.h"
#include "progress.h"
#include "slab.h"
#include "rmap.h"

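/*
 * True when phase 4 should collect reverse-mapping records while scanning
 * inodes; set below when rmap_needs_work() says the filesystem has
 * features that need them.
 */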
bool collect_rmaps;

/*
 * null out quota inode fields in the sb if they point to non-existent inodes.
 * this isn't as redundant as it looks since it's possible that the sb fields
 * are set but the imap and the inode(s) agree that the inode is free, in
 * which case process_dinode() would never get the chance to clear them.
 */
static void
quotino_check(xfs_mount_t *mp)
{
        ino_tree_node_t         *irec;

        if (mp->m_sb.sb_uquotino != NULLFSINO && mp->m_sb.sb_uquotino != 0) {
                if (verify_inum(mp, mp->m_sb.sb_uquotino))
                        irec = NULL;
                else
                        irec = find_inode_rec(mp,
                                XFS_INO_TO_AGNO(mp, mp->m_sb.sb_uquotino),
                                XFS_INO_TO_AGINO(mp, mp->m_sb.sb_uquotino));

                if (irec == NULL || is_inode_free(irec,
                                mp->m_sb.sb_uquotino - irec->ino_startnum)) {
                        mp->m_sb.sb_uquotino = NULLFSINO;
                        lost_uquotino = 1;
                } else
                        lost_uquotino = 0;
        }

        if (mp->m_sb.sb_gquotino != NULLFSINO && mp->m_sb.sb_gquotino != 0) {
                if (verify_inum(mp, mp->m_sb.sb_gquotino))
                        irec = NULL;
                else
                        irec = find_inode_rec(mp,
                                XFS_INO_TO_AGNO(mp, mp->m_sb.sb_gquotino),
                                XFS_INO_TO_AGINO(mp, mp->m_sb.sb_gquotino));

                if (irec == NULL || is_inode_free(irec,
                                mp->m_sb.sb_gquotino - irec->ino_startnum)) {
                        mp->m_sb.sb_gquotino = NULLFSINO;
                        lost_gquotino = 1;
                } else
                        lost_gquotino = 0;
        }

        if (mp->m_sb.sb_pquotino != NULLFSINO && mp->m_sb.sb_pquotino != 0) {
                if (verify_inum(mp, mp->m_sb.sb_pquotino))
                        irec = NULL;
                else
                        irec = find_inode_rec(mp,
                                XFS_INO_TO_AGNO(mp, mp->m_sb.sb_pquotino),
                                XFS_INO_TO_AGINO(mp, mp->m_sb.sb_pquotino));

                if (irec == NULL || is_inode_free(irec,
                                mp->m_sb.sb_pquotino - irec->ino_startnum)) {
                        mp->m_sb.sb_pquotino = NULLFSINO;
                        lost_pquotino = 1;
                } else
                        lost_pquotino = 0;
        }
}

static void
quota_sb_check(xfs_mount_t *mp)
{
        /*
         * if the sb says we have quotas and we lost both,
         * signal a superblock downgrade. that will cause
         * the quota flags to get zeroed. (if we only lost
         * one quota inode, do nothing and complain later.)
         *
         * if the sb says we have quotas but we didn't start out
         * with any quota inodes, signal a superblock downgrade.
         *
         * The sb downgrades are so that older systems can mount
         * the filesystem.
         *
         * if the sb says we don't have quotas but it looks like
         * we do have quota inodes, then signal a superblock upgrade.
         *
         * if the sb says we don't have quotas and we have no
         * quota inodes, then leave well enough alone.
         */

        if (fs_quotas &&
            (mp->m_sb.sb_uquotino == NULLFSINO || mp->m_sb.sb_uquotino == 0) &&
            (mp->m_sb.sb_gquotino == NULLFSINO || mp->m_sb.sb_gquotino == 0) &&
            (mp->m_sb.sb_pquotino == NULLFSINO || mp->m_sb.sb_pquotino == 0)) {
                lost_quotas = 1;
                fs_quotas = 0;
        } else if (!verify_inum(mp, mp->m_sb.sb_uquotino) &&
                   !verify_inum(mp, mp->m_sb.sb_gquotino) &&
                   !verify_inum(mp, mp->m_sb.sb_pquotino)) {
                fs_quotas = 1;
        }
}


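/*
 * Per-AG worker run via do_inode_prefetch(): wait for the prefetch of
 * this AG's inodes to catch up, process the AG's inodes, then release
 * the per-AG state that phase 4 no longer needs.
 */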
static void
process_ag_func(
        struct workqueue        *wq,
        xfs_agnumber_t          agno,
        void                    *arg)
{
        wait_for_inode_prefetch(arg);
        do_log(_("        - agno = %d\n"), agno);
        process_aginodes(wq->wq_ctx, arg, agno, 0, 1, 0);
        blkmap_free_final();
        cleanup_inode_prefetch(arg);

        /*
         * now recycle the per-AG duplicate extent records
         */
        release_dup_extent_tree(agno);
}

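/*
 * Drive process_ag_func() across all AGs (with prefetching, and in
 * parallel when ag_stride is set), then finish collecting the per-AG
 * attr/data fork reverse-mapping records.
 */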
static void
process_ags(
        xfs_mount_t             *mp)
{
        xfs_agnumber_t          i;
        int                     error;

        do_inode_prefetch(mp, ag_stride, process_ag_func, true, false);
        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                error = rmap_finish_collecting_fork_recs(mp, i);
                if (error)
                        do_error(
_("unable to finish adding attr/data fork reverse-mapping data for AG %u.\n"),
                                i);
        }
}

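/*
 * Per-AG worker: record the rmaps for the fixed AG metadata, fold the
 * raw (unmerged) rmap records into the final per-AG list, and check
 * the result against the on-disk rmap btree.
 */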
static void
check_rmap_btrees(
        struct workqueue        *wq,
        xfs_agnumber_t          agno,
        void                    *arg)
{
        int                     error;

        error = rmap_add_fixed_ag_rec(wq->wq_ctx, agno);
        if (error)
                do_error(
_("unable to add AG %u metadata reverse-mapping data.\n"), agno);

        error = rmap_fold_raw_recs(wq->wq_ctx, agno);
        if (error)
                do_error(
_("unable to merge AG %u metadata reverse-mapping data.\n"), agno);

        error = rmaps_verify_btree(wq->wq_ctx, agno);
        if (error)
                do_error(
_("%s while checking reverse-mappings"),
                        strerror(-error));
}

static void
compute_ag_refcounts(
        struct workqueue        *wq,
        xfs_agnumber_t          agno,
        void                    *arg)
{
        int                     error;

        error = compute_refcounts(wq->wq_ctx, agno);
        if (error)
                do_error(
_("%s while computing reference count records.\n"),
                        strerror(-error));
}

static void
process_inode_reflink_flags(
        struct workqueue        *wq,
        xfs_agnumber_t          agno,
        void                    *arg)
{
        int                     error;

        error = fix_inode_reflink_flags(wq->wq_ctx, agno);
        if (error)
                do_error(
_("%s while fixing inode reflink flags.\n"),
                        strerror(-error));
}

static void
check_refcount_btrees(
        struct workqueue        *wq,
        xfs_agnumber_t          agno,
        void                    *arg)
{
        int                     error;

        error = check_refcounts(wq->wq_ctx, agno);
        if (error)
                do_error(
_("%s while checking reference counts"),
                        strerror(-error));
}

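/*
 * Work through the collected reverse-mapping data in three passes of
 * per-AG work queues: verify the rmap btrees; then, on reflink
 * filesystems, compute refcount records from the rmaps; and finally
 * fix inode reflink flags and check the refcount btrees.
 */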
static void
process_rmap_data(
        struct xfs_mount        *mp)
{
        struct workqueue        wq;
        xfs_agnumber_t          i;

        if (!rmap_needs_work(mp))
                return;

        create_work_queue(&wq, mp, libxfs_nproc());
        for (i = 0; i < mp->m_sb.sb_agcount; i++)
                queue_work(&wq, check_rmap_btrees, i, NULL);
        destroy_work_queue(&wq);

        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return;

        create_work_queue(&wq, mp, libxfs_nproc());
        for (i = 0; i < mp->m_sb.sb_agcount; i++)
                queue_work(&wq, compute_ag_refcounts, i, NULL);
        destroy_work_queue(&wq);

        create_work_queue(&wq, mp, libxfs_nproc());
        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                queue_work(&wq, process_inode_reflink_flags, i, NULL);
                queue_work(&wq, check_refcount_btrees, i, NULL);
        }
        destroy_work_queue(&wq);
}

void
phase4(xfs_mount_t *mp)
{
        ino_tree_node_t         *irec;
        xfs_rtblock_t           bno;
        xfs_rtblock_t           rt_start;
        xfs_extlen_t            rt_len;
        xfs_agnumber_t          i;
        xfs_agblock_t           j;
        xfs_agblock_t           ag_end;
        xfs_extlen_t            blen;
        int                     ag_hdr_len = 4 * mp->m_sb.sb_sectsize;
        int                     ag_hdr_block;
        int                     bstate;

        if (rmap_needs_work(mp))
                collect_rmaps = true;
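        /*
         * The first ag_hdr_block blocks of each AG hold the four AG
         * header sectors (superblock, AGF, AGI, AGFL), hence the
         * ag_hdr_len of four sectors; the duplicate-extent scan below
         * starts after them.
         */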
        ag_hdr_block = howmany(ag_hdr_len, mp->m_sb.sb_blocksize);

        do_log(_("Phase 4 - check for duplicate blocks...\n"));
        do_log(_("        - setting up duplicate extent list...\n"));

        set_progress_msg(PROG_FMT_DUP_EXTENT, (uint64_t) glob_agcount);

        irec = find_inode_rec(mp, XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino),
                        XFS_INO_TO_AGINO(mp, mp->m_sb.sb_rootino));

        /*
         * we always have a root inode, even if it's free...
         * if the root is free, forget it, lost+found is already gone
         */
        if (is_inode_free(irec, 0) || !inode_isadir(irec, 0)) {
                need_root_inode = 1;
                if (no_modify)
                        do_warn(_("root inode would be lost\n"));
                else
                        do_warn(_("root inode lost\n"));
        }

        for (i = 0; i < mp->m_sb.sb_agcount; i++) {
                ag_end = (i < mp->m_sb.sb_agcount - 1) ? mp->m_sb.sb_agblocks :
                        mp->m_sb.sb_dblocks -
                        (xfs_rfsblock_t) mp->m_sb.sb_agblocks * i;

                /*
                 * set up duplicate extent list for this ag
                 */
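                /*
                 * get_bmap_ext() returns the state of the run of
                 * same-state blocks starting at j and its length via
                 * blen, so the scan advances extent by extent rather
                 * than block by block.
                 */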
                for (j = ag_hdr_block; j < ag_end; j += blen) {
                        bstate = get_bmap_ext(i, j, ag_end, &blen);
                        switch (bstate) {
                        case XR_E_BAD_STATE:
                        default:
                                do_warn(
_("unknown block state, ag %d, block %d\n"),
                                        i, j);
                                /* fall through .. */
                        case XR_E_UNKNOWN:
                        case XR_E_FREE1:
                        case XR_E_FREE:
                        case XR_E_INUSE:
                        case XR_E_INUSE_FS:
                        case XR_E_INO:
                        case XR_E_FS_MAP:
                                break;
                        case XR_E_MULT:
                                add_dup_extent(i, j, blen);
                                break;
                        }
                }

                PROG_RPT_INC(prog_rpt_done[i], 1);
        }
        print_final_rpt();

        /*
         * initialize realtime bitmap
         */
        rt_start = 0;
        rt_len = 0;

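        /*
         * Walk the realtime extents and coalesce runs of
         * multiply-claimed (XR_E_MULT) extents into duplicate-extent
         * records, splitting any run that would exceed MAXEXTLEN.
         */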
        for (bno = 0; bno < mp->m_sb.sb_rextents; bno++) {
                bstate = get_rtbmap(bno);
                switch (bstate) {
                case XR_E_BAD_STATE:
                default:
                        do_warn(
_("unknown rt extent state, extent %" PRIu64 "\n"),
                                bno);
                        /* fall through .. */
                case XR_E_UNKNOWN:
                case XR_E_FREE1:
                case XR_E_FREE:
                case XR_E_INUSE:
                case XR_E_INUSE_FS:
                case XR_E_INO:
                case XR_E_FS_MAP:
                        if (rt_start == 0)
                                continue;
                        else {
                                /*
                                 * add extent and reset extent state
                                 */
                                add_rt_dup_extent(rt_start, rt_len);
                                rt_start = 0;
                                rt_len = 0;
                        }
                        break;
                case XR_E_MULT:
                        if (rt_start == 0) {
                                rt_start = bno;
                                rt_len = 1;
                        } else if (rt_len == MAXEXTLEN) {
                                /*
                                 * large extent case
                                 */
                                add_rt_dup_extent(rt_start, rt_len);
                                rt_start = bno;
                                rt_len = 1;
                        } else
                                rt_len++;
                        break;
                }
        }

        /*
         * catch tail-case, extent hitting the end of the realtime area
         */
        if (rt_start != 0)
                add_rt_dup_extent(rt_start, rt_len);

        /*
         * initialize bitmaps for all AGs
         */
        reset_bmaps(mp);

        do_log(_("        - check for inodes claiming duplicate blocks...\n"));
        set_progress_msg(PROG_FMT_DUP_BLOCKS, (uint64_t) mp->m_sb.sb_icount);

        /*
         * ok, now process the inodes -- signal a 2-pass check per inode.
         * the first pass checks whether the inode conflicts with a known
         * duplicate extent; if so, the inode is cleared and the second
         * pass is skipped. the second pass sets the block bitmap
         * for all blocks claimed by the inode. directory
         * and attribute processing is turned OFF since we already did
         * that in phase 3.
         */
        process_ags(mp);

        /*
         * Process all the reverse-mapping data that we collected. This
         * involves checking the rmap data against the btree, computing
         * reference counts based on the rmap data, and checking the counts
         * against the refcount btree.
         */
        process_rmap_data(mp);

        print_final_rpt();

        /*
         * free up memory used to track realtime duplicate extents
         */
        if (rt_start != 0)
                free_rt_dup_extent_tree(mp);

        /*
         * ensure consistency of quota inode pointers in the superblock;
         * make sure they point to real inodes
         */
        quotino_check(mp);
        quota_sb_check(mp);
}