// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 */
15 #include "err_protos.h"
27 * null out quota inode fields in sb if they point to non-existent inodes.
28 * this isn't as redundant as it looks since it's possible that the sb field
29 * might be set but the imap and inode(s) agree that the inode is
30 * free in which case they'd never be cleared so the fields wouldn't
31 * be cleared by process_dinode().
34 quotino_check(xfs_mount_t
*mp
)
36 ino_tree_node_t
*irec
;
38 if (mp
->m_sb
.sb_uquotino
!= NULLFSINO
&& mp
->m_sb
.sb_uquotino
!= 0) {
39 if (verify_inum(mp
, mp
->m_sb
.sb_uquotino
))
42 irec
= find_inode_rec(mp
,
43 XFS_INO_TO_AGNO(mp
, mp
->m_sb
.sb_uquotino
),
44 XFS_INO_TO_AGINO(mp
, mp
->m_sb
.sb_uquotino
));
46 if (irec
== NULL
|| is_inode_free(irec
,
47 mp
->m_sb
.sb_uquotino
- irec
->ino_startnum
)) {
48 mp
->m_sb
.sb_uquotino
= NULLFSINO
;
54 if (mp
->m_sb
.sb_gquotino
!= NULLFSINO
&& mp
->m_sb
.sb_gquotino
!= 0) {
55 if (verify_inum(mp
, mp
->m_sb
.sb_gquotino
))
58 irec
= find_inode_rec(mp
,
59 XFS_INO_TO_AGNO(mp
, mp
->m_sb
.sb_gquotino
),
60 XFS_INO_TO_AGINO(mp
, mp
->m_sb
.sb_gquotino
));
62 if (irec
== NULL
|| is_inode_free(irec
,
63 mp
->m_sb
.sb_gquotino
- irec
->ino_startnum
)) {
64 mp
->m_sb
.sb_gquotino
= NULLFSINO
;
70 if (mp
->m_sb
.sb_pquotino
!= NULLFSINO
&& mp
->m_sb
.sb_pquotino
!= 0) {
71 if (verify_inum(mp
, mp
->m_sb
.sb_pquotino
))
74 irec
= find_inode_rec(mp
,
75 XFS_INO_TO_AGNO(mp
, mp
->m_sb
.sb_pquotino
),
76 XFS_INO_TO_AGINO(mp
, mp
->m_sb
.sb_pquotino
));
78 if (irec
== NULL
|| is_inode_free(irec
,
79 mp
->m_sb
.sb_pquotino
- irec
->ino_startnum
)) {
80 mp
->m_sb
.sb_pquotino
= NULLFSINO
;
88 quota_sb_check(xfs_mount_t
*mp
)
91 * if the sb says we have quotas and we lost both,
92 * signal a superblock downgrade. that will cause
93 * the quota flags to get zeroed. (if we only lost
94 * one quota inode, do nothing and complain later.)
96 * if the sb says we have quotas but we didn't start out
97 * with any quota inodes, signal a superblock downgrade.
99 * The sb downgrades are so that older systems can mount
102 * if the sb says we don't have quotas but it looks like
103 * we do have quota inodes, then signal a superblock upgrade.
105 * if the sb says we don't have quotas and we have no
106 * quota inodes, then leave will enough alone.
110 (mp
->m_sb
.sb_uquotino
== NULLFSINO
|| mp
->m_sb
.sb_uquotino
== 0) &&
111 (mp
->m_sb
.sb_gquotino
== NULLFSINO
|| mp
->m_sb
.sb_gquotino
== 0) &&
112 (mp
->m_sb
.sb_pquotino
== NULLFSINO
|| mp
->m_sb
.sb_pquotino
== 0)) {
115 } else if (!verify_inum(mp
, mp
->m_sb
.sb_uquotino
) &&
116 !verify_inum(mp
, mp
->m_sb
.sb_gquotino
) &&
117 !verify_inum(mp
, mp
->m_sb
.sb_pquotino
)) {
125 struct workqueue
*wq
,
129 wait_for_inode_prefetch(arg
);
130 do_log(_(" - agno = %d\n"), agno
);
131 process_aginodes(wq
->wq_ctx
, arg
, agno
, 0, 1, 0);
133 cleanup_inode_prefetch(arg
);
136 * now recycle the per-AG duplicate extent records
138 release_dup_extent_tree(agno
);
148 do_inode_prefetch(mp
, ag_stride
, process_ag_func
, true, false);
149 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
150 error
= rmap_finish_collecting_fork_recs(mp
, i
);
153 _("unable to finish adding attr/data fork reverse-mapping data for AG %u.\n"),
166 error
= rmap_add_fixed_ag_rec(wq
->wq_ctx
, agno
);
169 _("unable to add AG %u metadata reverse-mapping data.\n"), agno
);
171 error
= rmap_fold_raw_recs(wq
->wq_ctx
, agno
);
174 _("unable to merge AG %u metadata reverse-mapping data.\n"), agno
);
176 error
= rmaps_verify_btree(wq
->wq_ctx
, agno
);
179 _("%s while checking reverse-mappings"),
184 compute_ag_refcounts(
191 error
= compute_refcounts(wq
->wq_ctx
, agno
);
194 _("%s while computing reference count records.\n"),
199 process_inode_reflink_flags(
200 struct workqueue
*wq
,
206 error
= fix_inode_reflink_flags(wq
->wq_ctx
, agno
);
209 _("%s while fixing inode reflink flags.\n"),
214 check_refcount_btrees(
221 error
= check_refcounts(wq
->wq_ctx
, agno
);
224 _("%s while checking reference counts"),
230 struct xfs_mount
*mp
)
235 if (!rmap_needs_work(mp
))
238 create_work_queue(&wq
, mp
, platform_nproc());
239 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++)
240 queue_work(&wq
, check_rmap_btrees
, i
, NULL
);
241 destroy_work_queue(&wq
);
243 if (!xfs_sb_version_hasreflink(&mp
->m_sb
))
246 create_work_queue(&wq
, mp
, platform_nproc());
247 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++)
248 queue_work(&wq
, compute_ag_refcounts
, i
, NULL
);
249 destroy_work_queue(&wq
);
251 create_work_queue(&wq
, mp
, platform_nproc());
252 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
253 queue_work(&wq
, process_inode_reflink_flags
, i
, NULL
);
254 queue_work(&wq
, check_refcount_btrees
, i
, NULL
);
256 destroy_work_queue(&wq
);
260 phase4(xfs_mount_t
*mp
)
262 ino_tree_node_t
*irec
;
264 xfs_rtblock_t rt_start
;
268 xfs_agblock_t ag_end
;
270 int ag_hdr_len
= 4 * mp
->m_sb
.sb_sectsize
;
274 if (rmap_needs_work(mp
))
275 collect_rmaps
= true;
276 ag_hdr_block
= howmany(ag_hdr_len
, mp
->m_sb
.sb_blocksize
);
278 do_log(_("Phase 4 - check for duplicate blocks...\n"));
279 do_log(_(" - setting up duplicate extent list...\n"));
281 set_progress_msg(PROG_FMT_DUP_EXTENT
, (uint64_t) glob_agcount
);
283 irec
= find_inode_rec(mp
, XFS_INO_TO_AGNO(mp
, mp
->m_sb
.sb_rootino
),
284 XFS_INO_TO_AGINO(mp
, mp
->m_sb
.sb_rootino
));
287 * we always have a root inode, even if it's free...
288 * if the root is free, forget it, lost+found is already gone
290 if (is_inode_free(irec
, 0) || !inode_isadir(irec
, 0)) {
293 do_warn(_("root inode would be lost\n"));
295 do_warn(_("root inode lost\n"));
298 for (i
= 0; i
< mp
->m_sb
.sb_agcount
; i
++) {
299 ag_end
= (i
< mp
->m_sb
.sb_agcount
- 1) ? mp
->m_sb
.sb_agblocks
:
300 mp
->m_sb
.sb_dblocks
-
301 (xfs_rfsblock_t
) mp
->m_sb
.sb_agblocks
* i
;
304 * set up duplicate extent list for this ag
306 for (j
= ag_hdr_block
; j
< ag_end
; j
+= blen
) {
307 bstate
= get_bmap_ext(i
, j
, ag_end
, &blen
);
312 _("unknown block state, ag %d, block %d\n"),
314 /* fall through .. */
324 add_dup_extent(i
, j
, blen
);
329 PROG_RPT_INC(prog_rpt_done
[i
], 1);
334 * initialize realtime bitmap
339 for (bno
= 0; bno
< mp
->m_sb
.sb_rextents
; bno
++) {
340 bstate
= get_rtbmap(bno
);
345 _("unknown rt extent state, extent %" PRIu64
"\n"),
347 /* fall through .. */
359 * add extent and reset extent state
361 add_rt_dup_extent(rt_start
, rt_len
);
370 } else if (rt_len
== MAXEXTLEN
) {
374 add_rt_dup_extent(rt_start
, rt_len
);
384 * catch tail-case, extent hitting the end of the ag
387 add_rt_dup_extent(rt_start
, rt_len
);
390 * initialize bitmaps for all AGs
394 do_log(_(" - check for inodes claiming duplicate blocks...\n"));
395 set_progress_msg(PROG_FMT_DUP_BLOCKS
, (uint64_t) mp
->m_sb
.sb_icount
);
398 * ok, now process the inodes -- signal 2-pass check per inode.
399 * first pass checks if the inode conflicts with a known
400 * duplicate extent. if so, the inode is cleared and second
401 * pass is skipped. second pass sets the block bitmap
402 * for all blocks claimed by the inode. directory
403 * and attribute processing is turned OFF since we did that
404 * already in phase 3.
409 * Process all the reverse-mapping data that we collected. This
410 * involves checking the rmap data against the btree, computing
411 * reference counts based on the rmap data, and checking the counts
412 * against the refcount btree.
414 process_rmap_data(mp
);
419 * free up memory used to track trealtime duplicate extents
422 free_rt_dup_extent_tree(mp
);
425 * ensure consistency of quota inode pointers in superblock,
426 * make sure they point to real inodes