]>
git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - scrub/inodes.c
1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
10 #include <sys/statvfs.h>
11 #include "platform_defs.h"
13 #include "xfs_format.h"
16 #include "workqueue.h"
17 #include "xfs_scrub.h"
20 #include "libfrog/fsgeom.h"
21 #include "libfrog/bulkstat.h"
24 * Iterate a range of inodes.
26 * This is a little more involved than repeatedly asking BULKSTAT for a
27 * buffer's worth of stat data for some number of inodes. We want to scan as
28 * many of the inodes that the inobt thinks there are, including the ones that
29 * are broken, but if we ask for n inodes starting at x, it'll skip the bad
30 * ones and fill from beyond the range (x + n).
32 * Therefore, we ask INUMBERS to return one inobt chunk's worth of inode
33 * bitmap information. Then we try to BULKSTAT only the inodes that were
34 * present in that chunk, and compare what we got against what INUMBERS said
35 * was there. If there's a mismatch, we know that we have an inode that fails
36 * the verifiers but we can inject the bulkstat information to force the scrub
37 * code to deal with the broken inodes.
39 * If the iteration function returns ESTALE, that means that the inode has
40 been deleted and possibly recreated since the BULKSTAT call. We will
41 * refresh the stat information and try again up to 30 times before reporting
42 * the staleness as an error.
46 * Did we get exactly the inodes we expected? If not, load them one at a
47 * time (or fake it) into the bulkstat data.
50 xfs_iterate_inodes_range_check(
51 struct scrub_ctx
*ctx
,
52 struct xfs_inogrp
*inogrp
,
53 struct xfs_bstat
*bstat
)
59 for (i
= 0, bs
= bstat
; i
< XFS_INODES_PER_CHUNK
; i
++) {
60 if (!(inogrp
->xi_allocmask
& (1ULL << i
)))
62 if (bs
->bs_ino
== inogrp
->xi_startino
+ i
) {
67 /* Load the one inode. */
68 error
= xfrog_bulkstat_single(&ctx
->mnt
,
69 inogrp
->xi_startino
+ i
, bs
);
70 if (error
|| bs
->bs_ino
!= inogrp
->xi_startino
+ i
) {
71 memset(bs
, 0, sizeof(struct xfs_bstat
));
72 bs
->bs_ino
= inogrp
->xi_startino
+ i
;
73 bs
->bs_blksize
= ctx
->mnt_sv
.f_frsize
;
80 * Call into the filesystem for inode/bulkstat information and call our
81 * iterator function. We'll try to fill the bulkstat information in batches,
82 * but we also can detect iget failures.
85 xfs_iterate_inodes_range(
86 struct scrub_ctx
*ctx
,
94 struct xfs_handle handle
;
95 struct xfs_inogrp inogrp
;
96 struct xfs_bstat bstat
[XFS_INODES_PER_CHUNK
];
97 char idescr
[DESCR_BUFSZ
];
101 uint32_t bulklen
= 0;
102 uint32_t igrplen
= 0;
109 memset(bstat
, 0, XFS_INODES_PER_CHUNK
* sizeof(struct xfs_bstat
));
111 memcpy(&handle
.ha_fsid
, fshandle
, sizeof(handle
.ha_fsid
));
112 handle
.ha_fid
.fid_len
= sizeof(xfs_fid_t
) -
113 sizeof(handle
.ha_fid
.fid_len
);
114 handle
.ha_fid
.fid_pad
= 0;
116 /* Find the inode chunk & alloc mask */
117 igrp_ino
= first_ino
;
118 error
= xfrog_inumbers(&ctx
->mnt
, &igrp_ino
, 1, &inogrp
, &igrplen
);
119 while (!error
&& igrplen
) {
120 /* Load the inodes. */
121 ino
= inogrp
.xi_startino
- 1;
124 * We can have totally empty inode chunks on filesystems where
125 * there are more than 64 inodes per block. Skip these.
127 if (inogrp
.xi_alloccount
== 0)
129 error
= xfrog_bulkstat(&ctx
->mnt
, &ino
, inogrp
.xi_alloccount
,
132 char errbuf
[DESCR_BUFSZ
];
134 str_info(ctx
, descr
, "%s", strerror_r(error
,
135 errbuf
, DESCR_BUFSZ
));
138 xfs_iterate_inodes_range_check(ctx
, &inogrp
, bstat
);
140 /* Iterate all the inodes. */
141 for (i
= 0, bs
= bstat
; i
< inogrp
.xi_alloccount
; i
++, bs
++) {
142 if (bs
->bs_ino
> last_ino
)
145 handle
.ha_fid
.fid_ino
= bs
->bs_ino
;
146 handle
.ha_fid
.fid_gen
= bs
->bs_gen
;
147 error
= fn(ctx
, &handle
, bs
, arg
);
153 if (stale_count
< 30) {
154 igrp_ino
= inogrp
.xi_startino
;
157 snprintf(idescr
, DESCR_BUFSZ
, "inode %"PRIu64
,
158 (uint64_t)bs
->bs_ino
);
159 str_info(ctx
, idescr
,
160 _("Changed too many times during scan; giving up."));
162 case XFS_ITERATE_INODES_ABORT
:
170 if (xfs_scrub_excessive_errors(ctx
)) {
178 error
= xfrog_inumbers(&ctx
->mnt
, &igrp_ino
, 1, &inogrp
,
184 str_liberror(ctx
, error
, descr
);
191 /* BULKSTAT wrapper routines. */
192 struct xfs_scan_inodes
{
193 xfs_inode_iter_fn fn
;
198 /* Scan all the inodes in an AG. */
201 struct workqueue
*wq
,
205 struct xfs_scan_inodes
*si
= arg
;
206 struct scrub_ctx
*ctx
= (struct scrub_ctx
*)wq
->wq_ctx
;
207 char descr
[DESCR_BUFSZ
];
209 uint64_t next_ag_ino
;
212 snprintf(descr
, DESCR_BUFSZ
, _("dev %d:%d AG %u inodes"),
213 major(ctx
->fsinfo
.fs_datadev
),
214 minor(ctx
->fsinfo
.fs_datadev
),
217 ag_ino
= cvt_agino_to_ino(&ctx
->mnt
, agno
, 0);
218 next_ag_ino
= cvt_agino_to_ino(&ctx
->mnt
, agno
+ 1, 0);
220 moveon
= xfs_iterate_inodes_range(ctx
, descr
, ctx
->fshandle
, ag_ino
,
221 next_ag_ino
- 1, si
->fn
, si
->arg
);
226 /* Scan all the inodes in a filesystem. */
229 struct scrub_ctx
*ctx
,
230 xfs_inode_iter_fn fn
,
233 struct xfs_scan_inodes si
;
242 ret
= workqueue_create(&wq
, (struct xfs_mount
*)ctx
,
243 scrub_nproc_workqueue(ctx
));
245 str_info(ctx
, ctx
->mntpoint
, _("Could not create workqueue."));
249 for (agno
= 0; agno
< ctx
->mnt
.fsgeom
.agcount
; agno
++) {
250 ret
= workqueue_add(&wq
, xfs_scan_ag_inodes
, agno
, &si
);
253 str_info(ctx
, ctx
->mntpoint
,
254 _("Could not queue AG %u bulkstat work."), agno
);
259 workqueue_destroy(&wq
);
265 * Open a file by handle, or return a negative error code.
269 struct xfs_handle
*handle
)
271 return open_by_fshandle(handle
, sizeof(*handle
),
272 O_RDONLY
| O_NOATIME
| O_NOFOLLOW
| O_NOCTTY
);