]>
git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - scrub/inodes.c
1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
10 #include <sys/statvfs.h>
11 #include "platform_defs.h"
13 #include "xfs_format.h"
15 #include "libfrog/paths.h"
16 #include "libfrog/workqueue.h"
17 #include "xfs_scrub.h"
20 #include "libfrog/fsgeom.h"
21 #include "libfrog/bulkstat.h"
24 * Iterate a range of inodes.
26 * This is a little more involved than repeatedly asking BULKSTAT for a
27 * buffer's worth of stat data for some number of inodes. We want to scan as
28 * many of the inodes that the inobt thinks there are, including the ones that
29 * are broken, but if we ask for n inodes starting at x, it'll skip the bad
30 * ones and fill from beyond the range (x + n).
32 * Therefore, we ask INUMBERS to return one inobt chunk's worth of inode
33 * bitmap information. Then we try to BULKSTAT only the inodes that were
34 * present in that chunk, and compare what we got against what INUMBERS said
35 * was there. If there's a mismatch, we know that we have an inode that fails
36 * the verifiers but we can inject the bulkstat information to force the scrub
37 * code to deal with the broken inodes.
39 * If the iteration function returns ESTALE, that means that the inode has
40 been deleted and possibly recreated since the BULKSTAT call. We will
41 * refresh the stat information and try again up to 30 times before reporting
42 * the staleness as an error.
46 * Did we get exactly the inodes we expected? If not, load them one at a
47 * time (or fake it) into the bulkstat data.
/*
 * NOTE(review): fragment of xfs_iterate_inodes_range_check() recovered from a
 * line-numbered gitweb extraction.  Gaps in the embedded numbering (54, 56-58,
 * 61, 63-66, 74-79) show original source lines are missing here (the opening
 * brace, loop-body continue/advance statements, closing braces); restore from
 * the upstream file before treating this as compilable code.
 *
 * Visible purpose (per the surviving code): compare a BULKSTAT batch against
 * the INUMBERS allocation bitmap for one inode chunk, and for any allocated
 * slot whose bulkstat record is absent or mismatched, fetch it individually
 * or synthesize a minimal record so scrub still visits the broken inode.
 */
50 xfs_iterate_inodes_range_check(
51 	struct scrub_ctx
*ctx
,
52 	struct xfs_inumbers
*inumbers
,
53 	struct xfs_bulkstat
*bstat
)
55 	struct xfs_bulkstat
*bs
;
/*
 * Walk all XFS_INODES_PER_CHUNK slots; xi_allocmask bit i clear means slot i
 * holds no allocated inode, so it is skipped (the `continue` line is one of
 * the missing lines).
 */
59 	for (i
= 0, bs
= bstat
; i
< XFS_INODES_PER_CHUNK
; i
++) {
60 		if (!(inumbers
->xi_allocmask
& (1ULL << i
)))
/* Slot already matched by the batch bulkstat — presumably advances bs. */
62 		if (bs
->bs_ino
== inumbers
->xi_startino
+ i
) {
67 			/* Load the one inode. */
68 			error
= xfrog_bulkstat_single(&ctx
->mnt
,
69 					inumbers
->xi_startino
+ i
, 0, bs
);
/*
 * Single-inode bulkstat failed or returned a different inode: zero the record
 * and fake just enough (inode number + block size from statvfs f_frsize) for
 * the scrub code to process the broken inode.
 */
70 			if (error
|| bs
->bs_ino
!= inumbers
->xi_startino
+ i
) {
71 				memset(bs
, 0, sizeof(struct xfs_bulkstat
));
72 				bs
->bs_ino
= inumbers
->xi_startino
+ i
;
73 				bs
->bs_blksize
= ctx
->mnt_sv
.f_frsize
;
80 * Call into the filesystem for inode/bulkstat information and call our
81 * iterator function. We'll try to fill the bulkstat information in batches,
82 * but we also can detect iget failures.
/*
 * NOTE(review): fragment of xfs_iterate_inodes_ag() recovered from a
 * line-numbered gitweb extraction.  Many original lines are missing (87-92,
 * 99-103, 108, 110, 112-114, 116, 118-120, 123, 127, 130, 132-133, 137, 139,
 * 142-143, 145, 149, 153-157, 160-161, 166, 168-174, 176-182, 184-187,
 * 189-196): remaining parameters, local declarations, error/cleanup paths,
 * the switch statement around the iterator return value, and all closing
 * braces.  Restore from upstream before treating this as compilable code.
 *
 * Visible purpose: for one AG, loop INUMBERS -> BULKSTAT -> per-inode
 * callback `fn`, building a file handle for each inode, with a bounded
 * (30-try) retry when the callback reports a stale inode.
 */
85 xfs_iterate_inodes_ag(
86 	struct scrub_ctx
*ctx
,
/* Locals: file handle template, INUMBERS/BULKSTAT request buffers, scratch
 * description string, cursors into the bulkstat and inumbers arrays. */
93 	struct xfs_handle		handle
;
94 	struct xfs_inumbers_req
*ireq
;
95 	struct xfs_bulkstat_req
*breq
;
96 	char			idescr
[DESCR_BUFSZ
];
97 	struct xfs_bulkstat
*bs
;
98 	struct xfs_inumbers
*inumbers
;
/*
 * Seed the handle with the filesystem id; the fid length excludes the
 * fid_len field itself, per the xfs_fid_t handle convention.
 */
104 	memcpy(&handle
.ha_fsid
, fshandle
, sizeof(handle
.ha_fsid
));
105 	handle
.ha_fid
.fid_len
= sizeof(xfs_fid_t
) -
106 			sizeof(handle
.ha_fid
.fid_len
);
107 	handle
.ha_fid
.fid_pad
= 0;
/* Allocate one chunk's worth of bulkstat space; NULL check line is missing. */
109 	breq
= xfrog_bulkstat_alloc_req(XFS_INODES_PER_CHUNK
, 0);
111 		str_info(ctx
, descr
, _("Insufficient memory; giving up."));
/* One-record INUMBERS request, positioned at the start of this AG. */
115 	ireq
= xfrog_inumbers_alloc_req(1, 0);
117 		str_info(ctx
, descr
, _("Insufficient memory; giving up."));
121 	inumbers
= &ireq
->inumbers
[0];
122 	xfrog_inumbers_set_ag(ireq
, agno
);
124 	/* Find the inode chunk & alloc mask */
125 	error
= xfrog_inumbers(&ctx
->mnt
, ireq
);
126 	while (!error
&& ireq
->hdr
.ocount
> 0) {
128 		 * We can have totally empty inode chunks on filesystems where
129 		 * there are more than 64 inodes per block. Skip these.
131 		if (inumbers
->xi_alloccount
== 0)
/* Bulkstat exactly the inodes INUMBERS said are allocated in this chunk. */
134 		breq
->hdr
.ino
= inumbers
->xi_startino
;
135 		breq
->hdr
.icount
= inumbers
->xi_alloccount
;
136 		error
= xfrog_bulkstat(&ctx
->mnt
, breq
);
/* Bulkstat failure is reported but evidently not fatal to the scan. */
138 			char	errbuf
[DESCR_BUFSZ
];
140 			str_info(ctx
, descr
, "%s", strerror_r(error
,
141 					errbuf
, DESCR_BUFSZ
));
/* Backfill/fake records for inodes bulkstat skipped (see helper above). */
144 		xfs_iterate_inodes_range_check(ctx
, inumbers
, breq
->bulkstat
);
146 		/* Iterate all the inodes. */
147 		for (i
= 0, bs
= breq
->bulkstat
;
148 		     i
< inumbers
->xi_alloccount
;
/* Fill in the per-inode part of the handle and invoke the callback. */
150 			handle
.ha_fid
.fid_ino
= bs
->bs_ino
;
151 			handle
.ha_fid
.fid_gen
= bs
->bs_gen
;
152 			error
= fn(ctx
, &handle
, bs
, arg
);
/*
 * Stale-inode retry path (the switch on the callback's return value is
 * mostly missing): re-issue INUMBERS for this chunk up to 30 times, then
 * give up and log the inode as having changed too many times.
 */
158 			if (stale_count
< 30) {
159 				ireq
->hdr
.ino
= inumbers
->xi_startino
;
162 			snprintf(idescr
, DESCR_BUFSZ
, "inode %"PRIu64
,
163 					(uint64_t)bs
->bs_ino
);
164 			str_info(ctx
, idescr
,
165 	_("Changed too many times during scan; giving up."));
/* Callback asked to abort the whole iteration. */
167 		case XFS_ITERATE_INODES_ABORT
:
/* Stop early once the error threshold has been exceeded. */
175 			if (xfs_scrub_excessive_errors(ctx
)) {
/* Advance to the next inode chunk in this AG. */
183 		error
= xfrog_inumbers(&ctx
->mnt
, ireq
);
188 		str_liberror(ctx
, error
, descr
);
197 /* BULKSTAT wrapper routines. */
198 struct xfs_scan_inodes
{
199 xfs_inode_iter_fn fn
;
204 /* Scan all the inodes in an AG. */
/*
 * NOTE(review): fragment of the per-AG workqueue worker (the function name
 * line, original lines 205-206, 208-210, 214-215, 219-220 and 222-226, is
 * missing from this extraction).  Visible behavior: unpack the shared
 * xfs_scan_inodes context from the workqueue, build a "dev major:minor AG n
 * inodes" description string, and delegate to xfs_iterate_inodes_ag().
 */
207 	struct workqueue
*wq
,
211 	struct xfs_scan_inodes
*si
= arg
;
/* The scrub context rides in the workqueue's opaque wq_ctx pointer. */
212 	struct scrub_ctx
*ctx
= (struct scrub_ctx
*)wq
->wq_ctx
;
213 	char			descr
[DESCR_BUFSZ
];
216 	snprintf(descr
, DESCR_BUFSZ
, _("dev %d:%d AG %u inodes"),
217 				major(ctx
->fsinfo
.fs_datadev
),
218 				minor(ctx
->fsinfo
.fs_datadev
),
/* Result presumably recorded into the shared context on a missing line. */
221 	moveon
= xfs_iterate_inodes_ag(ctx
, descr
, ctx
->fshandle
, agno
,
227 /* Scan all the inodes in a filesystem. */
/*
 * NOTE(review): fragment (original lines 228-229, 232-242, 245, 247-249,
 * 252-258 and 260-264 are missing: the function name, si initialization,
 * error-path returns, the workqueue_terminate call if any, and the final
 * result computation).  Visible behavior: create a workqueue sized by
 * scrub_nproc_workqueue(), queue one xfs_scan_ag_inodes job per AG, then
 * destroy the workqueue.
 */
230 	struct scrub_ctx
*ctx
,
231 	xfs_inode_iter_fn	fn
,
/* Shared state handed to every per-AG worker. */
234 	struct xfs_scan_inodes	si
;
/* NOTE(review): ctx is cast to struct xfs_mount * here in the original;
 * workers recover it via wq->wq_ctx — confirm against upstream. */
243 	ret
= workqueue_create(&wq
, (struct xfs_mount
*)ctx
,
244 			scrub_nproc_workqueue(ctx
));
246 		str_liberror(ctx
, ret
, _("creating bulkstat workqueue"));
/* Fan out: one work item per allocation group. */
250 	for (agno
= 0; agno
< ctx
->mnt
.fsgeom
.agcount
; agno
++) {
251 		ret
= workqueue_add(&wq
, xfs_scan_ag_inodes
, agno
, &si
);
254 			str_liberror(ctx
, ret
, _("queueing bulkstat work"));
259 	workqueue_destroy(&wq
);
/*
 * NOTE(review): fragment of the open-by-handle helper (original lines
 * 266-268 and 270 — the comment close, return type, function name and
 * opening brace — are missing from this extraction).
 */
265  * Open a file by handle, or return a negative error code.
269 	struct xfs_handle	*handle
)
/*
 * Read-only open that avoids perturbing the file: no atime update, no
 * symlink following, and never becoming the controlling terminal.
 */
271 	return open_by_fshandle(handle
, sizeof(*handle
),
272 			O_RDONLY
| O_NOATIME
| O_NOFOLLOW
| O_NOCTTY
);