/* scrub/inodes.c — inode iteration helpers for xfs_scrub */
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <sys/statvfs.h>
#include "platform_defs.h"
#include "xfs_format.h"
#include "workqueue.h"
#include "xfs_scrub.h"
#include "libfrog/bulkstat.h"
/*
 * Iterate a range of inodes.
 *
 * This is a little more involved than repeatedly asking BULKSTAT for a
 * buffer's worth of stat data for some number of inodes.  We want to scan as
 * many of the inodes that the inobt thinks there are, including the ones that
 * are broken, but if we ask for n inodes starting at x, it'll skip the bad
 * ones and fill from beyond the range (x + n).
 *
 * Therefore, we ask INUMBERS to return one inobt chunk's worth of inode
 * bitmap information.  Then we try to BULKSTAT only the inodes that were
 * present in that chunk, and compare what we got against what INUMBERS said
 * was there.  If there's a mismatch, we know that we have an inode that fails
 * the verifiers but we can inject the bulkstat information to force the scrub
 * code to deal with the broken inodes.
 *
 * If the iteration function returns ESTALE, that means that the inode has
 * been deleted and possibly recreated since the BULKSTAT call.  We will
 * refresh the stat information and try again up to 30 times before reporting
 * the staleness as an error.
 */
46 * Did we get exactly the inodes we expected? If not, load them one at a
47 * time (or fake it) into the bulkstat data.
50 xfs_iterate_inodes_range_check(
51 struct scrub_ctx
*ctx
,
52 struct xfs_inogrp
*inogrp
,
53 struct xfs_bstat
*bstat
)
59 for (i
= 0, bs
= bstat
; i
< XFS_INODES_PER_CHUNK
; i
++) {
60 if (!(inogrp
->xi_allocmask
& (1ULL << i
)))
62 if (bs
->bs_ino
== inogrp
->xi_startino
+ i
) {
67 /* Load the one inode. */
68 error
= xfrog_bulkstat_single(&ctx
->mnt
,
69 inogrp
->xi_startino
+ i
, bs
);
70 if (error
|| bs
->bs_ino
!= inogrp
->xi_startino
+ i
) {
71 memset(bs
, 0, sizeof(struct xfs_bstat
));
72 bs
->bs_ino
= inogrp
->xi_startino
+ i
;
73 bs
->bs_blksize
= ctx
->mnt_sv
.f_frsize
;
80 * Call into the filesystem for inode/bulkstat information and call our
81 * iterator function. We'll try to fill the bulkstat information in batches,
82 * but we also can detect iget failures.
85 xfs_iterate_inodes_range(
86 struct scrub_ctx
*ctx
,
94 struct xfs_fsop_bulkreq igrpreq
= {NULL
};
95 struct xfs_handle handle
;
96 struct xfs_inogrp inogrp
;
97 struct xfs_bstat bstat
[XFS_INODES_PER_CHUNK
];
98 char idescr
[DESCR_BUFSZ
];
102 uint32_t bulklen
= 0;
110 memset(bstat
, 0, XFS_INODES_PER_CHUNK
* sizeof(struct xfs_bstat
));
112 igrpreq
.lastip
= &igrp_ino
;
114 igrpreq
.ubuffer
= &inogrp
;
115 igrpreq
.ocount
= &igrplen
;
117 memcpy(&handle
.ha_fsid
, fshandle
, sizeof(handle
.ha_fsid
));
118 handle
.ha_fid
.fid_len
= sizeof(xfs_fid_t
) -
119 sizeof(handle
.ha_fid
.fid_len
);
120 handle
.ha_fid
.fid_pad
= 0;
122 /* Find the inode chunk & alloc mask */
123 igrp_ino
= first_ino
;
124 error
= ioctl(ctx
->mnt
.fd
, XFS_IOC_FSINUMBERS
, &igrpreq
);
125 while (!error
&& igrplen
) {
126 /* Load the inodes. */
127 ino
= inogrp
.xi_startino
- 1;
130 * We can have totally empty inode chunks on filesystems where
131 * there are more than 64 inodes per block. Skip these.
133 if (inogrp
.xi_alloccount
== 0)
135 error
= xfrog_bulkstat(&ctx
->mnt
, &ino
, inogrp
.xi_alloccount
,
138 char errbuf
[DESCR_BUFSZ
];
140 str_info(ctx
, descr
, "%s", strerror_r(error
,
141 errbuf
, DESCR_BUFSZ
));
144 xfs_iterate_inodes_range_check(ctx
, &inogrp
, bstat
);
146 /* Iterate all the inodes. */
147 for (i
= 0, bs
= bstat
; i
< inogrp
.xi_alloccount
; i
++, bs
++) {
148 if (bs
->bs_ino
> last_ino
)
151 handle
.ha_fid
.fid_ino
= bs
->bs_ino
;
152 handle
.ha_fid
.fid_gen
= bs
->bs_gen
;
153 error
= fn(ctx
, &handle
, bs
, arg
);
159 if (stale_count
< 30) {
160 igrp_ino
= inogrp
.xi_startino
;
163 snprintf(idescr
, DESCR_BUFSZ
, "inode %"PRIu64
,
164 (uint64_t)bs
->bs_ino
);
165 str_info(ctx
, idescr
,
166 _("Changed too many times during scan; giving up."));
168 case XFS_ITERATE_INODES_ABORT
:
176 if (xfs_scrub_excessive_errors(ctx
)) {
184 error
= ioctl(ctx
->mnt
.fd
, XFS_IOC_FSINUMBERS
, &igrpreq
);
189 str_errno(ctx
, descr
);
196 /* BULKSTAT wrapper routines. */
197 struct xfs_scan_inodes
{
198 xfs_inode_iter_fn fn
;
203 /* Scan all the inodes in an AG. */
206 struct workqueue
*wq
,
210 struct xfs_scan_inodes
*si
= arg
;
211 struct scrub_ctx
*ctx
= (struct scrub_ctx
*)wq
->wq_ctx
;
212 char descr
[DESCR_BUFSZ
];
214 uint64_t next_ag_ino
;
217 snprintf(descr
, DESCR_BUFSZ
, _("dev %d:%d AG %u inodes"),
218 major(ctx
->fsinfo
.fs_datadev
),
219 minor(ctx
->fsinfo
.fs_datadev
),
222 ag_ino
= cvt_agino_to_ino(&ctx
->mnt
, agno
, 0);
223 next_ag_ino
= cvt_agino_to_ino(&ctx
->mnt
, agno
+ 1, 0);
225 moveon
= xfs_iterate_inodes_range(ctx
, descr
, ctx
->fshandle
, ag_ino
,
226 next_ag_ino
- 1, si
->fn
, si
->arg
);
231 /* Scan all the inodes in a filesystem. */
234 struct scrub_ctx
*ctx
,
235 xfs_inode_iter_fn fn
,
238 struct xfs_scan_inodes si
;
247 ret
= workqueue_create(&wq
, (struct xfs_mount
*)ctx
,
248 scrub_nproc_workqueue(ctx
));
250 str_info(ctx
, ctx
->mntpoint
, _("Could not create workqueue."));
254 for (agno
= 0; agno
< ctx
->mnt
.fsgeom
.agcount
; agno
++) {
255 ret
= workqueue_add(&wq
, xfs_scan_ag_inodes
, agno
, &si
);
258 str_info(ctx
, ctx
->mntpoint
,
259 _("Could not queue AG %u bulkstat work."), agno
);
264 workqueue_destroy(&wq
);
270 * Open a file by handle, or return a negative error code.
274 struct xfs_handle
*handle
)
276 return open_by_fshandle(handle
, sizeof(*handle
),
277 O_RDONLY
| O_NOATIME
| O_NOFOLLOW
| O_NOCTTY
);