]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blame - scrub/inodes.c
xfs_scrub: remove moveon from the fscounters functions
[thirdparty/xfsprogs-dev.git] / scrub / inodes.c
CommitLineData
959ef981 1// SPDX-License-Identifier: GPL-2.0+
372d4ba9
DW
2/*
3 * Copyright (C) 2018 Oracle. All Rights Reserved.
372d4ba9 4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
372d4ba9 5 */
a440f877 6#include "xfs.h"
372d4ba9
DW
7#include <stdint.h>
8#include <stdlib.h>
9#include <pthread.h>
10#include <sys/statvfs.h>
11#include "platform_defs.h"
372d4ba9
DW
12#include "xfs_arch.h"
13#include "xfs_format.h"
14#include "handle.h"
42b4c8e8 15#include "libfrog/paths.h"
56598728 16#include "libfrog/workqueue.h"
372d4ba9
DW
17#include "xfs_scrub.h"
18#include "common.h"
19#include "inodes.h"
fee68490 20#include "libfrog/fsgeom.h"
f31b5e12 21#include "libfrog/bulkstat.h"
372d4ba9
DW
22
23/*
24 * Iterate a range of inodes.
25 *
26 * This is a little more involved than repeatedly asking BULKSTAT for a
27 * buffer's worth of stat data for some number of inodes. We want to scan as
28 * many of the inodes that the inobt thinks there are, including the ones that
29 * are broken, but if we ask for n inodes starting at x, it'll skip the bad
30 * ones and fill from beyond the range (x + n).
31 *
32 * Therefore, we ask INUMBERS to return one inobt chunk's worth of inode
33 * bitmap information. Then we try to BULKSTAT only the inodes that were
34 * present in that chunk, and compare what we got against what INUMBERS said
35 * was there. If there's a mismatch, we know that we have an inode that fails
36 * the verifiers but we can inject the bulkstat information to force the scrub
37 * code to deal with the broken inodes.
38 *
39 * If the iteration function returns ESTALE, that means that the inode has
 * been deleted and possibly recreated since the BULKSTAT call. We will
41 * refresh the stat information and try again up to 30 times before reporting
42 * the staleness as an error.
43 */
44
45/*
e3724c8b
DW
46 * Run bulkstat on an entire inode allocation group, then check that we got
47 * exactly the inodes we expected. If not, load them one at a time (or fake
48 * it) into the bulkstat data.
372d4ba9
DW
49 */
50static void
e3724c8b 51bulkstat_for_inumbers(
372d4ba9 52 struct scrub_ctx *ctx,
e3724c8b
DW
53 const char *descr,
54 const struct xfs_inumbers *inumbers,
55 struct xfs_bulkstat_req *breq)
372d4ba9 56{
e3724c8b 57 struct xfs_bulkstat *bstat = breq->bulkstat;
4cca629d 58 struct xfs_bulkstat *bs;
372d4ba9
DW
59 int i;
60 int error;
61
e3724c8b
DW
62 /* First we try regular bulkstat, for speed. */
63 breq->hdr.ino = inumbers->xi_startino;
64 breq->hdr.icount = inumbers->xi_alloccount;
65 error = xfrog_bulkstat(&ctx->mnt, breq);
66 if (error) {
67 char errbuf[DESCR_BUFSZ];
68
69 str_info(ctx, descr, "%s",
70 strerror_r(error, errbuf, DESCR_BUFSZ));
71 }
72
73 /*
74 * Check each of the stats we got back to make sure we got the inodes
75 * we asked for.
76 */
372d4ba9 77 for (i = 0, bs = bstat; i < XFS_INODES_PER_CHUNK; i++) {
b94a69ac 78 if (!(inumbers->xi_allocmask & (1ULL << i)))
372d4ba9 79 continue;
b94a69ac 80 if (bs->bs_ino == inumbers->xi_startino + i) {
372d4ba9
DW
81 bs++;
82 continue;
83 }
84
85 /* Load the one inode. */
f31b5e12 86 error = xfrog_bulkstat_single(&ctx->mnt,
b94a69ac
DW
87 inumbers->xi_startino + i, 0, bs);
88 if (error || bs->bs_ino != inumbers->xi_startino + i) {
4cca629d 89 memset(bs, 0, sizeof(struct xfs_bulkstat));
b94a69ac 90 bs->bs_ino = inumbers->xi_startino + i;
372d4ba9
DW
91 bs->bs_blksize = ctx->mnt_sv.f_frsize;
92 }
93 bs++;
94 }
95}
96
/*
 * Call into the filesystem for inode/bulkstat information and call our
 * iterator function.  We'll try to fill the bulkstat information in batches,
 * but we also can detect iget failures.
 *
 * Returns true if the whole AG was scanned (or cleanly aborted); returns
 * false if the iterator reported a hard error or the error threshold was
 * exceeded.
 */
static bool
xfs_iterate_inodes_ag(
	struct scrub_ctx	*ctx,
	const char		*descr,
	void			*fshandle,
	uint32_t		agno,
	xfs_inode_iter_fn	fn,
	void			*arg)
{
	struct xfs_handle	handle;
	struct xfs_inumbers_req	*ireq;
	struct xfs_bulkstat_req	*breq;
	char			idescr[DESCR_BUFSZ];
	struct xfs_bulkstat	*bs;
	struct xfs_inumbers	*inumbers;
	bool			moveon = true;
	int			i;
	int			error;
	int			stale_count = 0;

	/*
	 * Build a handle template for this fs; only fid_ino/fid_gen are
	 * filled in per-inode inside the loop below.
	 */
	memcpy(&handle.ha_fsid, fshandle, sizeof(handle.ha_fsid));
	handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
			sizeof(handle.ha_fid.fid_len);
	handle.ha_fid.fid_pad = 0;

	/* One chunk's worth of bulkstat records per INUMBERS result. */
	breq = xfrog_bulkstat_alloc_req(XFS_INODES_PER_CHUNK, 0);
	if (!breq) {
		str_liberror(ctx, ENOMEM, _("allocating bulkstat request"));
		return false;
	}

	ireq = xfrog_inumbers_alloc_req(1, 0);
	if (!ireq) {
		str_liberror(ctx, ENOMEM, _("allocating inumbers request"));
		free(breq);
		return false;
	}
	inumbers = &ireq->inumbers[0];
	xfrog_inumbers_set_ag(ireq, agno);

	/* Find the inode chunk & alloc mask */
	error = xfrog_inumbers(&ctx->mnt, ireq);
	while (!error && ireq->hdr.ocount > 0) {
		/*
		 * We can have totally empty inode chunks on filesystems where
		 * there are more than 64 inodes per block. Skip these.
		 */
		if (inumbers->xi_alloccount == 0)
			goto igrp_retry;

		bulkstat_for_inumbers(ctx, descr, inumbers, breq);

		/* Iterate all the inodes. */
		for (i = 0, bs = breq->bulkstat;
		     i < inumbers->xi_alloccount;
		     i++, bs++) {
			handle.ha_fid.fid_ino = bs->bs_ino;
			handle.ha_fid.fid_gen = bs->bs_gen;
			error = fn(ctx, &handle, bs, arg);
			switch (error) {
			case 0:
				break;
			case ESTALE:
				/*
				 * Inode changed since bulkstat; rewind the
				 * inumbers cursor to this chunk and refresh,
				 * giving up after 30 consecutive retries.
				 */
				stale_count++;
				if (stale_count < 30) {
					ireq->hdr.ino = inumbers->xi_startino;
					goto igrp_retry;
				}
				scrub_render_ino_descr(ctx, idescr, DESCR_BUFSZ,
						bs->bs_ino, bs->bs_gen, NULL);
				str_info(ctx, idescr,
_("Changed too many times during scan; giving up."));
				break;
			case XFS_ITERATE_INODES_ABORT:
				/* Clean abort: suppress error reporting. */
				error = 0;
				/* fall thru */
			default:
				moveon = false;
				errno = error;
				goto err;
			}
			if (xfs_scrub_excessive_errors(ctx)) {
				moveon = false;
				goto out;
			}
		}

		/* Chunk completed without staleness; reset the counter. */
		stale_count = 0;
igrp_retry:
		error = xfrog_inumbers(&ctx->mnt, ireq);
	}

err:
	if (error) {
		str_liberror(ctx, error, descr);
		moveon = false;
	}
out:
	free(ireq);
	free(breq);
	return moveon;
}
204
/* BULKSTAT wrapper routines. */

/*
 * Per-scan state shared by all AG workqueue workers: the caller's iterator
 * and its argument, plus a sticky success flag any worker may clear.
 */
struct xfs_scan_inodes {
	xfs_inode_iter_fn	fn;	/* called for each inode found */
	void			*arg;	/* opaque data passed through to fn */
	bool			moveon;	/* set false if any AG scan fails */
};
211
212/* Scan all the inodes in an AG. */
213static void
214xfs_scan_ag_inodes(
215 struct workqueue *wq,
216 xfs_agnumber_t agno,
217 void *arg)
218{
219 struct xfs_scan_inodes *si = arg;
220 struct scrub_ctx *ctx = (struct scrub_ctx *)wq->wq_ctx;
221 char descr[DESCR_BUFSZ];
372d4ba9
DW
222 bool moveon;
223
224 snprintf(descr, DESCR_BUFSZ, _("dev %d:%d AG %u inodes"),
225 major(ctx->fsinfo.fs_datadev),
226 minor(ctx->fsinfo.fs_datadev),
227 agno);
228
23ea9841
DW
229 moveon = xfs_iterate_inodes_ag(ctx, descr, ctx->fshandle, agno,
230 si->fn, si->arg);
372d4ba9
DW
231 if (!moveon)
232 si->moveon = false;
233}
234
235/* Scan all the inodes in a filesystem. */
236bool
237xfs_scan_all_inodes(
238 struct scrub_ctx *ctx,
239 xfs_inode_iter_fn fn,
240 void *arg)
241{
242 struct xfs_scan_inodes si;
243 xfs_agnumber_t agno;
244 struct workqueue wq;
245 int ret;
246
247 si.moveon = true;
248 si.fn = fn;
249 si.arg = arg;
250
251 ret = workqueue_create(&wq, (struct xfs_mount *)ctx,
252 scrub_nproc_workqueue(ctx));
253 if (ret) {
9d57cbfc 254 str_liberror(ctx, ret, _("creating bulkstat workqueue"));
372d4ba9
DW
255 return false;
256 }
257
3f9efb2e 258 for (agno = 0; agno < ctx->mnt.fsgeom.agcount; agno++) {
372d4ba9
DW
259 ret = workqueue_add(&wq, xfs_scan_ag_inodes, agno, &si);
260 if (ret) {
261 si.moveon = false;
9d57cbfc 262 str_liberror(ctx, ret, _("queueing bulkstat work"));
372d4ba9
DW
263 break;
264 }
265 }
266
71296cf8
DW
267 ret = workqueue_terminate(&wq);
268 if (ret) {
269 si.moveon = false;
270 str_liberror(ctx, ret, _("finishing bulkstat work"));
271 }
372d4ba9
DW
272 workqueue_destroy(&wq);
273
274 return si.moveon;
275}
276
277/*
278 * Open a file by handle, or return a negative error code.
279 */
280int
281xfs_open_handle(
282 struct xfs_handle *handle)
283{
284 return open_by_fshandle(handle, sizeof(*handle),
285 O_RDONLY | O_NOATIME | O_NOFOLLOW | O_NOCTTY);
286}