]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - scrub/inodes.c
xfs_scrub: fix #include ordering to avoid build failure
[thirdparty/xfsprogs-dev.git] / scrub / inodes.c
1 /*
2 * Copyright (C) 2018 Oracle. All Rights Reserved.
3 *
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it would be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20 #include "xfs.h"
21 #include <stdint.h>
22 #include <stdlib.h>
23 #include <pthread.h>
24 #include <sys/statvfs.h>
25 #include "platform_defs.h"
26 #include "xfs_arch.h"
27 #include "xfs_format.h"
28 #include "handle.h"
29 #include "path.h"
30 #include "workqueue.h"
31 #include "xfs_scrub.h"
32 #include "common.h"
33 #include "inodes.h"
34
35 /*
36 * Iterate a range of inodes.
37 *
38 * This is a little more involved than repeatedly asking BULKSTAT for a
39 * buffer's worth of stat data for some number of inodes. We want to scan as
40 * many of the inodes that the inobt thinks there are, including the ones that
41 * are broken, but if we ask for n inodes starting at x, it'll skip the bad
42 * ones and fill from beyond the range (x + n).
43 *
44 * Therefore, we ask INUMBERS to return one inobt chunk's worth of inode
45 * bitmap information. Then we try to BULKSTAT only the inodes that were
46 * present in that chunk, and compare what we got against what INUMBERS said
47 * was there. If there's a mismatch, we know that we have an inode that fails
48 * the verifiers but we can inject the bulkstat information to force the scrub
49 * code to deal with the broken inodes.
50 *
51 * If the iteration function returns ESTALE, that means that the inode has
 * been deleted and possibly recreated since the BULKSTAT call.  We will
53 * refresh the stat information and try again up to 30 times before reporting
54 * the staleness as an error.
55 */
56
57 /*
58 * Did we get exactly the inodes we expected? If not, load them one at a
59 * time (or fake it) into the bulkstat data.
60 */
static void
xfs_iterate_inodes_range_check(
	struct scrub_ctx	*ctx,
	struct xfs_inogrp	*inogrp,	/* INUMBERS data for this chunk */
	struct xfs_bstat	*bstat)		/* bulkstat array to patch up */
{
	struct xfs_fsop_bulkreq	onereq = {0};
	struct xfs_bstat	*bs;
	__u64			oneino;
	__s32			onelen = 0;
	int			i;
	int			error;

	/* Single-inode bulkstat request; reused for every missing inode. */
	onereq.lastip = &oneino;
	onereq.icount = 1;
	onereq.ocount = &onelen;

	/*
	 * Walk every bit of the chunk's allocation mask; bs only advances
	 * past entries that BULKSTAT actually filled in, so a gap in the
	 * returned array is detected by an inode-number mismatch.
	 */
	for (i = 0, bs = bstat; i < XFS_INODES_PER_CHUNK; i++) {
		/* Skip inodes the inobt says are free. */
		if (!(inogrp->xi_allocmask & (1ULL << i)))
			continue;
		/* BULKSTAT returned this inode; nothing to fix here. */
		if (bs->bs_ino == inogrp->xi_startino + i) {
			bs++;
			continue;
		}

		/* Load the one inode. */
		oneino = inogrp->xi_startino + i;
		onereq.ubuffer = bs;
		error = ioctl(ctx->mnt_fd, XFS_IOC_FSBULKSTAT_SINGLE,
				&onereq);
		/*
		 * If even the single-inode bulkstat fails (e.g. the inode
		 * fails the verifiers), synthesize a minimal record so that
		 * the scrub code still visits this broken inode.
		 */
		if (error || bs->bs_ino != inogrp->xi_startino + i) {
			memset(bs, 0, sizeof(struct xfs_bstat));
			bs->bs_ino = inogrp->xi_startino + i;
			bs->bs_blksize = ctx->mnt_sv.f_frsize;
		}
		bs++;
	}
}
99
100 /*
101 * Call into the filesystem for inode/bulkstat information and call our
102 * iterator function. We'll try to fill the bulkstat information in batches,
103 * but we also can detect iget failures.
104 */
static bool
xfs_iterate_inodes_range(
	struct scrub_ctx	*ctx,
	const char		*descr,		/* label used in error reports */
	void			*fshandle,	/* fs handle; supplies ha_fsid */
	uint64_t		first_ino,	/* first inode in scan range */
	uint64_t		last_ino,	/* last inode in scan range */
	xfs_inode_iter_fn	fn,		/* callback for each inode */
	void			*arg)		/* opaque data passed to fn */
{
	struct xfs_fsop_bulkreq	igrpreq = {0};
	struct xfs_fsop_bulkreq	bulkreq = {0};
	struct xfs_handle	handle;
	struct xfs_inogrp	inogrp;
	struct xfs_bstat	bstat[XFS_INODES_PER_CHUNK];
	char			idescr[DESCR_BUFSZ];
	char			buf[DESCR_BUFSZ];
	struct xfs_bstat	*bs;
	__u64			igrp_ino;
	__u64			ino;
	__s32			bulklen = 0;
	__s32			igrplen = 0;
	bool			moveon = true;
	int			i;
	int			error;
	int			stale_count = 0;


	/* Request for one chunk's worth of stat data. */
	memset(bstat, 0, XFS_INODES_PER_CHUNK * sizeof(struct xfs_bstat));
	bulkreq.lastip = &ino;
	bulkreq.icount = XFS_INODES_PER_CHUNK;
	bulkreq.ubuffer = &bstat;
	bulkreq.ocount = &bulklen;

	/* Request for one inobt record (inode chunk) at a time. */
	igrpreq.lastip = &igrp_ino;
	igrpreq.icount = 1;
	igrpreq.ubuffer = &inogrp;
	igrpreq.ocount = &igrplen;

	/*
	 * Build a partial file handle: the fsid part is fixed for the whole
	 * scan; fid_ino/fid_gen are filled in per inode below.
	 */
	memcpy(&handle.ha_fsid, fshandle, sizeof(handle.ha_fsid));
	handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
			sizeof(handle.ha_fid.fid_len);
	handle.ha_fid.fid_pad = 0;

	/* Find the inode chunk & alloc mask */
	igrp_ino = first_ino;
	error = ioctl(ctx->mnt_fd, XFS_IOC_FSINUMBERS, &igrpreq);
	while (!error && igrplen) {
		/*
		 * Load the inodes.  BULKSTAT returns inodes after *lastip,
		 * so back up one to include the chunk's first inode.
		 */
		ino = inogrp.xi_startino - 1;
		bulkreq.icount = inogrp.xi_alloccount;
		error = ioctl(ctx->mnt_fd, XFS_IOC_FSBULKSTAT, &bulkreq);
		if (error)
			str_info(ctx, descr, "%s", strerror_r(errno,
						buf, DESCR_BUFSZ));

		/* Fill (or fake) any entries BULKSTAT skipped. */
		xfs_iterate_inodes_range_check(ctx, &inogrp, bstat);

		/* Iterate all the inodes. */
		for (i = 0, bs = bstat; i < inogrp.xi_alloccount; i++, bs++) {
			/* Past the requested range; we're done. */
			if (bs->bs_ino > last_ino)
				goto out;

			handle.ha_fid.fid_ino = bs->bs_ino;
			handle.ha_fid.fid_gen = bs->bs_gen;
			error = fn(ctx, &handle, bs, arg);
			switch (error) {
			case 0:
				break;
			case ESTALE:
				/*
				 * Inode was freed/reused mid-scan; redo this
				 * whole chunk from INUMBERS, up to 30 times,
				 * before giving up on it.
				 */
				stale_count++;
				if (stale_count < 30) {
					igrp_ino = inogrp.xi_startino;
					goto igrp_retry;
				}
				snprintf(idescr, DESCR_BUFSZ, "inode %"PRIu64,
						(uint64_t)bs->bs_ino);
				str_info(ctx, idescr,
_("Changed too many times during scan; giving up."));
				break;
			case XFS_ITERATE_INODES_ABORT:
				/* Callback asked to stop; not an errno. */
				error = 0;
				/* fall thru */
			default:
				moveon = false;
				errno = error;
				goto err;
			}
			if (xfs_scrub_excessive_errors(ctx)) {
				moveon = false;
				goto out;
			}
		}

		/* Chunk finished cleanly; reset the ESTALE retry budget. */
		stale_count = 0;
igrp_retry:
		error = ioctl(ctx->mnt_fd, XFS_IOC_FSINUMBERS, &igrpreq);
	}

err:
	if (error) {
		str_errno(ctx, descr);
		moveon = false;
	}
out:
	return moveon;
}
212
213 /* BULKSTAT wrapper routines. */
struct xfs_scan_inodes {
	xfs_inode_iter_fn	fn;	/* per-inode callback */
	void			*arg;	/* opaque data passed to fn */
	bool			moveon;	/* cleared if any AG scan fails */
};
219
220 /* Scan all the inodes in an AG. */
221 static void
222 xfs_scan_ag_inodes(
223 struct workqueue *wq,
224 xfs_agnumber_t agno,
225 void *arg)
226 {
227 struct xfs_scan_inodes *si = arg;
228 struct scrub_ctx *ctx = (struct scrub_ctx *)wq->wq_ctx;
229 char descr[DESCR_BUFSZ];
230 uint64_t ag_ino;
231 uint64_t next_ag_ino;
232 bool moveon;
233
234 snprintf(descr, DESCR_BUFSZ, _("dev %d:%d AG %u inodes"),
235 major(ctx->fsinfo.fs_datadev),
236 minor(ctx->fsinfo.fs_datadev),
237 agno);
238
239 ag_ino = (__u64)agno << (ctx->inopblog + ctx->agblklog);
240 next_ag_ino = (__u64)(agno + 1) << (ctx->inopblog + ctx->agblklog);
241
242 moveon = xfs_iterate_inodes_range(ctx, descr, ctx->fshandle, ag_ino,
243 next_ag_ino - 1, si->fn, si->arg);
244 if (!moveon)
245 si->moveon = false;
246 }
247
248 /* Scan all the inodes in a filesystem. */
249 bool
250 xfs_scan_all_inodes(
251 struct scrub_ctx *ctx,
252 xfs_inode_iter_fn fn,
253 void *arg)
254 {
255 struct xfs_scan_inodes si;
256 xfs_agnumber_t agno;
257 struct workqueue wq;
258 int ret;
259
260 si.moveon = true;
261 si.fn = fn;
262 si.arg = arg;
263
264 ret = workqueue_create(&wq, (struct xfs_mount *)ctx,
265 scrub_nproc_workqueue(ctx));
266 if (ret) {
267 str_info(ctx, ctx->mntpoint, _("Could not create workqueue."));
268 return false;
269 }
270
271 for (agno = 0; agno < ctx->geo.agcount; agno++) {
272 ret = workqueue_add(&wq, xfs_scan_ag_inodes, agno, &si);
273 if (ret) {
274 si.moveon = false;
275 str_info(ctx, ctx->mntpoint,
276 _("Could not queue AG %u bulkstat work."), agno);
277 break;
278 }
279 }
280
281 workqueue_destroy(&wq);
282
283 return si.moveon;
284 }
285
286 /*
287 * Open a file by handle, or return a negative error code.
288 */
int
xfs_open_handle(
	struct xfs_handle	*handle)
{
	/*
	 * Read-only open with side effects suppressed: no atime update,
	 * no symlink following, and no acquiring a controlling terminal.
	 */
	return open_by_fshandle(handle, sizeof(*handle),
			O_RDONLY | O_NOATIME | O_NOFOLLOW | O_NOCTTY);
}