]>
Commit | Line | Data |
---|---|---|
f31b5e12 DW |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | |
3 | * Copyright (C) 2019 Oracle. All Rights Reserved. | |
4 | * Author: Darrick J. Wong <darrick.wong@oracle.com> | |
5 | */ | |
4cca629d DW |
6 | #include <string.h> |
7 | #include <strings.h> | |
f31b5e12 DW |
8 | #include "xfs.h" |
9 | #include "fsgeom.h" | |
10 | #include "bulkstat.h" | |
11 | ||
4cca629d DW |
12 | /* |
13 | * Wrapper functions for BULKSTAT and INUMBERS | |
14 | * =========================================== | |
15 | * | |
16 | * The functions in this file are thin wrappers around the most recent version | |
17 | * of the BULKSTAT and INUMBERS ioctls. BULKSTAT is used to query XFS-specific | |
18 | * stat information about a group of inodes. INUMBERS is used to query | |
19 | * allocation information about batches of XFS inodes. | |
20 | * | |
21 | * At the moment, the public xfrog_* functions provide all functionality of the | |
22 | * V5 interface. If the V5 interface is not available on the running kernel, | |
23 | * the functions will emulate them as best they can with previous versions of | |
24 | * the interface (currently V1). If emulation is not possible, EINVAL will be | |
25 | * returned. | |
26 | * | |
27 | * The XFROG_FLAG_BULKSTAT_FORCE_V[15] flags can be used to force use of a | |
28 | * particular version of the kernel interface for testing. | |
29 | */ | |
30 | ||
/*
 * Grab the fs geometry information that is needed to emulate v5 with
 * v1 interfaces.
 *
 * The geometry is fetched at most once per xfs_fd: a nonzero blocksize
 * means it has already been loaded, so this is a cheap no-op thereafter.
 * Returns zero or a negative error code.
 */
static inline int
xfrog_bulkstat_prep_v1_emulation(
	struct xfs_fd		*xfd)
{
	if (xfd->fsgeom.blocksize > 0)
		return 0;

	return xfd_prepare_geometry(xfd);
}
44 | ||
45 | /* Bulkstat a single inode using v5 ioctl. */ | |
46 | static int | |
47 | xfrog_bulkstat_single5( | |
48 | struct xfs_fd *xfd, | |
49 | uint64_t ino, | |
50 | unsigned int flags, | |
51 | struct xfs_bulkstat *bulkstat) | |
52 | { | |
53 | struct xfs_bulkstat_req *req; | |
54 | int ret; | |
55 | ||
03582d3f | 56 | if (flags & ~(XFS_BULK_IREQ_SPECIAL | XFS_BULK_IREQ_NREXT64)) |
e6542132 | 57 | return -EINVAL; |
4cca629d | 58 | |
ac87307e CB |
59 | if (xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64) |
60 | flags |= XFS_BULK_IREQ_NREXT64; | |
61 | ||
e6542132 DW |
62 | ret = xfrog_bulkstat_alloc_req(1, ino, &req); |
63 | if (ret) | |
64 | return ret; | |
4cca629d DW |
65 | |
66 | req->hdr.flags = flags; | |
67 | ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req); | |
68 | if (ret) { | |
e6542132 | 69 | ret = -errno; |
4cca629d DW |
70 | goto free; |
71 | } | |
72 | ||
73 | if (req->hdr.ocount == 0) { | |
e6542132 | 74 | ret = -ENOENT; |
4cca629d DW |
75 | goto free; |
76 | } | |
77 | ||
78 | memcpy(bulkstat, req->bulkstat, sizeof(struct xfs_bulkstat)); | |
ac87307e CB |
79 | |
80 | if (!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64)) { | |
81 | bulkstat->bs_extents64 = bulkstat->bs_extents; | |
82 | bulkstat->bs_extents = 0; | |
83 | } | |
84 | ||
4cca629d DW |
85 | free: |
86 | free(req); | |
87 | return ret; | |
88 | } | |
89 | ||
90 | /* Bulkstat a single inode using v1 ioctl. */ | |
91 | static int | |
92 | xfrog_bulkstat_single1( | |
93 | struct xfs_fd *xfd, | |
94 | uint64_t ino, | |
95 | unsigned int flags, | |
96 | struct xfs_bulkstat *bulkstat) | |
97 | { | |
98 | struct xfs_bstat bstat; | |
99 | struct xfs_fsop_bulkreq bulkreq = { 0 }; | |
100 | int error; | |
101 | ||
102 | if (flags) | |
e6542132 | 103 | return -EINVAL; |
4cca629d DW |
104 | |
105 | error = xfrog_bulkstat_prep_v1_emulation(xfd); | |
106 | if (error) | |
107 | return error; | |
108 | ||
109 | bulkreq.lastip = (__u64 *)&ino; | |
110 | bulkreq.icount = 1; | |
111 | bulkreq.ubuffer = &bstat; | |
112 | error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT_SINGLE, &bulkreq); | |
113 | if (error) | |
e6542132 | 114 | return -errno; |
4cca629d DW |
115 | |
116 | xfrog_bulkstat_v1_to_v5(xfd, bulkstat, &bstat); | |
117 | return 0; | |
118 | } | |
119 | ||
e6542132 | 120 | /* Bulkstat a single inode. Returns zero or a negative error code. */ |
f31b5e12 DW |
121 | int |
122 | xfrog_bulkstat_single( | |
4cca629d DW |
123 | struct xfs_fd *xfd, |
124 | uint64_t ino, | |
125 | unsigned int flags, | |
126 | struct xfs_bulkstat *bulkstat) | |
127 | { | |
128 | int error; | |
129 | ||
130 | if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1) | |
131 | goto try_v1; | |
132 | ||
133 | error = xfrog_bulkstat_single5(xfd, ino, flags, bulkstat); | |
134 | if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5)) | |
135 | return error; | |
136 | ||
137 | /* If the v5 ioctl wasn't found, we punt to v1. */ | |
138 | switch (error) { | |
e6542132 DW |
139 | case -EOPNOTSUPP: |
140 | case -ENOTTY: | |
ac87307e | 141 | assert(!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64)); |
4cca629d DW |
142 | xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1; |
143 | break; | |
144 | } | |
145 | ||
146 | try_v1: | |
147 | return xfrog_bulkstat_single1(xfd, ino, flags, bulkstat); | |
148 | } | |
149 | ||
150 | /* | |
151 | * Set up the necessary control structures to emulate a V5 bulk request ioctl | |
152 | * by calling a V1 bulk request ioctl. This enables callers to run on older | |
153 | * kernels. | |
154 | * | |
155 | * Returns 0 if the emulation should proceed; ECANCELED if there are no | |
e6542132 | 156 | * records; or a negative error code. |
4cca629d DW |
157 | */ |
158 | static int | |
159 | xfrog_bulk_req_v1_setup( | |
f31b5e12 | 160 | struct xfs_fd *xfd, |
4cca629d DW |
161 | struct xfs_bulk_ireq *hdr, |
162 | struct xfs_fsop_bulkreq *bulkreq, | |
163 | size_t rec_size) | |
164 | { | |
165 | void *buf; | |
166 | ||
167 | if (hdr->flags & XFS_BULK_IREQ_AGNO) { | |
168 | uint32_t agno = cvt_ino_to_agno(xfd, hdr->ino); | |
169 | ||
170 | if (hdr->ino == 0) | |
171 | hdr->ino = cvt_agino_to_ino(xfd, hdr->agno, 0); | |
172 | else if (agno < hdr->agno) | |
e6542132 | 173 | return -EINVAL; |
4cca629d DW |
174 | else if (agno > hdr->agno) |
175 | goto no_results; | |
176 | } | |
177 | ||
178 | if (cvt_ino_to_agno(xfd, hdr->ino) > xfd->fsgeom.agcount) | |
179 | goto no_results; | |
180 | ||
181 | buf = malloc(hdr->icount * rec_size); | |
182 | if (!buf) | |
e6542132 | 183 | return -errno; |
4cca629d DW |
184 | |
185 | if (hdr->ino) | |
186 | hdr->ino--; | |
187 | bulkreq->lastip = (__u64 *)&hdr->ino, | |
188 | bulkreq->icount = hdr->icount, | |
189 | bulkreq->ocount = (__s32 *)&hdr->ocount, | |
190 | bulkreq->ubuffer = buf; | |
191 | return 0; | |
192 | ||
193 | no_results: | |
194 | hdr->ocount = 0; | |
e6542132 | 195 | return -ECANCELED; |
4cca629d DW |
196 | } |
197 | ||
/*
 * Clean up after using a V1 bulk request to emulate a V5 bulk request call.
 *
 * If the ioctl was successful, we need to convert the returned V1-format bulk
 * request data into the V5-format bulk request data and copy it into the
 * caller's buffer.  We also need to free all resources allocated during
 * setup.
 *
 * @v1_ino extracts the inode number from one v1 record; @cvt converts one
 * v1 record into one v5 record; @startino_adj is added to the last inode
 * seen to form the next cursor position in hdr->ino.  @error is the result
 * of the preceding setup/ioctl calls and is passed through (with -ECANCELED
 * remapped to 0, since setup uses it to mean "no records").
 */
static int
xfrog_bulk_req_v1_cleanup(
	struct xfs_fd		*xfd,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_fsop_bulkreq	*bulkreq,
	size_t			v1_rec_size,
	uint64_t		(*v1_ino)(void *v1_rec),
	void			*v5_records,
	size_t			v5_rec_size,
	void			(*cvt)(struct xfs_fd *xfd, void *v5, void *v1),
	unsigned int		startino_adj,
	int			error)
{
	/* NOTE: arithmetic on void * below is a GNU C extension. */
	void			*v1_rec = bulkreq->ubuffer;
	void			*v5_rec = v5_records;
	unsigned int		i;

	if (error == -ECANCELED) {
		/* Setup already decided there are no records; report success. */
		error = 0;
		goto free;
	}
	if (error)
		goto free;

	/*
	 * Convert each record from v1 to v5 format, keeping the startino
	 * value up to date and (if desired) stopping at the end of the
	 * AG.
	 */
	for (i = 0;
	     i < hdr->ocount;
	     i++, v1_rec += v1_rec_size, v5_rec += v5_rec_size) {
		uint64_t	ino = v1_ino(v1_rec);

		/* Stop if we hit a different AG. */
		if ((hdr->flags & XFS_BULK_IREQ_AGNO) &&
		    cvt_ino_to_agno(xfd, ino) != hdr->agno) {
			/* Truncate the output count to what we converted. */
			hdr->ocount = i;
			break;
		}
		cvt(xfd, v5_rec, v1_rec);
		/* Advance the v5 cursor past the record we just converted. */
		hdr->ino = ino + startino_adj;
	}

free:
	free(bulkreq->ubuffer);
	return error;
}
254 | ||
255 | static uint64_t xfrog_bstat_ino(void *v1_rec) | |
256 | { | |
257 | return ((struct xfs_bstat *)v1_rec)->bs_ino; | |
258 | } | |
259 | ||
/*
 * Adapt xfrog_bulkstat_v1_to_v5 to the generic record-conversion callback
 * signature used by xfrog_bulk_req_v1_cleanup.
 */
static void xfrog_bstat_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_bulkstat_v1_to_v5(xfd, v5, v1);
}
264 | ||
265 | /* Bulkstat a bunch of inodes using the v5 interface. */ | |
266 | static int | |
267 | xfrog_bulkstat5( | |
268 | struct xfs_fd *xfd, | |
269 | struct xfs_bulkstat_req *req) | |
f31b5e12 | 270 | { |
f31b5e12 | 271 | int ret; |
ac87307e CB |
272 | int i; |
273 | ||
274 | if (xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64) | |
275 | req->hdr.flags |= XFS_BULK_IREQ_NREXT64; | |
f31b5e12 | 276 | |
4cca629d | 277 | ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req); |
f31b5e12 | 278 | if (ret) |
e6542132 | 279 | return -errno; |
ac87307e CB |
280 | |
281 | if (!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64)) { | |
282 | for (i = 0; i < req->hdr.ocount; i++) { | |
283 | req->bulkstat[i].bs_extents64 = | |
284 | req->bulkstat[i].bs_extents; | |
285 | req->bulkstat[i].bs_extents = 0; | |
286 | } | |
287 | } | |
288 | ||
f31b5e12 DW |
289 | return 0; |
290 | } | |
291 | ||
4cca629d DW |
292 | /* Bulkstat a bunch of inodes using the v1 interface. */ |
293 | static int | |
294 | xfrog_bulkstat1( | |
295 | struct xfs_fd *xfd, | |
296 | struct xfs_bulkstat_req *req) | |
297 | { | |
298 | struct xfs_fsop_bulkreq bulkreq = { 0 }; | |
299 | int error; | |
300 | ||
301 | error = xfrog_bulkstat_prep_v1_emulation(xfd); | |
302 | if (error) | |
303 | return error; | |
304 | ||
305 | error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq, | |
306 | sizeof(struct xfs_bstat)); | |
e6542132 | 307 | if (error == -ECANCELED) |
4cca629d DW |
308 | goto out_teardown; |
309 | if (error) | |
310 | return error; | |
311 | ||
312 | error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT, &bulkreq); | |
313 | if (error) | |
e6542132 | 314 | error = -errno; |
4cca629d DW |
315 | |
316 | out_teardown: | |
317 | return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq, | |
318 | sizeof(struct xfs_bstat), xfrog_bstat_ino, | |
319 | &req->bulkstat, sizeof(struct xfs_bulkstat), | |
320 | xfrog_bstat_cvt, 1, error); | |
321 | } | |
322 | ||
f31b5e12 DW |
/* Bulkstat a bunch of inodes.  Returns zero or a negative error code. */
int
xfrog_bulkstat(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat_req	*req)
{
	int			error;

	if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
		goto try_v1;

	error = xfrog_bulkstat5(xfd, req);
	if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
		return error;

	/* If the v5 ioctl wasn't found, we punt to v1. */
	switch (error) {
	case -EOPNOTSUPP:
	case -ENOTTY:
		/* Large extent counters cannot be emulated via v1. */
		assert(!(xfd->fsgeom.flags & XFS_FSOP_GEOM_FLAGS_NREXT64));
		/* Remember the decision so later calls skip the v5 probe. */
		xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
		break;
	}

try_v1:
	return xfrog_bulkstat1(xfd, req);
}
350 | ||
/*
 * Decide if a 64-bit timestamp cannot be represented in a time_t.
 *
 * time_t is a signed integer type on every supported platform, so its
 * maximum value has every bit set except the sign bit.  The previous
 * implementation memset a time_t to 0xFF, which produces -1 for a signed
 * type; converted to uint64_t for the comparison that became UINT64_MAX,
 * so the check always returned false and oversized timestamps were never
 * rejected.
 */
static bool
time_too_big(
	uint64_t	time)
{
	/* Maximum value of a signed integer as wide as time_t. */
	uint64_t	max = ((uint64_t)1 << (8 * sizeof(time_t) - 1)) - 1;

	return time > max;
}
360 | ||
/*
 * Convert bulkstat data from v5 format to v1 format.  Returns zero or
 * -ERANGE if any v5 value does not fit in the narrower v1 fields.
 */
int
xfrog_bulkstat_v5_to_v1(
	struct xfs_fd		*xfd,
	struct xfs_bstat	*bs1,
	const struct xfs_bulkstat	*bs5)
{
	/*
	 * Reject anything that cannot be represented in v1: 16-bit attr
	 * extent count, signed 32-bit data extent count, 32-bit byte-unit
	 * extent size hints, and time_t-sized timestamps.
	 */
	if (bs5->bs_aextents > UINT16_MAX ||
	    bs5->bs_extents64 > INT32_MAX ||
	    cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks) > UINT32_MAX ||
	    cvt_off_fsb_to_b(xfd, bs5->bs_cowextsize_blks) > UINT32_MAX ||
	    time_too_big(bs5->bs_atime) ||
	    time_too_big(bs5->bs_ctime) ||
	    time_too_big(bs5->bs_mtime))
		return -ERANGE;

	bs1->bs_ino = bs5->bs_ino;
	bs1->bs_mode = bs5->bs_mode;
	bs1->bs_nlink = bs5->bs_nlink;
	bs1->bs_uid = bs5->bs_uid;
	bs1->bs_gid = bs5->bs_gid;
	bs1->bs_rdev = bs5->bs_rdev;
	bs1->bs_blksize = bs5->bs_blksize;
	bs1->bs_size = bs5->bs_size;
	/* v5 stores seconds and nanoseconds in separate fields. */
	bs1->bs_atime.tv_sec = bs5->bs_atime;
	bs1->bs_mtime.tv_sec = bs5->bs_mtime;
	bs1->bs_ctime.tv_sec = bs5->bs_ctime;
	bs1->bs_atime.tv_nsec = bs5->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bs5->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bs5->bs_ctime_nsec;
	bs1->bs_blocks = bs5->bs_blocks;
	bs1->bs_xflags = bs5->bs_xflags;
	/* v5 keeps extent size hints in fs blocks; v1 wants bytes. */
	bs1->bs_extsize = cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks);
	bs1->bs_extents = bs5->bs_extents64;
	bs1->bs_gen = bs5->bs_gen;
	/* v1 splits the 32-bit project id into two 16-bit halves. */
	bs1->bs_projid_lo = bs5->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bs5->bs_forkoff;
	bs1->bs_projid_hi = bs5->bs_projectid >> 16;
	bs1->bs_sick = bs5->bs_sick;
	bs1->bs_checked = bs5->bs_checked;
	bs1->bs_cowextsize = cvt_off_fsb_to_b(xfd, bs5->bs_cowextsize_blks);
	/* DMAPI fields have no v5 counterpart; zero them. */
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bs5->bs_aextents;
	return 0;
}
621f3374 | 407 | |
4cca629d DW |
/*
 * Convert bulkstat data from v1 format to v5 format.  Widening never
 * fails, so there is no return value.
 */
void
xfrog_bulkstat_v1_to_v5(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat	*bs5,
	const struct xfs_bstat	*bs1)
{
	/* Zero everything so fields with no v1 counterpart read as zero. */
	memset(bs5, 0, sizeof(*bs5));
	bs5->bs_version = XFS_BULKSTAT_VERSION_V1;

	bs5->bs_ino = bs1->bs_ino;
	bs5->bs_mode = bs1->bs_mode;
	bs5->bs_nlink = bs1->bs_nlink;
	bs5->bs_uid = bs1->bs_uid;
	bs5->bs_gid = bs1->bs_gid;
	bs5->bs_rdev = bs1->bs_rdev;
	bs5->bs_blksize = bs1->bs_blksize;
	bs5->bs_size = bs1->bs_size;
	/* v1 packs timestamps as timespecs; v5 splits sec and nsec. */
	bs5->bs_atime = bs1->bs_atime.tv_sec;
	bs5->bs_mtime = bs1->bs_mtime.tv_sec;
	bs5->bs_ctime = bs1->bs_ctime.tv_sec;
	bs5->bs_atime_nsec = bs1->bs_atime.tv_nsec;
	bs5->bs_mtime_nsec = bs1->bs_mtime.tv_nsec;
	bs5->bs_ctime_nsec = bs1->bs_ctime.tv_nsec;
	bs5->bs_blocks = bs1->bs_blocks;
	bs5->bs_xflags = bs1->bs_xflags;
	/* v1 keeps extent size hints in bytes; v5 wants fs blocks. */
	bs5->bs_extsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_extsize);
	bs5->bs_gen = bs1->bs_gen;
	/* Reassemble the project id from the v1 lo/hi halves. */
	bs5->bs_projectid = bstat_get_projid(bs1);
	bs5->bs_forkoff = bs1->bs_forkoff;
	bs5->bs_sick = bs1->bs_sick;
	bs5->bs_checked = bs1->bs_checked;
	bs5->bs_cowextsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_cowextsize);
	bs5->bs_aextents = bs1->bs_aextents;
	bs5->bs_extents64 = bs1->bs_extents;
}
444 | ||
e6542132 DW |
445 | /* Allocate a bulkstat request. Returns zero or a negative error code. */ |
446 | int | |
4cca629d DW |
447 | xfrog_bulkstat_alloc_req( |
448 | uint32_t nr, | |
e6542132 DW |
449 | uint64_t startino, |
450 | struct xfs_bulkstat_req **preq) | |
4cca629d DW |
451 | { |
452 | struct xfs_bulkstat_req *breq; | |
453 | ||
454 | breq = calloc(1, XFS_BULKSTAT_REQ_SIZE(nr)); | |
455 | if (!breq) | |
0779e54d | 456 | return -ENOMEM; |
4cca629d DW |
457 | |
458 | breq->hdr.icount = nr; | |
459 | breq->hdr.ino = startino; | |
460 | ||
e6542132 DW |
461 | *preq = breq; |
462 | return 0; | |
4cca629d DW |
463 | } |
464 | ||
3c8276c4 DW |
465 | /* Set a bulkstat cursor to iterate only a particular AG. */ |
466 | void | |
467 | xfrog_bulkstat_set_ag( | |
468 | struct xfs_bulkstat_req *req, | |
469 | uint32_t agno) | |
470 | { | |
471 | req->hdr.agno = agno; | |
472 | req->hdr.flags |= XFS_BULK_IREQ_AGNO; | |
473 | } | |
474 | ||
b94a69ac DW |
475 | /* Convert a inumbers data from v5 format to v1 format. */ |
476 | void | |
477 | xfrog_inumbers_v5_to_v1( | |
478 | struct xfs_inogrp *ig1, | |
479 | const struct xfs_inumbers *ig5) | |
480 | { | |
481 | ig1->xi_startino = ig5->xi_startino; | |
482 | ig1->xi_alloccount = ig5->xi_alloccount; | |
483 | ig1->xi_allocmask = ig5->xi_allocmask; | |
484 | } | |
485 | ||
486 | /* Convert a inumbers data from v1 format to v5 format. */ | |
487 | void | |
488 | xfrog_inumbers_v1_to_v5( | |
489 | struct xfs_inumbers *ig5, | |
490 | const struct xfs_inogrp *ig1) | |
491 | { | |
492 | memset(ig5, 0, sizeof(*ig5)); | |
493 | ig5->xi_version = XFS_INUMBERS_VERSION_V1; | |
494 | ||
495 | ig5->xi_startino = ig1->xi_startino; | |
496 | ig5->xi_alloccount = ig1->xi_alloccount; | |
497 | ig5->xi_allocmask = ig1->xi_allocmask; | |
498 | } | |
499 | ||
500 | static uint64_t xfrog_inum_ino(void *v1_rec) | |
501 | { | |
502 | return ((struct xfs_inogrp *)v1_rec)->xi_startino; | |
503 | } | |
504 | ||
/*
 * Adapt xfrog_inumbers_v1_to_v5 to the generic record-conversion callback
 * signature used by xfrog_bulk_req_v1_cleanup; @xfd is unused here.
 */
static void xfrog_inum_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_inumbers_v1_to_v5(v5, v1);
}
509 | ||
510 | /* Query inode allocation bitmask information using v5 ioctl. */ | |
511 | static int | |
512 | xfrog_inumbers5( | |
513 | struct xfs_fd *xfd, | |
514 | struct xfs_inumbers_req *req) | |
515 | { | |
516 | int ret; | |
517 | ||
518 | ret = ioctl(xfd->fd, XFS_IOC_INUMBERS, req); | |
519 | if (ret) | |
e6542132 | 520 | return -errno; |
b94a69ac DW |
521 | return 0; |
522 | } | |
523 | ||
524 | /* Query inode allocation bitmask information using v1 ioctl. */ | |
525 | static int | |
526 | xfrog_inumbers1( | |
527 | struct xfs_fd *xfd, | |
528 | struct xfs_inumbers_req *req) | |
529 | { | |
530 | struct xfs_fsop_bulkreq bulkreq = { 0 }; | |
531 | int error; | |
532 | ||
533 | error = xfrog_bulkstat_prep_v1_emulation(xfd); | |
534 | if (error) | |
535 | return error; | |
536 | ||
537 | error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq, | |
538 | sizeof(struct xfs_inogrp)); | |
e6542132 | 539 | if (error == -ECANCELED) |
b94a69ac DW |
540 | goto out_teardown; |
541 | if (error) | |
542 | return error; | |
543 | ||
544 | error = ioctl(xfd->fd, XFS_IOC_FSINUMBERS, &bulkreq); | |
545 | if (error) | |
e6542132 | 546 | error = -errno; |
b94a69ac DW |
547 | |
548 | out_teardown: | |
549 | return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq, | |
550 | sizeof(struct xfs_inogrp), xfrog_inum_ino, | |
551 | &req->inumbers, sizeof(struct xfs_inumbers), | |
552 | xfrog_inum_cvt, 64, error); | |
553 | } | |
554 | ||
621f3374 | 555 | /* |
e6542132 | 556 | * Query inode allocation bitmask information. Returns zero or a negative |
621f3374 DW |
557 | * error code. |
558 | */ | |
559 | int | |
560 | xfrog_inumbers( | |
561 | struct xfs_fd *xfd, | |
b94a69ac | 562 | struct xfs_inumbers_req *req) |
621f3374 | 563 | { |
b94a69ac | 564 | int error; |
621f3374 | 565 | |
b94a69ac DW |
566 | if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1) |
567 | goto try_v1; | |
568 | ||
569 | error = xfrog_inumbers5(xfd, req); | |
570 | if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5)) | |
571 | return error; | |
572 | ||
573 | /* If the v5 ioctl wasn't found, we punt to v1. */ | |
574 | switch (error) { | |
e6542132 DW |
575 | case -EOPNOTSUPP: |
576 | case -ENOTTY: | |
b94a69ac DW |
577 | xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1; |
578 | break; | |
579 | } | |
580 | ||
581 | try_v1: | |
582 | return xfrog_inumbers1(xfd, req); | |
583 | } | |
584 | ||
e6542132 DW |
585 | /* Allocate a inumbers request. Returns zero or a negative error code. */ |
586 | int | |
b94a69ac DW |
587 | xfrog_inumbers_alloc_req( |
588 | uint32_t nr, | |
e6542132 DW |
589 | uint64_t startino, |
590 | struct xfs_inumbers_req **preq) | |
b94a69ac DW |
591 | { |
592 | struct xfs_inumbers_req *ireq; | |
593 | ||
594 | ireq = calloc(1, XFS_INUMBERS_REQ_SIZE(nr)); | |
595 | if (!ireq) | |
e6542132 | 596 | return -errno; |
b94a69ac DW |
597 | |
598 | ireq->hdr.icount = nr; | |
599 | ireq->hdr.ino = startino; | |
600 | ||
e6542132 DW |
601 | *preq = ireq; |
602 | return 0; | |
621f3374 | 603 | } |
3c8276c4 DW |
604 | |
605 | /* Set an inumbers cursor to iterate only a particular AG. */ | |
606 | void | |
607 | xfrog_inumbers_set_ag( | |
608 | struct xfs_inumbers_req *req, | |
609 | uint32_t agno) | |
610 | { | |
611 | req->hdr.agno = agno; | |
612 | req->hdr.flags |= XFS_BULK_IREQ_AGNO; | |
613 | } |