// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <string.h>
#include <strings.h>
#include "xfs.h"
#include "fsgeom.h"
#include "bulkstat.h"

/*
 * Wrapper functions for BULKSTAT and INUMBERS
 * ===========================================
 *
 * The functions in this file are thin wrappers around the most recent version
 * of the BULKSTAT and INUMBERS ioctls.  BULKSTAT is used to query XFS-specific
 * stat information about a group of inodes.  INUMBERS is used to query
 * allocation information about batches of XFS inodes.
 *
 * At the moment, the public xfrog_* functions provide all functionality of the
 * V5 interface.  If the V5 interface is not available on the running kernel,
 * the functions emulate it as best they can with previous versions of the
 * interface (currently V1).  If emulation is not possible, -EINVAL is
 * returned.
 *
 * The XFROG_FLAG_BULKSTAT_FORCE_V[15] flags can be used to force use of a
 * particular version of the kernel interface for testing.
 */

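/*
 * Example usage (a sketch, not part of the library): open a filesystem,
 * bulkstat one inode, and clean up.  This assumes the xfd_open/xfd_close
 * helpers and XFS_FD_INIT_EMPTY initializer declared in fsgeom.h; the
 * "/mnt" path and inode number 128 are arbitrary placeholders.
 *
 *	struct xfs_fd		xfd = XFS_FD_INIT_EMPTY;
 *	struct xfs_bulkstat	bulkstat;
 *	int			error;
 *
 *	error = xfd_open(&xfd, "/mnt", O_RDONLY);
 *	if (error)
 *		return error;
 *	error = xfrog_bulkstat_single(&xfd, 128, 0, &bulkstat);
 *	if (!error)
 *		printf("ino %llu size %llu\n",
 *				(unsigned long long)bulkstat.bs_ino,
 *				(unsigned long long)bulkstat.bs_size);
 *	xfd_close(&xfd);
 */
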
/*
 * Grab the fs geometry information that is needed to emulate v5 with v1
 * interfaces.
 */
static inline int
xfrog_bulkstat_prep_v1_emulation(
	struct xfs_fd		*xfd)
{
	if (xfd->fsgeom.blocksize > 0)
		return 0;

	return xfd_prepare_geometry(xfd);
}

/* Bulkstat a single inode using v5 ioctl. */
static int
xfrog_bulkstat_single5(
	struct xfs_fd		*xfd,
	uint64_t		ino,
	unsigned int		flags,
	struct xfs_bulkstat	*bulkstat)
{
	struct xfs_bulkstat_req	*req;
	int			ret;

	if (flags & ~(XFS_BULK_IREQ_SPECIAL))
		return -EINVAL;

	ret = xfrog_bulkstat_alloc_req(1, ino, &req);
	if (ret)
		return ret;

	req->hdr.flags = flags;
	ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req);
	if (ret) {
		ret = -errno;
		goto free;
	}

	if (req->hdr.ocount == 0) {
		ret = -ENOENT;
		goto free;
	}

	memcpy(bulkstat, req->bulkstat, sizeof(struct xfs_bulkstat));
free:
	free(req);
	return ret;
}

/* Bulkstat a single inode using v1 ioctl. */
static int
xfrog_bulkstat_single1(
	struct xfs_fd		*xfd,
	uint64_t		ino,
	unsigned int		flags,
	struct xfs_bulkstat	*bulkstat)
{
	struct xfs_bstat	bstat;
	struct xfs_fsop_bulkreq	bulkreq = { 0 };
	int			error;

	if (flags)
		return -EINVAL;

	error = xfrog_bulkstat_prep_v1_emulation(xfd);
	if (error)
		return error;

	bulkreq.lastip = (__u64 *)&ino;
	bulkreq.icount = 1;
	bulkreq.ubuffer = &bstat;
	error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT_SINGLE, &bulkreq);
	if (error)
		return -errno;

	xfrog_bulkstat_v1_to_v5(xfd, bulkstat, &bstat);
	return 0;
}

/* Bulkstat a single inode.  Returns zero or a negative error code. */
int
xfrog_bulkstat_single(
	struct xfs_fd		*xfd,
	uint64_t		ino,
	unsigned int		flags,
	struct xfs_bulkstat	*bulkstat)
{
	int			error;

	if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
		goto try_v1;

	error = xfrog_bulkstat_single5(xfd, ino, flags, bulkstat);
	if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
		return error;

	/* If the v5 ioctl wasn't found, we punt to v1. */
	switch (error) {
	case -EOPNOTSUPP:
	case -ENOTTY:
		xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
		break;
	}

try_v1:
	return xfrog_bulkstat_single1(xfd, ino, flags, bulkstat);
}

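/*
 * Example (a sketch): the XFS_BULK_IREQ_SPECIAL flag asks the kernel to
 * interpret the inode number as a special code; on kernels that define
 * XFS_BULK_IREQ_SPECIAL_ROOT this should resolve to the root directory
 * inode.  The flag cannot be emulated via the v1 ioctl, so the call fails
 * with -EINVAL on older kernels.
 *
 *	struct xfs_bulkstat	bulkstat;
 *	int			error;
 *
 *	error = xfrog_bulkstat_single(xfd, XFS_BULK_IREQ_SPECIAL_ROOT,
 *			XFS_BULK_IREQ_SPECIAL, &bulkstat);
 */
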
/*
 * Set up the necessary control structures to emulate a V5 bulk request ioctl
 * by calling a V1 bulk request ioctl.  This enables callers to run on older
 * kernels.
 *
 * Returns 0 if the emulation should proceed; -ECANCELED if there are no
 * records; or a negative error code.
 */
static int
xfrog_bulk_req_v1_setup(
	struct xfs_fd		*xfd,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_fsop_bulkreq	*bulkreq,
	size_t			rec_size)
{
	void			*buf;

	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		uint32_t	agno = cvt_ino_to_agno(xfd, hdr->ino);

		if (hdr->ino == 0)
			hdr->ino = cvt_agino_to_ino(xfd, hdr->agno, 0);
		else if (agno < hdr->agno)
			return -EINVAL;
		else if (agno > hdr->agno)
			goto no_results;
	}

	if (cvt_ino_to_agno(xfd, hdr->ino) > xfd->fsgeom.agcount)
		goto no_results;

	buf = malloc(hdr->icount * rec_size);
	if (!buf)
		return -errno;

	if (hdr->ino)
		hdr->ino--;
	bulkreq->lastip = (__u64 *)&hdr->ino;
	bulkreq->icount = hdr->icount;
	bulkreq->ocount = (__s32 *)&hdr->ocount;
	bulkreq->ubuffer = buf;
	return 0;

no_results:
	hdr->ocount = 0;
	return -ECANCELED;
}

/*
 * Clean up after using a V1 bulk request to emulate a V5 bulk request call.
 *
 * If the ioctl was successful, we need to convert the returned V1-format bulk
 * request data into the V5-format bulk request data and copy it into the
 * caller's buffer.  We also need to free all resources allocated during
 * setup.
 */
static int
xfrog_bulk_req_v1_cleanup(
	struct xfs_fd		*xfd,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_fsop_bulkreq	*bulkreq,
	size_t			v1_rec_size,
	uint64_t		(*v1_ino)(void *v1_rec),
	void			*v5_records,
	size_t			v5_rec_size,
	void			(*cvt)(struct xfs_fd *xfd, void *v5, void *v1),
	unsigned int		startino_adj,
	int			error)
{
	void			*v1_rec = bulkreq->ubuffer;
	void			*v5_rec = v5_records;
	unsigned int		i;

	if (error == -ECANCELED) {
		error = 0;
		goto free;
	}
	if (error)
		goto free;

	/*
	 * Convert each record from v1 to v5 format, keeping the startino
	 * value up to date and (if desired) stopping at the end of the
	 * AG.
	 */
	for (i = 0;
	     i < hdr->ocount;
	     i++, v1_rec += v1_rec_size, v5_rec += v5_rec_size) {
		uint64_t	ino = v1_ino(v1_rec);

		/* Stop if we hit a different AG. */
		if ((hdr->flags & XFS_BULK_IREQ_AGNO) &&
		    cvt_ino_to_agno(xfd, ino) != hdr->agno) {
			hdr->ocount = i;
			break;
		}
		cvt(xfd, v5_rec, v1_rec);
		hdr->ino = ino + startino_adj;
	}

free:
	free(bulkreq->ubuffer);
	return error;
}

static uint64_t xfrog_bstat_ino(void *v1_rec)
{
	return ((struct xfs_bstat *)v1_rec)->bs_ino;
}

static void xfrog_bstat_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_bulkstat_v1_to_v5(xfd, v5, v1);
}

/* Bulkstat a bunch of inodes using the v5 interface. */
static int
xfrog_bulkstat5(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat_req	*req)
{
	int			ret;

	ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req);
	if (ret)
		return -errno;
	return 0;
}

/* Bulkstat a bunch of inodes using the v1 interface. */
static int
xfrog_bulkstat1(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat_req	*req)
{
	struct xfs_fsop_bulkreq	bulkreq = { 0 };
	int			error;

	error = xfrog_bulkstat_prep_v1_emulation(xfd);
	if (error)
		return error;

	error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_bstat));
	if (error == -ECANCELED)
		goto out_teardown;
	if (error)
		return error;

	error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT, &bulkreq);
	if (error)
		error = -errno;

out_teardown:
	return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_bstat), xfrog_bstat_ino,
			&req->bulkstat, sizeof(struct xfs_bulkstat),
			xfrog_bstat_cvt, 1, error);
}

/* Bulkstat a bunch of inodes.  Returns zero or a negative error code. */
int
xfrog_bulkstat(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat_req	*req)
{
	int			error;

	if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
		goto try_v1;

	error = xfrog_bulkstat5(xfd, req);
	if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
		return error;

	/* If the v5 ioctl wasn't found, we punt to v1. */
	switch (error) {
	case -EOPNOTSUPP:
	case -ENOTTY:
		xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
		break;
	}

try_v1:
	return xfrog_bulkstat1(xfd, req);
}

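/*
 * Example (a sketch): walk every inode in the filesystem in batches of 64.
 * The request header tracks the cursor, so the loop simply reissues the
 * call until the kernel returns no records; handle_inode() is a
 * hypothetical per-inode callback.
 *
 *	struct xfs_bulkstat_req	*breq;
 *	uint32_t		i;
 *	int			error;
 *
 *	error = xfrog_bulkstat_alloc_req(64, 0, &breq);
 *	if (error)
 *		return error;
 *	while ((error = xfrog_bulkstat(xfd, breq)) == 0 &&
 *	       breq->hdr.ocount > 0) {
 *		for (i = 0; i < breq->hdr.ocount; i++)
 *			handle_inode(&breq->bulkstat[i]);
 *	}
 *	free(breq);
 */
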
static bool
time_too_big(
	uint64_t		time)
{
	time_t			TIME_MAX;

	/*
	 * Compute the largest value a time_t can hold.  Setting all bits
	 * (as a memset would) yields -1 for the usual signed time_t, so
	 * build the maximum explicitly.
	 */
	if ((time_t)-1 < 0)
		TIME_MAX = (time_t)((1ULL << (sizeof(time_t) * 8 - 1)) - 1);
	else
		TIME_MAX = (time_t)~0ULL;
	return time > (uint64_t)TIME_MAX;
}

/* Convert bulkstat data from v5 format to v1 format. */
int
xfrog_bulkstat_v5_to_v1(
	struct xfs_fd		*xfd,
	struct xfs_bstat	*bs1,
	const struct xfs_bulkstat	*bs5)
{
	if (bs5->bs_aextents > UINT16_MAX ||
	    cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks) > UINT32_MAX ||
	    cvt_off_fsb_to_b(xfd, bs5->bs_cowextsize_blks) > UINT32_MAX ||
	    time_too_big(bs5->bs_atime) ||
	    time_too_big(bs5->bs_ctime) ||
	    time_too_big(bs5->bs_mtime))
		return -ERANGE;

	bs1->bs_ino = bs5->bs_ino;
	bs1->bs_mode = bs5->bs_mode;
	bs1->bs_nlink = bs5->bs_nlink;
	bs1->bs_uid = bs5->bs_uid;
	bs1->bs_gid = bs5->bs_gid;
	bs1->bs_rdev = bs5->bs_rdev;
	bs1->bs_blksize = bs5->bs_blksize;
	bs1->bs_size = bs5->bs_size;
	bs1->bs_atime.tv_sec = bs5->bs_atime;
	bs1->bs_mtime.tv_sec = bs5->bs_mtime;
	bs1->bs_ctime.tv_sec = bs5->bs_ctime;
	bs1->bs_atime.tv_nsec = bs5->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bs5->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bs5->bs_ctime_nsec;
	bs1->bs_blocks = bs5->bs_blocks;
	bs1->bs_xflags = bs5->bs_xflags;
	bs1->bs_extsize = cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks);
	bs1->bs_extents = bs5->bs_extents;
	bs1->bs_gen = bs5->bs_gen;
	bs1->bs_projid_lo = bs5->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bs5->bs_forkoff;
	bs1->bs_projid_hi = bs5->bs_projectid >> 16;
	bs1->bs_sick = bs5->bs_sick;
	bs1->bs_checked = bs5->bs_checked;
	bs1->bs_cowextsize = cvt_off_fsb_to_b(xfd, bs5->bs_cowextsize_blks);
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bs5->bs_aextents;
	return 0;
}

/* Convert bulkstat data from v1 format to v5 format. */
void
xfrog_bulkstat_v1_to_v5(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat	*bs5,
	const struct xfs_bstat	*bs1)
{
	memset(bs5, 0, sizeof(*bs5));
	bs5->bs_version = XFS_BULKSTAT_VERSION_V1;

	bs5->bs_ino = bs1->bs_ino;
	bs5->bs_mode = bs1->bs_mode;
	bs5->bs_nlink = bs1->bs_nlink;
	bs5->bs_uid = bs1->bs_uid;
	bs5->bs_gid = bs1->bs_gid;
	bs5->bs_rdev = bs1->bs_rdev;
	bs5->bs_blksize = bs1->bs_blksize;
	bs5->bs_size = bs1->bs_size;
	bs5->bs_atime = bs1->bs_atime.tv_sec;
	bs5->bs_mtime = bs1->bs_mtime.tv_sec;
	bs5->bs_ctime = bs1->bs_ctime.tv_sec;
	bs5->bs_atime_nsec = bs1->bs_atime.tv_nsec;
	bs5->bs_mtime_nsec = bs1->bs_mtime.tv_nsec;
	bs5->bs_ctime_nsec = bs1->bs_ctime.tv_nsec;
	bs5->bs_blocks = bs1->bs_blocks;
	bs5->bs_xflags = bs1->bs_xflags;
	bs5->bs_extsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_extsize);
	bs5->bs_extents = bs1->bs_extents;
	bs5->bs_gen = bs1->bs_gen;
	bs5->bs_projectid = bstat_get_projid(bs1);
	bs5->bs_forkoff = bs1->bs_forkoff;
	bs5->bs_sick = bs1->bs_sick;
	bs5->bs_checked = bs1->bs_checked;
	bs5->bs_cowextsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_cowextsize);
	bs5->bs_aextents = bs1->bs_aextents;
}

/* Allocate a bulkstat request.  Returns zero or a negative error code. */
int
xfrog_bulkstat_alloc_req(
	uint32_t		nr,
	uint64_t		startino,
	struct xfs_bulkstat_req	**preq)
{
	struct xfs_bulkstat_req	*breq;

	breq = calloc(1, XFS_BULKSTAT_REQ_SIZE(nr));
	if (!breq)
		return -errno;

	breq->hdr.icount = nr;
	breq->hdr.ino = startino;

	*preq = breq;
	return 0;
}
439
440 /* Set a bulkstat cursor to iterate only a particular AG. */
441 void
442 xfrog_bulkstat_set_ag(
443 struct xfs_bulkstat_req *req,
444 uint32_t agno)
445 {
446 req->hdr.agno = agno;
447 req->hdr.flags |= XFS_BULK_IREQ_AGNO;
448 }
449
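/*
 * Example (a sketch): restrict a bulkstat scan to AG 2.  When hdr.ino is
 * zero, the cursor should start at the AG's first inode (the v1 emulation
 * above does this explicitly via cvt_agino_to_ino()).
 *
 *	error = xfrog_bulkstat_alloc_req(64, 0, &breq);
 *	if (!error) {
 *		xfrog_bulkstat_set_ag(breq, 2);
 *		error = xfrog_bulkstat(xfd, breq);
 *	}
 */
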
/* Convert inumbers data from v5 format to v1 format. */
void
xfrog_inumbers_v5_to_v1(
	struct xfs_inogrp	*ig1,
	const struct xfs_inumbers	*ig5)
{
	ig1->xi_startino = ig5->xi_startino;
	ig1->xi_alloccount = ig5->xi_alloccount;
	ig1->xi_allocmask = ig5->xi_allocmask;
}

/* Convert inumbers data from v1 format to v5 format. */
void
xfrog_inumbers_v1_to_v5(
	struct xfs_inumbers	*ig5,
	const struct xfs_inogrp	*ig1)
{
	memset(ig5, 0, sizeof(*ig5));
	ig5->xi_version = XFS_INUMBERS_VERSION_V1;

	ig5->xi_startino = ig1->xi_startino;
	ig5->xi_alloccount = ig1->xi_alloccount;
	ig5->xi_allocmask = ig1->xi_allocmask;
}

static uint64_t xfrog_inum_ino(void *v1_rec)
{
	return ((struct xfs_inogrp *)v1_rec)->xi_startino;
}

static void xfrog_inum_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_inumbers_v1_to_v5(v5, v1);
}

/* Query inode allocation bitmask information using v5 ioctl. */
static int
xfrog_inumbers5(
	struct xfs_fd		*xfd,
	struct xfs_inumbers_req	*req)
{
	int			ret;

	ret = ioctl(xfd->fd, XFS_IOC_INUMBERS, req);
	if (ret)
		return -errno;
	return 0;
}

/* Query inode allocation bitmask information using v1 ioctl. */
static int
xfrog_inumbers1(
	struct xfs_fd		*xfd,
	struct xfs_inumbers_req	*req)
{
	struct xfs_fsop_bulkreq	bulkreq = { 0 };
	int			error;

	error = xfrog_bulkstat_prep_v1_emulation(xfd);
	if (error)
		return error;

	error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_inogrp));
	if (error == -ECANCELED)
		goto out_teardown;
	if (error)
		return error;

	error = ioctl(xfd->fd, XFS_IOC_FSINUMBERS, &bulkreq);
	if (error)
		error = -errno;

out_teardown:
	return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_inogrp), xfrog_inum_ino,
			&req->inumbers, sizeof(struct xfs_inumbers),
			xfrog_inum_cvt, 64, error);
}

/*
 * Query inode allocation bitmask information.  Returns zero or a negative
 * error code.
 */
int
xfrog_inumbers(
	struct xfs_fd		*xfd,
	struct xfs_inumbers_req	*req)
{
	int			error;

	if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
		goto try_v1;

	error = xfrog_inumbers5(xfd, req);
	if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
		return error;

	/* If the v5 ioctl wasn't found, we punt to v1. */
	switch (error) {
	case -EOPNOTSUPP:
	case -ENOTTY:
		xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
		break;
	}

try_v1:
	return xfrog_inumbers1(xfd, req);
}

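/*
 * Example (a sketch): scan inode allocation records in batches of 16;
 * handle_inogrp() is a hypothetical per-record callback.
 *
 *	struct xfs_inumbers_req	*ireq;
 *	uint32_t		i;
 *	int			error;
 *
 *	error = xfrog_inumbers_alloc_req(16, 0, &ireq);
 *	if (error)
 *		return error;
 *	while ((error = xfrog_inumbers(xfd, ireq)) == 0 &&
 *	       ireq->hdr.ocount > 0) {
 *		for (i = 0; i < ireq->hdr.ocount; i++)
 *			handle_inogrp(&ireq->inumbers[i]);
 *	}
 *	free(ireq);
 */
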
/* Allocate an inumbers request.  Returns zero or a negative error code. */
int
xfrog_inumbers_alloc_req(
	uint32_t		nr,
	uint64_t		startino,
	struct xfs_inumbers_req	**preq)
{
	struct xfs_inumbers_req	*ireq;

	ireq = calloc(1, XFS_INUMBERS_REQ_SIZE(nr));
	if (!ireq)
		return -errno;

	ireq->hdr.icount = nr;
	ireq->hdr.ino = startino;

	*preq = ireq;
	return 0;
}

/* Set an inumbers cursor to iterate only a particular AG. */
void
xfrog_inumbers_set_ag(
	struct xfs_inumbers_req	*req,
	uint32_t		agno)
{
	req->hdr.agno = agno;
	req->hdr.flags |= XFS_BULK_IREQ_AGNO;
}