// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include <string.h>
#include <strings.h>
#include "xfs.h"
#include "fsgeom.h"
#include "bulkstat.h"

/*
 * Wrapper functions for BULKSTAT and INUMBERS
 * ===========================================
 *
 * The functions in this file are thin wrappers around the most recent version
 * of the BULKSTAT and INUMBERS ioctls. BULKSTAT is used to query XFS-specific
 * stat information about a group of inodes. INUMBERS is used to query
 * allocation information about batches of XFS inodes.
 *
 * At the moment, the public xfrog_* functions provide all functionality of the
 * V5 interface. If the V5 interface is not available on the running kernel,
 * the functions will emulate it as best they can with previous versions of
 * the interface (currently V1). If emulation is not possible, EINVAL will be
 * returned.
 *
 * The XFROG_FLAG_BULKSTAT_FORCE_V[15] flags can be used to force use of a
 * particular version of the kernel interface for testing.
 */
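
/*
 * For example, a test that wants to exercise only the old interface can set
 * the force flag before issuing any requests (a sketch; assumes xfd points
 * to an already-opened struct xfs_fd):
 *
 *	xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
 *	error = xfrog_bulkstat(xfd, req);
 */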

/*
 * Grab the fs geometry information that is needed to emulate v5 with v1
 * interfaces.
 */
static inline int
xfrog_bulkstat_prep_v1_emulation(
	struct xfs_fd		*xfd)
{
	if (xfd->fsgeom.blocksize > 0)
		return 0;

	return xfd_prepare_geometry(xfd);
}

/* Bulkstat a single inode using v5 ioctl. */
static int
xfrog_bulkstat_single5(
	struct xfs_fd		*xfd,
	uint64_t		ino,
	unsigned int		flags,
	struct xfs_bulkstat	*bulkstat)
{
	struct xfs_bulkstat_req	*req;
	int			ret;

	if (flags & ~(XFS_BULK_IREQ_SPECIAL))
		return EINVAL;

	req = xfrog_bulkstat_alloc_req(1, ino);
	if (!req)
		return ENOMEM;

	req->hdr.flags = flags;
	ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req);
	if (ret) {
		ret = errno;
		goto free;
	}

	if (req->hdr.ocount == 0) {
		ret = ENOENT;
		goto free;
	}

	memcpy(bulkstat, req->bulkstat, sizeof(struct xfs_bulkstat));
free:
	free(req);
	return ret;
}

/* Bulkstat a single inode using v1 ioctl. */
static int
xfrog_bulkstat_single1(
	struct xfs_fd		*xfd,
	uint64_t		ino,
	unsigned int		flags,
	struct xfs_bulkstat	*bulkstat)
{
	struct xfs_bstat	bstat;
	struct xfs_fsop_bulkreq	bulkreq = { 0 };
	int			error;

	if (flags)
		return EINVAL;

	error = xfrog_bulkstat_prep_v1_emulation(xfd);
	if (error)
		return error;

	bulkreq.lastip = (__u64 *)&ino;
	bulkreq.icount = 1;
	bulkreq.ubuffer = &bstat;
	error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT_SINGLE, &bulkreq);
	if (error)
		return errno;

	xfrog_bulkstat_v1_to_v5(xfd, bulkstat, &bstat);
	return 0;
}

/* Bulkstat a single inode. Returns zero or a positive error code. */
int
xfrog_bulkstat_single(
	struct xfs_fd		*xfd,
	uint64_t		ino,
	unsigned int		flags,
	struct xfs_bulkstat	*bulkstat)
{
	int			error;

	if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
		goto try_v1;

	error = xfrog_bulkstat_single5(xfd, ino, flags, bulkstat);
	if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
		return error;

	/* If the v5 ioctl wasn't found, we punt to v1. */
	switch (error) {
	case EOPNOTSUPP:
	case ENOTTY:
		xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
		break;
	}

try_v1:
	return xfrog_bulkstat_single1(xfd, ino, flags, bulkstat);
}
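
/*
 * Example: stat a single inode by number. This is a sketch; it assumes xfd
 * is an open struct xfs_fd and ino holds a valid inode number. Note that
 * the return value is a positive errno, not -1/errno:
 *
 *	struct xfs_bulkstat	bs;
 *	int			ret;
 *
 *	ret = xfrog_bulkstat_single(xfd, ino, 0, &bs);
 *	if (ret)
 *		fprintf(stderr, "bulkstat: %s\n", strerror(ret));
 *	else
 *		printf("ino %llu size %lld\n",
 *				(unsigned long long)bs.bs_ino,
 *				(long long)bs.bs_size);
 */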

/*
 * Set up the necessary control structures to emulate a V5 bulk request ioctl
 * by calling a V1 bulk request ioctl. This enables callers to run on older
 * kernels.
 *
 * Returns 0 if the emulation should proceed; ECANCELED if there are no
 * records; or a positive error code.
 */
static int
xfrog_bulk_req_v1_setup(
	struct xfs_fd		*xfd,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_fsop_bulkreq	*bulkreq,
	size_t			rec_size)
{
	void			*buf;

	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		uint32_t	agno = cvt_ino_to_agno(xfd, hdr->ino);

		if (hdr->ino == 0)
			hdr->ino = cvt_agino_to_ino(xfd, hdr->agno, 0);
		else if (agno < hdr->agno)
			return EINVAL;
		else if (agno > hdr->agno)
			goto no_results;
	}

	if (cvt_ino_to_agno(xfd, hdr->ino) > xfd->fsgeom.agcount)
		goto no_results;

	buf = malloc(hdr->icount * rec_size);
	if (!buf)
		return errno;

	if (hdr->ino)
		hdr->ino--;
	bulkreq->lastip = (__u64 *)&hdr->ino;
	bulkreq->icount = hdr->icount;
	bulkreq->ocount = (__s32 *)&hdr->ocount;
	bulkreq->ubuffer = buf;
	return 0;

no_results:
	hdr->ocount = 0;
	return ECANCELED;
}

/*
 * Clean up after using a V1 bulk request to emulate a V5 bulk request call.
 *
 * If the ioctl was successful, we need to convert the returned V1-format bulk
 * request data into the V5-format bulk request data and copy it into the
 * caller's buffer. We also need to free all resources allocated during
 * setup.
 */
static int
xfrog_bulk_req_v1_cleanup(
	struct xfs_fd		*xfd,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_fsop_bulkreq	*bulkreq,
	size_t			v1_rec_size,
	uint64_t		(*v1_ino)(void *v1_rec),
	void			*v5_records,
	size_t			v5_rec_size,
	void			(*cvt)(struct xfs_fd *xfd, void *v5, void *v1),
	unsigned int		startino_adj,
	int			error)
{
	void			*v1_rec = bulkreq->ubuffer;
	void			*v5_rec = v5_records;
	unsigned int		i;

	if (error == ECANCELED) {
		error = 0;
		goto free;
	}
	if (error)
		goto free;

	/*
	 * Convert each record from v1 to v5 format, keeping the startino
	 * value up to date and (if desired) stopping at the end of the
	 * AG.
	 */
	for (i = 0;
	     i < hdr->ocount;
	     i++, v1_rec += v1_rec_size, v5_rec += v5_rec_size) {
		uint64_t	ino = v1_ino(v1_rec);

		/* Stop if we hit a different AG. */
		if ((hdr->flags & XFS_BULK_IREQ_AGNO) &&
		    cvt_ino_to_agno(xfd, ino) != hdr->agno) {
			hdr->ocount = i;
			break;
		}
		cvt(xfd, v5_rec, v1_rec);
		hdr->ino = ino + startino_adj;
	}

free:
	free(bulkreq->ubuffer);
	return error;
}

static uint64_t xfrog_bstat_ino(void *v1_rec)
{
	return ((struct xfs_bstat *)v1_rec)->bs_ino;
}

static void xfrog_bstat_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_bulkstat_v1_to_v5(xfd, v5, v1);
}

/* Bulkstat a bunch of inodes using the v5 interface. */
static int
xfrog_bulkstat5(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat_req	*req)
{
	int			ret;

	ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req);
	if (ret)
		return errno;
	return 0;
}

/* Bulkstat a bunch of inodes using the v1 interface. */
static int
xfrog_bulkstat1(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat_req	*req)
{
	struct xfs_fsop_bulkreq	bulkreq = { 0 };
	int			error;

	error = xfrog_bulkstat_prep_v1_emulation(xfd);
	if (error)
		return error;

	error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_bstat));
	if (error == ECANCELED)
		goto out_teardown;
	if (error)
		return error;

	error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT, &bulkreq);
	if (error)
		error = errno;

out_teardown:
	/* Convert the records and advance the cursor one inode past the
	 * last record returned. */
	return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_bstat), xfrog_bstat_ino,
			&req->bulkstat, sizeof(struct xfs_bulkstat),
			xfrog_bstat_cvt, 1, error);
}

/* Bulkstat a bunch of inodes. Returns zero or a positive error code. */
int
xfrog_bulkstat(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat_req	*req)
{
	int			error;

	if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
		goto try_v1;

	error = xfrog_bulkstat5(xfd, req);
	if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
		return error;

	/* If the v5 ioctl wasn't found, we punt to v1. */
	switch (error) {
	case EOPNOTSUPP:
	case ENOTTY:
		xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
		break;
	}

try_v1:
	return xfrog_bulkstat1(xfd, req);
}
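
/*
 * Example: walk every inode in the filesystem in batches of 64 records.
 * This is a sketch with error handling trimmed; it assumes xfd is an open
 * struct xfs_fd and visit() stands in for whatever per-inode processing
 * the caller wants to do. The kernel (or the v1 emulation above) advances
 * req->hdr.ino past the last record, so reissuing the request resumes the
 * walk; ocount drops to zero when there are no more inodes:
 *
 *	struct xfs_bulkstat_req	*req;
 *	uint32_t		i;
 *	int			ret;
 *
 *	req = xfrog_bulkstat_alloc_req(64, 0);
 *	if (!req)
 *		return ENOMEM;
 *	while ((ret = xfrog_bulkstat(xfd, req)) == 0 && req->hdr.ocount > 0) {
 *		for (i = 0; i < req->hdr.ocount; i++)
 *			visit(&req->bulkstat[i]);
 *	}
 *	free(req);
 *	return ret;
 */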

/* Decide if a 64-bit timestamp is too large to fit in the system time_t. */
static bool
time_too_big(
	uint64_t		time)
{
	time_t			TIME_MAX;

	memset(&TIME_MAX, 0xFF, sizeof(TIME_MAX));
	return time > TIME_MAX;
}

/* Convert bulkstat data from v5 format to v1 format. */
int
xfrog_bulkstat_v5_to_v1(
	struct xfs_fd		*xfd,
	struct xfs_bstat	*bs1,
	const struct xfs_bulkstat *bs5)
{
	if (bs5->bs_aextents > UINT16_MAX ||
	    cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks) > UINT32_MAX ||
	    cvt_off_fsb_to_b(xfd, bs5->bs_cowextsize_blks) > UINT32_MAX ||
	    time_too_big(bs5->bs_atime) ||
	    time_too_big(bs5->bs_ctime) ||
	    time_too_big(bs5->bs_mtime))
		return ERANGE;

	bs1->bs_ino = bs5->bs_ino;
	bs1->bs_mode = bs5->bs_mode;
	bs1->bs_nlink = bs5->bs_nlink;
	bs1->bs_uid = bs5->bs_uid;
	bs1->bs_gid = bs5->bs_gid;
	bs1->bs_rdev = bs5->bs_rdev;
	bs1->bs_blksize = bs5->bs_blksize;
	bs1->bs_size = bs5->bs_size;
	bs1->bs_atime.tv_sec = bs5->bs_atime;
	bs1->bs_mtime.tv_sec = bs5->bs_mtime;
	bs1->bs_ctime.tv_sec = bs5->bs_ctime;
	bs1->bs_atime.tv_nsec = bs5->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bs5->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bs5->bs_ctime_nsec;
	bs1->bs_blocks = bs5->bs_blocks;
	bs1->bs_xflags = bs5->bs_xflags;
	bs1->bs_extsize = cvt_off_fsb_to_b(xfd, bs5->bs_extsize_blks);
	bs1->bs_extents = bs5->bs_extents;
	bs1->bs_gen = bs5->bs_gen;
	bs1->bs_projid_lo = bs5->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bs5->bs_forkoff;
	bs1->bs_projid_hi = bs5->bs_projectid >> 16;
	bs1->bs_sick = bs5->bs_sick;
	bs1->bs_checked = bs5->bs_checked;
	bs1->bs_cowextsize = cvt_off_fsb_to_b(xfd, bs5->bs_cowextsize_blks);
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bs5->bs_aextents;
	return 0;
}
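
/*
 * Example: fill a v1 structure for an older consumer from a v5 record
 * (a sketch; bs5 points to a struct xfs_bulkstat obtained from one of the
 * bulkstat calls above):
 *
 *	struct xfs_bstat	bs1;
 *	int			ret;
 *
 *	ret = xfrog_bulkstat_v5_to_v1(xfd, &bs1, bs5);
 *	if (ret == ERANGE)
 *		... the file's attributes cannot be represented in v1 ...
 */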

/* Convert bulkstat data from v1 format to v5 format. */
void
xfrog_bulkstat_v1_to_v5(
	struct xfs_fd		*xfd,
	struct xfs_bulkstat	*bs5,
	const struct xfs_bstat	*bs1)
{
	memset(bs5, 0, sizeof(*bs5));
	bs5->bs_version = XFS_BULKSTAT_VERSION_V1;

	bs5->bs_ino = bs1->bs_ino;
	bs5->bs_mode = bs1->bs_mode;
	bs5->bs_nlink = bs1->bs_nlink;
	bs5->bs_uid = bs1->bs_uid;
	bs5->bs_gid = bs1->bs_gid;
	bs5->bs_rdev = bs1->bs_rdev;
	bs5->bs_blksize = bs1->bs_blksize;
	bs5->bs_size = bs1->bs_size;
	bs5->bs_atime = bs1->bs_atime.tv_sec;
	bs5->bs_mtime = bs1->bs_mtime.tv_sec;
	bs5->bs_ctime = bs1->bs_ctime.tv_sec;
	bs5->bs_atime_nsec = bs1->bs_atime.tv_nsec;
	bs5->bs_mtime_nsec = bs1->bs_mtime.tv_nsec;
	bs5->bs_ctime_nsec = bs1->bs_ctime.tv_nsec;
	bs5->bs_blocks = bs1->bs_blocks;
	bs5->bs_xflags = bs1->bs_xflags;
	bs5->bs_extsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_extsize);
	bs5->bs_extents = bs1->bs_extents;
	bs5->bs_gen = bs1->bs_gen;
	bs5->bs_projectid = bstat_get_projid(bs1);
	bs5->bs_forkoff = bs1->bs_forkoff;
	bs5->bs_sick = bs1->bs_sick;
	bs5->bs_checked = bs1->bs_checked;
	bs5->bs_cowextsize_blks = cvt_b_to_off_fsbt(xfd, bs1->bs_cowextsize);
	bs5->bs_aextents = bs1->bs_aextents;
}

/* Allocate a bulkstat request. On error returns NULL and sets errno. */
struct xfs_bulkstat_req *
xfrog_bulkstat_alloc_req(
	uint32_t		nr,
	uint64_t		startino)
{
	struct xfs_bulkstat_req	*breq;

	breq = calloc(1, XFS_BULKSTAT_REQ_SIZE(nr));
	if (!breq)
		return NULL;

	breq->hdr.icount = nr;
	breq->hdr.ino = startino;

	return breq;
}

/* Set a bulkstat cursor to iterate only a particular AG. */
void
xfrog_bulkstat_set_ag(
	struct xfs_bulkstat_req	*req,
	uint32_t		agno)
{
	req->hdr.agno = agno;
	req->hdr.flags |= XFS_BULK_IREQ_AGNO;
}
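
/*
 * Example: walk only the inodes in AG 2. A startino of zero combined with
 * XFS_BULK_IREQ_AGNO means "start at the beginning of that AG", which is
 * also what the v1 emulation above assumes. A sketch, with error handling
 * trimmed:
 *
 *	struct xfs_bulkstat_req	*req;
 *	int			ret;
 *
 *	req = xfrog_bulkstat_alloc_req(64, 0);
 *	if (!req)
 *		return ENOMEM;
 *	xfrog_bulkstat_set_ag(req, 2);
 *	while ((ret = xfrog_bulkstat(xfd, req)) == 0 && req->hdr.ocount > 0)
 *		... process req->bulkstat[0] through [req->hdr.ocount - 1] ...
 *	free(req);
 */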

/* Convert inumbers data from v5 format to v1 format. */
void
xfrog_inumbers_v5_to_v1(
	struct xfs_inogrp	*ig1,
	const struct xfs_inumbers *ig5)
{
	ig1->xi_startino = ig5->xi_startino;
	ig1->xi_alloccount = ig5->xi_alloccount;
	ig1->xi_allocmask = ig5->xi_allocmask;
}

/* Convert inumbers data from v1 format to v5 format. */
void
xfrog_inumbers_v1_to_v5(
	struct xfs_inumbers	*ig5,
	const struct xfs_inogrp	*ig1)
{
	memset(ig5, 0, sizeof(*ig5));
	ig5->xi_version = XFS_INUMBERS_VERSION_V1;

	ig5->xi_startino = ig1->xi_startino;
	ig5->xi_alloccount = ig1->xi_alloccount;
	ig5->xi_allocmask = ig1->xi_allocmask;
}

static uint64_t xfrog_inum_ino(void *v1_rec)
{
	return ((struct xfs_inogrp *)v1_rec)->xi_startino;
}

static void xfrog_inum_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_inumbers_v1_to_v5(v5, v1);
}

/* Query inode allocation bitmask information using v5 ioctl. */
static int
xfrog_inumbers5(
	struct xfs_fd		*xfd,
	struct xfs_inumbers_req	*req)
{
	int			ret;

	ret = ioctl(xfd->fd, XFS_IOC_INUMBERS, req);
	if (ret)
		return errno;
	return 0;
}

/* Query inode allocation bitmask information using v1 ioctl. */
static int
xfrog_inumbers1(
	struct xfs_fd		*xfd,
	struct xfs_inumbers_req	*req)
{
	struct xfs_fsop_bulkreq	bulkreq = { 0 };
	int			error;

	error = xfrog_bulkstat_prep_v1_emulation(xfd);
	if (error)
		return error;

	error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_inogrp));
	if (error == ECANCELED)
		goto out_teardown;
	if (error)
		return error;

	error = ioctl(xfd->fd, XFS_IOC_FSINUMBERS, &bulkreq);
	if (error)
		error = errno;

out_teardown:
	/* Each v1 record covers a chunk of 64 inodes, so advance the
	 * cursor a whole chunk past the last record returned. */
	return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq,
			sizeof(struct xfs_inogrp), xfrog_inum_ino,
			&req->inumbers, sizeof(struct xfs_inumbers),
			xfrog_inum_cvt, 64, error);
}

/*
 * Query inode allocation bitmask information. Returns zero or a positive
 * error code.
 */
int
xfrog_inumbers(
	struct xfs_fd		*xfd,
	struct xfs_inumbers_req	*req)
{
	int			error;

	if (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V1)
		goto try_v1;

	error = xfrog_inumbers5(xfd, req);
	if (error == 0 || (xfd->flags & XFROG_FLAG_BULKSTAT_FORCE_V5))
		return error;

	/* If the v5 ioctl wasn't found, we punt to v1. */
	switch (error) {
	case EOPNOTSUPP:
	case ENOTTY:
		xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
		break;
	}

try_v1:
	return xfrog_inumbers1(xfd, req);
}
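
/*
 * Example: dump the inode chunk records for the whole filesystem. A sketch
 * with error handling trimmed; assumes xfd is an open struct xfs_fd:
 *
 *	struct xfs_inumbers_req	*ireq;
 *	uint32_t		i;
 *	int			ret;
 *
 *	ireq = xfrog_inumbers_alloc_req(64, 0);
 *	if (!ireq)
 *		return ENOMEM;
 *	while ((ret = xfrog_inumbers(xfd, ireq)) == 0 && ireq->hdr.ocount > 0) {
 *		for (i = 0; i < ireq->hdr.ocount; i++)
 *			printf("startino %llu count %u\n",
 *				(unsigned long long)ireq->inumbers[i].xi_startino,
 *				(unsigned int)ireq->inumbers[i].xi_alloccount);
 *	}
 *	free(ireq);
 */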

/* Allocate an inumbers request. On error returns NULL and sets errno. */
struct xfs_inumbers_req *
xfrog_inumbers_alloc_req(
	uint32_t		nr,
	uint64_t		startino)
{
	struct xfs_inumbers_req	*ireq;

	ireq = calloc(1, XFS_INUMBERS_REQ_SIZE(nr));
	if (!ireq)
		return NULL;

	ireq->hdr.icount = nr;
	ireq->hdr.ino = startino;

	return ireq;
}

/* Set an inumbers cursor to iterate only a particular AG. */
void
xfrog_inumbers_set_ag(
	struct xfs_inumbers_req	*req,
	uint32_t		agno)
{
	req->hdr.agno = agno;
	req->hdr.flags |= XFS_BULK_IREQ_AGNO;
}