1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2019 Oracle. All Rights Reserved.
4 * Author: Darrick J. Wong <darrick.wong@oracle.com>
13 * Wrapper functions for BULKSTAT and INUMBERS
14 * ===========================================
16 * The functions in this file are thin wrappers around the most recent version
17 * of the BULKSTAT and INUMBERS ioctls. BULKSTAT is used to query XFS-specific
18 * stat information about a group of inodes. INUMBERS is used to query
19 * allocation information about batches of XFS inodes.
21 * At the moment, the public xfrog_* functions provide all functionality of the
22 * V5 interface. If the V5 interface is not available on the running kernel,
23 * the functions will emulate them as best they can with previous versions of
24 * the interface (currently V1). If emulation is not possible, EINVAL will be returned.
27 * The XFROG_FLAG_BULKSTAT_FORCE_V[15] flags can be used to force use of a
28 * particular version of the kernel interface for testing.
32 * Grab the fs geometry information that is needed to needed to emulate v5 with
36 xfrog_bulkstat_prep_v1_emulation(
39 if (xfd
->fsgeom
.blocksize
> 0)
42 return xfd_prepare_geometry(xfd
);
45 /* Bulkstat a single inode using v5 ioctl. */
47 xfrog_bulkstat_single5(
51 struct xfs_bulkstat
*bulkstat
)
53 struct xfs_bulkstat_req
*req
;
56 if (flags
& ~(XFS_BULK_IREQ_SPECIAL
))
59 req
= xfrog_bulkstat_alloc_req(1, ino
);
63 req
->hdr
.flags
= flags
;
64 ret
= ioctl(xfd
->fd
, XFS_IOC_BULKSTAT
, req
);
70 if (req
->hdr
.ocount
== 0) {
75 memcpy(bulkstat
, req
->bulkstat
, sizeof(struct xfs_bulkstat
));
81 /* Bulkstat a single inode using v1 ioctl. */
83 xfrog_bulkstat_single1(
87 struct xfs_bulkstat
*bulkstat
)
89 struct xfs_bstat bstat
;
90 struct xfs_fsop_bulkreq bulkreq
= { 0 };
96 error
= xfrog_bulkstat_prep_v1_emulation(xfd
);
100 bulkreq
.lastip
= (__u64
*)&ino
;
102 bulkreq
.ubuffer
= &bstat
;
103 error
= ioctl(xfd
->fd
, XFS_IOC_FSBULKSTAT_SINGLE
, &bulkreq
);
107 xfrog_bulkstat_v1_to_v5(xfd
, bulkstat
, &bstat
);
111 /* Bulkstat a single inode. Returns zero or a positive error code. */
113 xfrog_bulkstat_single(
117 struct xfs_bulkstat
*bulkstat
)
121 if (xfd
->flags
& XFROG_FLAG_BULKSTAT_FORCE_V1
)
124 error
= xfrog_bulkstat_single5(xfd
, ino
, flags
, bulkstat
);
125 if (error
== 0 || (xfd
->flags
& XFROG_FLAG_BULKSTAT_FORCE_V5
))
128 /* If the v5 ioctl wasn't found, we punt to v1. */
132 xfd
->flags
|= XFROG_FLAG_BULKSTAT_FORCE_V1
;
137 return xfrog_bulkstat_single1(xfd
, ino
, flags
, bulkstat
);
141 * Set up the necessary control structures to emulate a V5 bulk request ioctl
142 * by calling a V1 bulk request ioctl. This enables callers to run on older
145 * Returns 0 if the emulation should proceed; ECANCELED if there are no
146 * records; or a positive error code.
149 xfrog_bulk_req_v1_setup(
151 struct xfs_bulk_ireq
*hdr
,
152 struct xfs_fsop_bulkreq
*bulkreq
,
157 if (hdr
->flags
& XFS_BULK_IREQ_AGNO
) {
158 uint32_t agno
= cvt_ino_to_agno(xfd
, hdr
->ino
);
161 hdr
->ino
= cvt_agino_to_ino(xfd
, hdr
->agno
, 0);
162 else if (agno
< hdr
->agno
)
164 else if (agno
> hdr
->agno
)
168 if (cvt_ino_to_agno(xfd
, hdr
->ino
) > xfd
->fsgeom
.agcount
)
171 buf
= malloc(hdr
->icount
* rec_size
);
177 bulkreq
->lastip
= (__u64
*)&hdr
->ino
,
178 bulkreq
->icount
= hdr
->icount
,
179 bulkreq
->ocount
= (__s32
*)&hdr
->ocount
,
180 bulkreq
->ubuffer
= buf
;
189 * Clean up after using a V1 bulk request to emulate a V5 bulk request call.
191 * If the ioctl was successful, we need to convert the returned V1-format bulk
192 * request data into the V5-format bulk request data and copy it into the
193 * caller's buffer. We also need to free all resources allocated during the
197 xfrog_bulk_req_v1_cleanup(
199 struct xfs_bulk_ireq
*hdr
,
200 struct xfs_fsop_bulkreq
*bulkreq
,
202 uint64_t (*v1_ino
)(void *v1_rec
),
205 void (*cvt
)(struct xfs_fd
*xfd
, void *v5
, void *v1
),
206 unsigned int startino_adj
,
209 void *v1_rec
= bulkreq
->ubuffer
;
210 void *v5_rec
= v5_records
;
213 if (error
== ECANCELED
) {
221 * Convert each record from v1 to v5 format, keeping the startino
222 * value up to date and (if desired) stopping at the end of the
227 i
++, v1_rec
+= v1_rec_size
, v5_rec
+= v5_rec_size
) {
228 uint64_t ino
= v1_ino(v1_rec
);
230 /* Stop if we hit a different AG. */
231 if ((hdr
->flags
& XFS_BULK_IREQ_AGNO
) &&
232 cvt_ino_to_agno(xfd
, ino
) != hdr
->agno
) {
236 cvt(xfd
, v5_rec
, v1_rec
);
237 hdr
->ino
= ino
+ startino_adj
;
241 free(bulkreq
->ubuffer
);
245 static uint64_t xfrog_bstat_ino(void *v1_rec
)
247 return ((struct xfs_bstat
*)v1_rec
)->bs_ino
;
/* Record-conversion callback: translate one v1 bulkstat record to v5. */
static void xfrog_bstat_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_bulkstat_v1_to_v5(xfd, v5, v1);
}
255 /* Bulkstat a bunch of inodes using the v5 interface. */
259 struct xfs_bulkstat_req
*req
)
263 ret
= ioctl(xfd
->fd
, XFS_IOC_BULKSTAT
, req
);
269 /* Bulkstat a bunch of inodes using the v1 interface. */
273 struct xfs_bulkstat_req
*req
)
275 struct xfs_fsop_bulkreq bulkreq
= { 0 };
278 error
= xfrog_bulkstat_prep_v1_emulation(xfd
);
282 error
= xfrog_bulk_req_v1_setup(xfd
, &req
->hdr
, &bulkreq
,
283 sizeof(struct xfs_bstat
));
284 if (error
== ECANCELED
)
289 error
= ioctl(xfd
->fd
, XFS_IOC_FSBULKSTAT
, &bulkreq
);
294 return xfrog_bulk_req_v1_cleanup(xfd
, &req
->hdr
, &bulkreq
,
295 sizeof(struct xfs_bstat
), xfrog_bstat_ino
,
296 &req
->bulkstat
, sizeof(struct xfs_bulkstat
),
297 xfrog_bstat_cvt
, 1, error
);
300 /* Bulkstat a bunch of inodes. Returns zero or a positive error code. */
304 struct xfs_bulkstat_req
*req
)
308 if (xfd
->flags
& XFROG_FLAG_BULKSTAT_FORCE_V1
)
311 error
= xfrog_bulkstat5(xfd
, req
);
312 if (error
== 0 || (xfd
->flags
& XFROG_FLAG_BULKSTAT_FORCE_V5
))
315 /* If the v5 ioctl wasn't found, we punt to v1. */
319 xfd
->flags
|= XFROG_FLAG_BULKSTAT_FORCE_V1
;
324 return xfrog_bulkstat1(xfd
, req
);
/*
 * Decide if a 64-bit seconds value overflows the range of a time_t.
 *
 * NOTE(review): when time_t is a 64-bit signed type, the all-ones bit
 * pattern written below is -1, which converts to UINT64_MAX in the
 * unsigned comparison, so this can never return true on such platforms —
 * confirm that only 32-bit time_t targets need the check.
 */
static bool
time_too_big(
	uint64_t	time)
{
	time_t		TIME_MAX;

	/* Build the all-ones bit pattern for a time_t. */
	memset(&TIME_MAX, 0xFF, sizeof(TIME_MAX));
	return time > TIME_MAX;
}
337 /* Convert bulkstat data from v5 format to v1 format. */
339 xfrog_bulkstat_v5_to_v1(
341 struct xfs_bstat
*bs1
,
342 const struct xfs_bulkstat
*bs5
)
344 if (bs5
->bs_aextents
> UINT16_MAX
||
345 cvt_off_fsb_to_b(xfd
, bs5
->bs_extsize_blks
) > UINT32_MAX
||
346 cvt_off_fsb_to_b(xfd
, bs5
->bs_cowextsize_blks
) > UINT32_MAX
||
347 time_too_big(bs5
->bs_atime
) ||
348 time_too_big(bs5
->bs_ctime
) ||
349 time_too_big(bs5
->bs_mtime
))
352 bs1
->bs_ino
= bs5
->bs_ino
;
353 bs1
->bs_mode
= bs5
->bs_mode
;
354 bs1
->bs_nlink
= bs5
->bs_nlink
;
355 bs1
->bs_uid
= bs5
->bs_uid
;
356 bs1
->bs_gid
= bs5
->bs_gid
;
357 bs1
->bs_rdev
= bs5
->bs_rdev
;
358 bs1
->bs_blksize
= bs5
->bs_blksize
;
359 bs1
->bs_size
= bs5
->bs_size
;
360 bs1
->bs_atime
.tv_sec
= bs5
->bs_atime
;
361 bs1
->bs_mtime
.tv_sec
= bs5
->bs_mtime
;
362 bs1
->bs_ctime
.tv_sec
= bs5
->bs_ctime
;
363 bs1
->bs_atime
.tv_nsec
= bs5
->bs_atime_nsec
;
364 bs1
->bs_mtime
.tv_nsec
= bs5
->bs_mtime_nsec
;
365 bs1
->bs_ctime
.tv_nsec
= bs5
->bs_ctime_nsec
;
366 bs1
->bs_blocks
= bs5
->bs_blocks
;
367 bs1
->bs_xflags
= bs5
->bs_xflags
;
368 bs1
->bs_extsize
= cvt_off_fsb_to_b(xfd
, bs5
->bs_extsize_blks
);
369 bs1
->bs_extents
= bs5
->bs_extents
;
370 bs1
->bs_gen
= bs5
->bs_gen
;
371 bs1
->bs_projid_lo
= bs5
->bs_projectid
& 0xFFFF;
372 bs1
->bs_forkoff
= bs5
->bs_forkoff
;
373 bs1
->bs_projid_hi
= bs5
->bs_projectid
>> 16;
374 bs1
->bs_sick
= bs5
->bs_sick
;
375 bs1
->bs_checked
= bs5
->bs_checked
;
376 bs1
->bs_cowextsize
= cvt_off_fsb_to_b(xfd
, bs5
->bs_cowextsize_blks
);
377 bs1
->bs_dmevmask
= 0;
379 bs1
->bs_aextents
= bs5
->bs_aextents
;
383 /* Convert bulkstat data from v1 format to v5 format. */
385 xfrog_bulkstat_v1_to_v5(
387 struct xfs_bulkstat
*bs5
,
388 const struct xfs_bstat
*bs1
)
390 memset(bs5
, 0, sizeof(*bs5
));
391 bs5
->bs_version
= XFS_BULKSTAT_VERSION_V1
;
393 bs5
->bs_ino
= bs1
->bs_ino
;
394 bs5
->bs_mode
= bs1
->bs_mode
;
395 bs5
->bs_nlink
= bs1
->bs_nlink
;
396 bs5
->bs_uid
= bs1
->bs_uid
;
397 bs5
->bs_gid
= bs1
->bs_gid
;
398 bs5
->bs_rdev
= bs1
->bs_rdev
;
399 bs5
->bs_blksize
= bs1
->bs_blksize
;
400 bs5
->bs_size
= bs1
->bs_size
;
401 bs5
->bs_atime
= bs1
->bs_atime
.tv_sec
;
402 bs5
->bs_mtime
= bs1
->bs_mtime
.tv_sec
;
403 bs5
->bs_ctime
= bs1
->bs_ctime
.tv_sec
;
404 bs5
->bs_atime_nsec
= bs1
->bs_atime
.tv_nsec
;
405 bs5
->bs_mtime_nsec
= bs1
->bs_mtime
.tv_nsec
;
406 bs5
->bs_ctime_nsec
= bs1
->bs_ctime
.tv_nsec
;
407 bs5
->bs_blocks
= bs1
->bs_blocks
;
408 bs5
->bs_xflags
= bs1
->bs_xflags
;
409 bs5
->bs_extsize_blks
= cvt_b_to_off_fsbt(xfd
, bs1
->bs_extsize
);
410 bs5
->bs_extents
= bs1
->bs_extents
;
411 bs5
->bs_gen
= bs1
->bs_gen
;
412 bs5
->bs_projectid
= bstat_get_projid(bs1
);
413 bs5
->bs_forkoff
= bs1
->bs_forkoff
;
414 bs5
->bs_sick
= bs1
->bs_sick
;
415 bs5
->bs_checked
= bs1
->bs_checked
;
416 bs5
->bs_cowextsize_blks
= cvt_b_to_off_fsbt(xfd
, bs1
->bs_cowextsize
);
417 bs5
->bs_aextents
= bs1
->bs_aextents
;
420 /* Allocate a bulkstat request. On error returns NULL and sets errno. */
421 struct xfs_bulkstat_req
*
422 xfrog_bulkstat_alloc_req(
426 struct xfs_bulkstat_req
*breq
;
428 breq
= calloc(1, XFS_BULKSTAT_REQ_SIZE(nr
));
432 breq
->hdr
.icount
= nr
;
433 breq
->hdr
.ino
= startino
;
438 /* Set a bulkstat cursor to iterate only a particular AG. */
440 xfrog_bulkstat_set_ag(
441 struct xfs_bulkstat_req
*req
,
444 req
->hdr
.agno
= agno
;
445 req
->hdr
.flags
|= XFS_BULK_IREQ_AGNO
;
448 /* Convert a inumbers data from v5 format to v1 format. */
450 xfrog_inumbers_v5_to_v1(
451 struct xfs_inogrp
*ig1
,
452 const struct xfs_inumbers
*ig5
)
454 ig1
->xi_startino
= ig5
->xi_startino
;
455 ig1
->xi_alloccount
= ig5
->xi_alloccount
;
456 ig1
->xi_allocmask
= ig5
->xi_allocmask
;
459 /* Convert a inumbers data from v1 format to v5 format. */
461 xfrog_inumbers_v1_to_v5(
462 struct xfs_inumbers
*ig5
,
463 const struct xfs_inogrp
*ig1
)
465 memset(ig5
, 0, sizeof(*ig5
));
466 ig5
->xi_version
= XFS_INUMBERS_VERSION_V1
;
468 ig5
->xi_startino
= ig1
->xi_startino
;
469 ig5
->xi_alloccount
= ig1
->xi_alloccount
;
470 ig5
->xi_allocmask
= ig1
->xi_allocmask
;
473 static uint64_t xfrog_inum_ino(void *v1_rec
)
475 return ((struct xfs_inogrp
*)v1_rec
)->xi_startino
;
/* Record-conversion callback; the xfd argument is not needed here. */
static void xfrog_inum_cvt(struct xfs_fd *xfd, void *v5, void *v1)
{
	xfrog_inumbers_v1_to_v5(v5, v1);
}
483 /* Query inode allocation bitmask information using v5 ioctl. */
487 struct xfs_inumbers_req
*req
)
491 ret
= ioctl(xfd
->fd
, XFS_IOC_INUMBERS
, req
);
497 /* Query inode allocation bitmask information using v1 ioctl. */
501 struct xfs_inumbers_req
*req
)
503 struct xfs_fsop_bulkreq bulkreq
= { 0 };
506 error
= xfrog_bulkstat_prep_v1_emulation(xfd
);
510 error
= xfrog_bulk_req_v1_setup(xfd
, &req
->hdr
, &bulkreq
,
511 sizeof(struct xfs_inogrp
));
512 if (error
== ECANCELED
)
517 error
= ioctl(xfd
->fd
, XFS_IOC_FSINUMBERS
, &bulkreq
);
522 return xfrog_bulk_req_v1_cleanup(xfd
, &req
->hdr
, &bulkreq
,
523 sizeof(struct xfs_inogrp
), xfrog_inum_ino
,
524 &req
->inumbers
, sizeof(struct xfs_inumbers
),
525 xfrog_inum_cvt
, 64, error
);
529 * Query inode allocation bitmask information. Returns zero or a positive
535 struct xfs_inumbers_req
*req
)
539 if (xfd
->flags
& XFROG_FLAG_BULKSTAT_FORCE_V1
)
542 error
= xfrog_inumbers5(xfd
, req
);
543 if (error
== 0 || (xfd
->flags
& XFROG_FLAG_BULKSTAT_FORCE_V5
))
546 /* If the v5 ioctl wasn't found, we punt to v1. */
550 xfd
->flags
|= XFROG_FLAG_BULKSTAT_FORCE_V1
;
555 return xfrog_inumbers1(xfd
, req
);
558 /* Allocate a inumbers request. On error returns NULL and sets errno. */
559 struct xfs_inumbers_req
*
560 xfrog_inumbers_alloc_req(
564 struct xfs_inumbers_req
*ireq
;
566 ireq
= calloc(1, XFS_INUMBERS_REQ_SIZE(nr
));
570 ireq
->hdr
.icount
= nr
;
571 ireq
->hdr
.ino
= startino
;
576 /* Set an inumbers cursor to iterate only a particular AG. */
578 xfrog_inumbers_set_ag(
579 struct xfs_inumbers_req
*req
,
582 req
->hdr
.agno
= agno
;
583 req
->hdr
.flags
|= XFS_BULK_IREQ_AGNO
;