// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include <stdint.h>
#include <stdlib.h>
#include <sys/statvfs.h>
#include "platform_defs.h"
#include "handle.h"
#include "libfrog/paths.h"
#include "libfrog/workqueue.h"
#include "xfs_scrub.h"
#include "common.h"
#include "inodes.h"
#include "descr.h"
#include "libfrog/fsgeom.h"
#include "libfrog/bulkstat.h"
#include "libfrog/handle_priv.h"
#include "libfrog/bitmask.h"
/*
 * Iterate a range of inodes.
 *
 * This is a little more involved than repeatedly asking BULKSTAT for a
 * buffer's worth of stat data for some number of inodes.  We want to scan as
 * many of the inodes that the inobt thinks there are, so we use the INUMBERS
 * ioctl to walk all the inobt records in the filesystem and spawn a worker
 * to bulkstat and iterate.  The worker starts with an inumbers record that
 * can look like this:
 *
 * {startino = S, allocmask = 0b11011}
 *
 * Given a starting inumber S and count C=64, bulkstat will return a sorted
 * array of stat information.  The bs_ino of those array elements can look
 * like any of the following:
 *
 * 0. [S, S+1, S+3, S+4]
 * 1. [S+e, S+e+1, S+e+3, S+e+4, S+e+C+1...], where e >= 0
 * 2. [S+e+n], where n >= 0
 * 3. [S+C+e...], where e >= 0
 * 4. [], errno == EFSCORRUPTED
 *
 * We know that bulkstat scanned the entire inode range between S and bs_ino
 * of the last array element, even though it only fills out an array element
 * for allocated inodes.  Therefore, we can say in cases 0-2 that S was
 * filled, even if there is no bstat[] record for S.  In turn, we can create
 * a bitmask of inodes that we have seen, and set bits 0 through
 * (bstat[-1].bs_ino - S), being careful not to set any bits past S+C.
 *
 * In case (0) we find that the seen mask matches the inumber record exactly,
 * so the caller can walk the stat records and move on.  In case (1) this is
 * also true, but we must be careful to reduce the array length to avoid
 * scanning inodes that are not in the inumber chunk.  In case (3) we
 * conclude that there were no inodes left to scan and terminate.
 *
 * In cases (2) and (4) we don't know why bulkstat returned fewer than C
 * elements.  We might have found the end of the filesystem, or the kernel
 * might have found a corrupt inode and stopped.  This we must investigate by
 * trying to fill out the rest of the bstat array starting with the next
 * inumber S' after the last bstat array element filled, and continuing until
 * S' is beyond S0 + C, or the array is full.  Each time we succeed in
 * loading new records, the kernel increases S' for us; if instead we
 * encounter case (4), we can increment S' ourselves.
 *
 * Inodes that are set in the allocmask but not set in the seen mask are the
 * corrupt inodes.  For each of these cases, we try to populate the bulkstat
 * array one inode at a time.  If the kernel returns a matching record we can
 * use it; if instead we receive an error, we synthesize enough of a record
 * to be able to run online scrub by handle.
 *
 * If the iteration function returns ESTALE, that means that the inode has
 * been deleted and possibly recreated since the BULKSTAT call.  We will
 * refresh the stat information and try again up to 30 times before reporting
 * the staleness as an error.
 */
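/*
 * Illustrative worked example of the cases above: take the inumbers record
 * {startino = S, allocmask = 0b11011} and suppose bulkstat returns
 * bs_ino = [S, S+1, S+3, S+4] (case 0).  Bulkstat scanned every inumber
 * from S through S+4, so the seen mask is 0b11111 and
 * (allocmask & ~seen_mask) == 0: no corrupt inodes.  If bulkstat instead
 * stopped early after [S, S+1] (case 2), the seen mask would be 0b00011
 * and (allocmask & ~seen_mask) == 0b11000, which marks S+3 and S+4 for
 * the refill and single-step fallbacks below.
 */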
/*
 * Return the inumber of the highest inode in the bulkstat data, assuming the
 * records are sorted in inumber order.
 */
static inline uint64_t last_bstat_ino(const struct xfs_bulkstat_req *b)
{
	return b->hdr.ocount ? b->bulkstat[b->hdr.ocount - 1].bs_ino : 0;
}
/*
 * Deduce the bitmask of the inodes in inums that were seen by bulkstat.  If
 * the inode is present in the bstat array this is trivially true; or if it
 * is not in the array but higher inumbers are present, then it was freed.
 */
static __u64
seen_mask_from_bulkstat(
	const struct xfs_inumbers	*inums,
	__u64				breq_startino,
	const struct xfs_bulkstat_req	*breq)
{
	const __u64		limit_ino =
		inums->xi_startino + LIBFROG_BULKSTAT_CHUNKSIZE;
	const __u64		last = last_bstat_ino(breq);
	__u64			ret = 0;
	int			i, maxi;

	/* Ignore the bulkstat results if they don't cover inumbers */
	if (breq_startino > limit_ino || last < inums->xi_startino)
		return 0;

	/* Every inumber from breq_startino through last was scanned. */
	maxi = min(LIBFROG_BULKSTAT_CHUNKSIZE, last - inums->xi_startino + 1);
	for (i = breq_startino - inums->xi_startino; i < maxi; i++)
		ret |= 1ULL << i;

	return ret;
}
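/*
 * Worked example for seen_mask_from_bulkstat() (illustrative numbers): if
 * inums->xi_startino = 128, breq_startino = 128, and the last record
 * returned has bs_ino = 131, then maxi = min(64, 131 - 128 + 1) = 4 and
 * the result is 0b1111: inumbers 128-131 were all scanned, whether or not
 * each one produced a bstat record.
 */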
/*
 * Try to fill the rest of orig_breq with bulkstat data by re-running
 * bulkstat with increasing start_ino until we either hit the end of the
 * inumbers info or fill up the bstat array with something.  Returns a
 * bitmask of the inodes within inums that were filled by the bulkstat
 * requests.
 */
static __u64
bulkstat_the_rest(
	struct scrub_ctx		*ctx,
	const struct xfs_inumbers	*inums,
	struct xfs_bulkstat_req		*orig_breq,
	int				orig_error)
{
	struct xfs_bulkstat_req		*new_breq;
	struct xfs_bulkstat		*old_bstat =
		&orig_breq->bulkstat[orig_breq->hdr.ocount];
	const __u64			limit_ino =
		inums->xi_startino + LIBFROG_BULKSTAT_CHUNKSIZE;
	__u64				start_ino = orig_breq->hdr.ino;
	__u64				seen_mask = 0;
	int				error;

	assert(orig_breq->hdr.ocount < orig_breq->hdr.icount);

	/*
	 * If the first bulkstat returned a corruption error, that means
	 * start_ino is corrupt.  Restart instead at the next inumber.
	 */
	if (orig_error == EFSCORRUPTED)
		start_ino++;
	if (start_ino >= limit_ino)
		return 0;

	error = -xfrog_bulkstat_alloc_req(
			orig_breq->hdr.icount - orig_breq->hdr.ocount,
			start_ino, &new_breq);
	if (error)
		return 0;
	new_breq->hdr.flags = orig_breq->hdr.flags;

	do {
		/*
		 * Fill the new bulkstat request with stat data starting at
		 * start_ino.
		 */
		error = -xfrog_bulkstat(&ctx->mnt, new_breq);
		if (error == EFSCORRUPTED) {
			/*
			 * start_ino is corrupt; increment and try the next
			 * inumber.
			 */
			start_ino++;
			new_breq->hdr.ino = start_ino;
			continue;
		}
		if (error) {
			/*
			 * Any other error means the caller falls back to
			 * single-stepping the chunk.
			 */
			break;
		}
		if (new_breq->hdr.ocount == 0)
			break;

		/* Copy new results to the original bstat buffer */
		memcpy(old_bstat, new_breq->bulkstat,
				new_breq->hdr.ocount * sizeof(struct xfs_bulkstat));
		orig_breq->hdr.ocount += new_breq->hdr.ocount;
		old_bstat += new_breq->hdr.ocount;
		seen_mask |= seen_mask_from_bulkstat(inums, start_ino,
				new_breq);

		new_breq->hdr.icount -= new_breq->hdr.ocount;
		start_ino = new_breq->hdr.ino;
	} while (new_breq->hdr.icount > 0 && new_breq->hdr.ino < limit_ino);

	free(new_breq);
	return seen_mask;
}
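/*
 * Example trace of bulkstat_the_rest() (illustrative): if the caller's
 * bulkstat failed with EFSCORRUPTED at S, we restart at S' = S + 1.  If
 * that call also returns EFSCORRUPTED, we bump S' to S + 2 and retry; when
 * a call succeeds, the kernel advances new_breq->hdr.ino past the records
 * it returned, and we resume there until the array fills up or S' passes
 * S + LIBFROG_BULKSTAT_CHUNKSIZE.
 */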
/* Return -1/0/1 without truncating a 64-bit difference to int. */
#define cmp_int(l, r)		((l > r) - (l < r))

/* Compare two bulkstat records by inumber. */
static int
compare_bstat(
	const void		*a,
	const void		*b)
{
	const struct xfs_bulkstat	*ba = a;
	const struct xfs_bulkstat	*bb = b;

	return cmp_int(ba->bs_ino, bb->bs_ino);
}
/*
 * Walk the xi_allocmask looking for set bits that aren't present in
 * the fill mask.  For each such inode, fill the entries at the end of
 * the array with stat information one at a time, synthesizing them if
 * necessary.  At this point, (xi_allocmask & ~seen_mask) should be the
 * corrupt inodes.
 */
static void
bulkstat_single_step(
	struct scrub_ctx		*ctx,
	const struct xfs_inumbers	*inumbers,
	uint64_t			seen_mask,
	struct xfs_bulkstat_req		*breq)
{
	struct xfs_bulkstat		*bs = NULL;
	int				i;
	int				error;

	for (i = 0; i < LIBFROG_BULKSTAT_CHUNKSIZE; i++) {
		/*
		 * Don't single-step if inumbers said it wasn't allocated or
		 * bulkstat actually filled it.
		 */
		if (!(inumbers->xi_allocmask & (1ULL << i)))
			continue;
		if (seen_mask & (1ULL << i))
			continue;

		assert(breq->hdr.ocount < LIBFROG_BULKSTAT_CHUNKSIZE);

		bs = &breq->bulkstat[breq->hdr.ocount];

		/*
		 * Didn't get desired stat data and we've hit the end of the
		 * returned data.  We can't distinguish between the inode
		 * being freed vs. the inode being too corrupt to load, so
		 * try a bulkstat single to see if we can load the inode.
		 */
		error = -xfrog_bulkstat_single(&ctx->mnt,
				inumbers->xi_startino + i, breq->hdr.flags,
				bs);
		switch (error) {
		case ENOENT:
			/*
			 * This inode wasn't found, and no results were
			 * returned.  We've likely hit the end of the
			 * filesystem, but we'll move on to the next inode in
			 * the mask for the sake of caution.
			 */
			continue;
		case 0:
			/*
			 * If a result was returned but it wasn't the inode
			 * we were looking for, then the missing inode was
			 * freed.  Move on to the next inode in the mask.
			 */
			if (bs->bs_ino != inumbers->xi_startino + i)
				continue;
			break;
		default:
			/*
			 * Some error happened.  Synthesize a bulkstat record
			 * so that phase3 can try to see if there's a corrupt
			 * inode that needs repairing.
			 */
			memset(bs, 0, sizeof(struct xfs_bulkstat));
			bs->bs_ino = inumbers->xi_startino + i;
			bs->bs_blksize = ctx->mnt_sv.f_frsize;
			break;
		}

		breq->hdr.ocount++;
	}

	/* If we added any entries, re-sort the array. */
	if (bs)
		qsort(breq->bulkstat, breq->hdr.ocount,
				sizeof(struct xfs_bulkstat), compare_bstat);
}
/* Return the inumber of the highest allocated inode in the inumbers data. */
static inline uint64_t last_allocmask_ino(const struct xfs_inumbers *i)
{
	return i->xi_startino + xfrog_highbit64(i->xi_allocmask);
}
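/*
 * N.B.: xfrog_highbit64() returns the index of the highest set bit, so the
 * computation above is only meaningful when xi_allocmask is nonzero; the
 * sole caller asserts that before using the result.
 */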
/*
 * Run bulkstat on an entire inode allocation group, then check that we got
 * exactly the inodes we expected.  If not, load them one at a time (or fake
 * it) into the bulkstat data.
 */
static void
bulkstat_for_inumbers(
	struct scrub_ctx		*ctx,
	const struct xfs_inumbers	*inumbers,
	struct xfs_bulkstat_req		*breq)
{
	const uint64_t		limit_ino =
		inumbers->xi_startino + LIBFROG_BULKSTAT_CHUNKSIZE;
	uint64_t		seen_mask = 0;
	int			i;
	int			error;

	assert(inumbers->xi_allocmask != 0);

	/* First we try regular bulkstat, for speed. */
	breq->hdr.ino = inumbers->xi_startino;
	error = -xfrog_bulkstat(&ctx->mnt, breq);
	if (!error) {
		if (!breq->hdr.ocount)
			return;
		seen_mask |= seen_mask_from_bulkstat(inumbers,
				inumbers->xi_startino, breq);
	}

	/*
	 * If the last allocated inode as reported by inumbers is higher than
	 * the last inode reported by bulkstat, two things could have
	 * happened.  Either all the inodes at the high end of the cluster
	 * were freed since the inumbers call; or bulkstat encountered a
	 * corrupt inode and returned early.  Try to bulkstat the rest of the
	 * array.
	 */
	if (last_allocmask_ino(inumbers) > last_bstat_ino(breq))
		seen_mask |= bulkstat_the_rest(ctx, inumbers, breq, error);

	/*
	 * Bulkstat might return inodes beyond xi_startino + CHUNKSIZE.
	 * Reduce ocount to ignore inodes not described by the inumbers
	 * record.
	 */
	for (i = breq->hdr.ocount - 1; i >= 0; i--) {
		if (breq->bulkstat[i].bs_ino < limit_ino)
			break;
		breq->hdr.ocount--;
	}

	/*
	 * Fill in any missing inodes that are mentioned in the alloc mask
	 * but weren't previously seen by bulkstat.  These are the corrupt
	 * inodes.
	 */
	bulkstat_single_step(ctx, inumbers, seen_mask, breq);
}
/* BULKSTAT wrapper routines. */
struct scan_inodes {
	struct workqueue	wq_bulkstat;
	scrub_inode_iter_fn	fn;
	void			*arg;
	unsigned int		nr_threads;
	bool			aborted;
};
/*
 * A single unit of inode scan work.  This contains a pointer to the parent
 * information, followed by an INUMBERS request structure, followed by a
 * BULKSTAT request structure.  The last two are VLAs, so we can't represent
 * them in the struct definition itself.
 */
struct scan_ichunk {
	struct scan_inodes	*si;
};

static inline struct xfs_inumbers_req *
ichunk_to_inumbers(
	struct scan_ichunk	*ichunk)
{
	char			*p = (char *)ichunk;

	return (struct xfs_inumbers_req *)(p + sizeof(struct scan_ichunk));
}

static inline struct xfs_bulkstat_req *
ichunk_to_bulkstat(
	struct scan_ichunk	*ichunk)
{
	char			*p = (char *)ichunk_to_inumbers(ichunk);

	return (struct xfs_bulkstat_req *)(p + XFS_INUMBERS_REQ_SIZE(1));
}
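/*
 * The single allocation, as laid out by alloc_ichunk below:
 *
 *	+---------------------------------------------------+
 *	| struct scan_ichunk (si pointer)                    |
 *	+---------------------------------------------------+
 *	| struct xfs_inumbers_req, 1 record                  |
 *	|     XFS_INUMBERS_REQ_SIZE(1) bytes                 |
 *	+---------------------------------------------------+
 *	| struct xfs_bulkstat_req, CHUNKSIZE records         |
 *	|     XFS_BULKSTAT_REQ_SIZE(CHUNKSIZE) bytes         |
 *	+---------------------------------------------------+
 */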
static inline int
alloc_ichunk(
	struct scrub_ctx	*ctx,
	struct scan_inodes	*si,
	xfs_agnumber_t		agno,
	uint64_t		startino,
	struct scan_ichunk	**ichunkp)
{
	struct scan_ichunk	*ichunk;
	struct xfs_inumbers_req	*ireq;
	struct xfs_bulkstat_req	*breq;

	ichunk = calloc(1, sizeof(struct scan_ichunk) +
			   XFS_INUMBERS_REQ_SIZE(1) +
			   XFS_BULKSTAT_REQ_SIZE(LIBFROG_BULKSTAT_CHUNKSIZE));
	if (!ichunk)
		return ENOMEM;

	ichunk->si = si;

	ireq = ichunk_to_inumbers(ichunk);
	ireq->hdr.icount = 1;
	ireq->hdr.ino = startino;
	ireq->hdr.agno = agno;
	ireq->hdr.flags |= XFS_BULK_IREQ_AGNO;

	breq = ichunk_to_bulkstat(ichunk);
	breq->hdr.icount = LIBFROG_BULKSTAT_CHUNKSIZE;

	/* Scan the metadata directory tree too. */
	if (ctx->mnt.fsgeom.flags & XFS_FSOP_GEOM_FLAGS_METADIR)
		breq->hdr.flags |= XFS_BULK_IREQ_METADIR;

	*ichunkp = ichunk;
	return 0;
}
/* Render the inode described by a bulkstat record for an error message. */
static int
render_ino_from_bulkstat(
	struct scrub_ctx	*ctx,
	char			*buf,
	size_t			buflen,
	void			*data)
{
	struct xfs_bulkstat	*bstat = data;

	return scrub_render_ino_descr(ctx, buf, buflen, bstat->bs_ino,
			bstat->bs_gen, NULL);
}

/* Render the name of an AG's inode scan for an error message. */
static int
render_inumbers_from_agno(
	struct scrub_ctx	*ctx,
	char			*buf,
	size_t			buflen,
	void			*data)
{
	xfs_agnumber_t		*agno = data;

	return snprintf(buf, buflen, _("dev %d:%d AG %u inodes"),
				major(ctx->fsinfo.fs_datadev),
				minor(ctx->fsinfo.fs_datadev),
				*agno);
}
/*
 * Call BULKSTAT for information on a single chunk's worth of inodes and call
 * our iterator function.  We'll try to fill the bulkstat information in
 * batches, but we also can detect iget failures.
 */
static void
scan_ag_bulkstat(
	struct workqueue	*wq,
	xfs_agnumber_t		agno,
	void			*arg)
{
	struct xfs_handle	handle;
	struct scrub_ctx	*ctx = (struct scrub_ctx *)wq->wq_ctx;
	struct scan_ichunk	*ichunk = arg;
	struct xfs_inumbers_req	*ireq = ichunk_to_inumbers(ichunk);
	struct xfs_bulkstat_req	*breq = ichunk_to_bulkstat(ichunk);
	struct scan_inodes	*si = ichunk->si;
	struct xfs_bulkstat	*bs = &breq->bulkstat[0];
	struct xfs_inumbers	*inumbers = &ireq->inumbers[0];
	uint64_t		last_ino = 0;
	int			i;
	int			error = 0;
	int			stale_count = 0;
	DEFINE_DESCR(dsc_bulkstat, ctx, render_ino_from_bulkstat);
	DEFINE_DESCR(dsc_inumbers, ctx, render_inumbers_from_agno);

	descr_set(&dsc_inumbers, &agno);
	handle_from_fshandle(&handle, ctx->fshandle, ctx->fshandle_len);
retry:
	bulkstat_for_inumbers(ctx, inumbers, breq);

	/* Iterate all the inodes; restart from the front after a retry. */
	bs = &breq->bulkstat[0];
	for (i = 0; !si->aborted && i < breq->hdr.ocount; i++, bs++) {
		uint64_t	scan_ino = bs->bs_ino;

		/* ensure forward progress if we retried */
		if (scan_ino < last_ino)
			continue;

		descr_set(&dsc_bulkstat, bs);
		handle_from_bulkstat(&handle, bs);
		error = si->fn(ctx, &handle, bs, si->arg);
		switch (error) {
		case 0:
			break;
		case ESTALE: {
			stale_count++;
			if (stale_count < 30) {
				uint64_t	old_startino;

				ireq->hdr.ino = old_startino =
						inumbers->xi_startino;
				error = -xfrog_inumbers(&ctx->mnt, ireq);
				if (error)
					goto err;
				/*
				 * Retry only if inumbers returns the same
				 * inobt record as the previous record and
				 * there are allocated inodes in it.
				 */
				if (!si->aborted &&
				    ireq->hdr.ocount > 0 &&
				    inumbers->xi_alloccount > 0 &&
				    inumbers->xi_startino == old_startino)
					goto retry;
				goto out;
			}
			str_info(ctx, descr_render(&dsc_bulkstat),
_("Changed too many times during scan; giving up."));
			si->aborted = true;
			goto out;
		}
		case ECANCELED:
			error = 0;
			/* fall through */
		default:
			goto err;
		}
		if (scrub_excessive_errors(ctx)) {
			si->aborted = true;
			break;
		}
		last_ino = scan_ino;
	}

err:
	if (error) {
		str_liberror(ctx, error, descr_render(&dsc_bulkstat));
		si->aborted = true;
	}
out:
	free(ichunk);
}
/*
 * Call INUMBERS for information about inode chunks, then queue the inumbers
 * responses in the bulkstat workqueue.  This helps us maximize CPU
 * parallelism if the filesystem AGs are not evenly loaded.
 */
static void
scan_ag_inumbers(
	struct workqueue	*wq,
	xfs_agnumber_t		agno,
	void			*arg)
{
	struct scan_ichunk	*ichunk = NULL;
	struct scan_inodes	*si = arg;
	struct scrub_ctx	*ctx = (struct scrub_ctx *)wq->wq_ctx;
	struct xfs_inumbers_req	*ireq;
	uint64_t		nextino = cvt_agino_to_ino(&ctx->mnt, agno, 0);
	int			error;
	DEFINE_DESCR(dsc, ctx, render_inumbers_from_agno);

	descr_set(&dsc, &agno);

	error = alloc_ichunk(ctx, si, agno, 0, &ichunk);
	if (error)
		goto err;
	ireq = ichunk_to_inumbers(ichunk);

	/* Find the inode chunk & alloc mask */
	error = -xfrog_inumbers(&ctx->mnt, ireq);
	while (!error && !si->aborted && ireq->hdr.ocount > 0) {
		/*
		 * Make sure that we always make forward progress while we
		 * scan the inode btree.
		 */
		if (nextino > ireq->inumbers[0].xi_startino) {
			str_corrupt(ctx, descr_render(&dsc),
	_("AG %u inode btree is corrupt near agino %lu, got %lu"), agno,
				cvt_ino_to_agino(&ctx->mnt, nextino),
				cvt_ino_to_agino(&ctx->mnt,
						ireq->inumbers[0].xi_startino));
			si->aborted = true;
			break;
		}
		nextino = ireq->hdr.ino;

		if (ireq->inumbers[0].xi_alloccount == 0) {
			/*
			 * We can have totally empty inode chunks on
			 * filesystems where there are more than 64 inodes
			 * per block.  Skip these.
			 */
			;
		} else if (si->nr_threads > 0) {
			/* Queue this inode chunk on the bulkstat workqueue. */
			error = -workqueue_add(&si->wq_bulkstat,
					scan_ag_bulkstat, agno, ichunk);
			if (error) {
				si->aborted = true;
				str_liberror(ctx, error,
						_("queueing bulkstat work"));
				goto out;
			}
			ichunk = NULL;
		} else {
			/*
			 * Only one thread, call bulkstat directly.  Remember,
			 * ichunk is freed by the worker before returning.
			 */
			scan_ag_bulkstat(wq, agno, ichunk);
			ichunk = NULL;
			if (si->aborted)
				break;
		}

		if (!ichunk) {
			error = alloc_ichunk(ctx, si, agno, nextino, &ichunk);
			if (error)
				break;
		}
		ireq = ichunk_to_inumbers(ichunk);

		error = -xfrog_inumbers(&ctx->mnt, ireq);
	}

err:
	if (error) {
		str_liberror(ctx, error, descr_render(&dsc));
		si->aborted = true;
	}
out:
	if (ichunk)
		free(ichunk);
}
/*
 * Scan all the inodes in a filesystem, including metadata directory files
 * and broken files.  On error, this function will log an error message and
 * return -1.
 */
int
scrub_scan_all_inodes(
	struct scrub_ctx	*ctx,
	scrub_inode_iter_fn	fn,
	void			*arg)
{
	struct scan_inodes	si = {
		.fn		= fn,
		.arg		= arg,
		.nr_threads	= scrub_nproc_workqueue(ctx),
	};
	xfs_agnumber_t		agno;
	struct workqueue	wq_inumbers;
	unsigned int		max_bulkstat;
	int			ret;

	/*
	 * The bulkstat workqueue should queue at most one inobt block's
	 * worth of inode chunk records per worker thread, where each on-disk
	 * inobt record is 16 bytes.  If we're running in single thread mode
	 * (nr_threads==0) then we skip the workqueues.
	 */
	max_bulkstat = si.nr_threads * (ctx->mnt.fsgeom.blocksize / 16);

	ret = -workqueue_create_bound(&si.wq_bulkstat, (struct xfs_mount *)ctx,
			si.nr_threads, max_bulkstat);
	if (ret) {
		str_liberror(ctx, ret, _("creating bulkstat workqueue"));
		return -1;
	}

	ret = -workqueue_create(&wq_inumbers, (struct xfs_mount *)ctx,
			si.nr_threads);
	if (ret) {
		str_liberror(ctx, ret, _("creating inumbers workqueue"));
		si.aborted = true;
		goto kill_bulkstat;
	}

	for (agno = 0; agno < ctx->mnt.fsgeom.agcount; agno++) {
		ret = -workqueue_add(&wq_inumbers, scan_ag_inumbers, agno, &si);
		if (ret) {
			si.aborted = true;
			str_liberror(ctx, ret, _("queueing inumbers work"));
			break;
		}
	}

	ret = -workqueue_terminate(&wq_inumbers);
	if (ret) {
		si.aborted = true;
		str_liberror(ctx, ret, _("finishing inumbers work"));
	}
	workqueue_destroy(&wq_inumbers);

kill_bulkstat:
	ret = -workqueue_terminate(&si.wq_bulkstat);
	if (ret) {
		si.aborted = true;
		str_liberror(ctx, ret, _("finishing bulkstat work"));
	}
	workqueue_destroy(&si.wq_bulkstat);

	return si.aborted ? -1 : 0;
}
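/*
 * Illustrative caller sketch (count_inode and count are hypothetical, not
 * part of this file; the real phase code lives elsewhere).  The iterator
 * receives one bulkstat record per inode and returns 0 to continue, ESTALE
 * to request a rescan of the chunk, or another errno to stop the scan:
 *
 *	static int
 *	count_inode(struct scrub_ctx *ctx, struct xfs_handle *handle,
 *			struct xfs_bulkstat *bstat, void *arg)
 *	{
 *		uint64_t	*counter = arg;
 *
 *		(*counter)++;
 *		return 0;
 *	}
 *
 *	uint64_t	count = 0;
 *
 *	if (scrub_scan_all_inodes(ctx, count_inode, &count) < 0)
 *		... handle the failed or aborted scan ...
 */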
struct user_bulkstat {
	struct scan_inodes	*si;

	/* vla, must be last */
	struct xfs_bulkstat_req	breq;
};
/* Iterate all the user files returned by a bulkstat. */
static void
scan_user_files(
	struct workqueue	*wq,
	xfs_agnumber_t		agno,
	void			*arg)
{
	struct xfs_handle	handle;
	struct scrub_ctx	*ctx = (struct scrub_ctx *)wq->wq_ctx;
	struct user_bulkstat	*ureq = arg;
	struct xfs_bulkstat	*bs = &ureq->breq.bulkstat[0];
	struct scan_inodes	*si = ureq->si;
	int			i;
	int			error = 0;
	DEFINE_DESCR(dsc_bulkstat, ctx, render_ino_from_bulkstat);

	handle_from_fshandle(&handle, ctx->fshandle, ctx->fshandle_len);

	for (i = 0; !si->aborted && i < ureq->breq.hdr.ocount; i++, bs++) {
		descr_set(&dsc_bulkstat, bs);
		handle_from_bulkstat(&handle, bs);
		error = si->fn(ctx, &handle, bs, si->arg);
		switch (error) {
		case 0:
			break;
		case ESTALE:
		case ECANCELED:
			error = 0;
			/* fall through */
		default:
			goto err;
		}
		if (scrub_excessive_errors(ctx)) {
			si->aborted = true;
			break;
		}
	}

err:
	if (error) {
		str_liberror(ctx, error, descr_render(&dsc_bulkstat));
		si->aborted = true;
	}
	free(ureq);
}
/*
 * Run one step of the user files bulkstat scan and schedule background
 * processing of the stat data returned.  Returns 1 to keep going, or 0 to
 * stop.
 */
static int
scan_user_bulkstat(
	struct scrub_ctx	*ctx,
	struct scan_inodes	*si,
	uint64_t		*cursor)
{
	struct user_bulkstat	*ureq;
	const char		*what = NULL;
	int			ret;

	ureq = calloc(1, sizeof(struct user_bulkstat) +
			XFS_BULKSTAT_REQ_SIZE(LIBFROG_BULKSTAT_CHUNKSIZE));
	if (!ureq) {
		ret = ENOMEM;
		what = _("creating bulkstat work item");
		goto err;
	}
	ureq->si = si;
	ureq->breq.hdr.icount = LIBFROG_BULKSTAT_CHUNKSIZE;
	ureq->breq.hdr.ino = *cursor;

	ret = -xfrog_bulkstat(&ctx->mnt, &ureq->breq);
	if (ret) {
		what = _("user files bulkstat");
		goto err_ureq;
	}
	if (ureq->breq.hdr.ocount == 0) {
		/* No more user files; end the scan. */
		free(ureq);
		return 0;
	}

	*cursor = ureq->breq.hdr.ino;

	/* scan_user_files frees ureq; do not access it */
	ret = -workqueue_add(&si->wq_bulkstat, scan_user_files, 0, ureq);
	if (ret) {
		what = _("queueing bulkstat work");
		goto err_ureq;
	}

	return 1;

err_ureq:
	free(ureq);
err:
	si->aborted = true;
	str_liberror(ctx, ret, what);
	return 0;
}
/*
 * Scan all the user files in a filesystem in inumber order.  On error, this
 * function will log an error message and return -1.
 */
int
scrub_scan_user_files(
	struct scrub_ctx	*ctx,
	scrub_inode_iter_fn	fn,
	void			*arg)
{
	struct scan_inodes	si = {
		.fn		= fn,
		.arg		= arg,
		.nr_threads	= scrub_nproc_workqueue(ctx),
	};
	uint64_t		ino = 0;
	int			ret;

	/* Queue up to four bulkstat result sets per thread. */
	ret = -workqueue_create_bound(&si.wq_bulkstat, (struct xfs_mount *)ctx,
			si.nr_threads, si.nr_threads * 4);
	if (ret) {
		str_liberror(ctx, ret, _("creating bulkstat workqueue"));
		return -1;
	}

	while ((ret = scan_user_bulkstat(ctx, &si, &ino)) == 1) {
		/* keep scanning until end of filesystem or error */
	}

	ret = -workqueue_terminate(&si.wq_bulkstat);
	if (ret) {
		si.aborted = true;
		str_liberror(ctx, ret, _("finishing bulkstat work"));
	}
	workqueue_destroy(&si.wq_bulkstat);

	return si.aborted ? -1 : 0;
}
/* Open a file by handle, returning either the fd or -1 on error. */
int
scrub_open_handle(
	struct xfs_handle	*handle)
{
	return open_by_fshandle(handle, sizeof(*handle),
			O_RDONLY | O_NOATIME | O_NOFOLLOW | O_NOCTTY);
}
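/*
 * About the open flags above: O_NOATIME keeps the scan from dirtying access
 * times, O_NOFOLLOW avoids following symlinks, and O_NOCTTY keeps a device
 * special file from becoming our controlling terminal.  Per open(2),
 * O_NOATIME requires that the caller own the file or have CAP_FOWNER;
 * xfs_scrub normally runs with enough privilege for this.
 */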