tmp_init(mntdir);
- breq = xfrog_bulkstat_alloc_req(GRABSZ, startino);
- if (!breq) {
- fsrprintf(_("Skipping %s: not enough memory\n"),
- mntdir);
+ ret = -xfrog_bulkstat_alloc_req(GRABSZ, startino, &breq);
+ if (ret) {
+ fsrprintf(_("Skipping %s: %s\n"), mntdir, strerror(ret));
xfd_close(&fsxfd);
free(fshandlep);
return -1;
}
- while ((ret = xfrog_bulkstat(&fsxfd, breq) == 0)) {
+ while ((ret = -xfrog_bulkstat(&fsxfd, breq)) == 0) {
struct xfs_bstat bs1;
struct xfs_bulkstat *buf = breq->bulkstat;
struct xfs_bulkstat *p;
(p->bs_extents < 2))
continue;
- ret = xfrog_bulkstat_v5_to_v1(&fsxfd, &bs1, p);
+ ret = -xfrog_bulkstat_v5_to_v1(&fsxfd, &bs1, p);
if (ret) {
fsrprintf(_("bstat conversion error: %s\n"),
strerror(ret));
goto out;
}
- error = xfrog_bulkstat_single(&fsxfd, ino, 0, &bulkstat);
+ error = -xfrog_bulkstat_single(&fsxfd, ino, 0, &bulkstat);
if (error) {
fsrprintf(_("unable to get bstat on %s: %s\n"),
fname, strerror(error));
goto out;
}
- error = xfrog_bulkstat_v5_to_v1(&fsxfd, &statbuf, &bulkstat);
+ error = -xfrog_bulkstat_v5_to_v1(&fsxfd, &statbuf, &bulkstat);
if (error) {
fsrprintf(_("bstat conversion error on %s: %s\n"),
fname, strerror(error));
* this to compare against the target and determine what we
* need to do.
*/
- ret = xfrog_bulkstat_single(&txfd, tstatbuf.st_ino, 0, &tbstat);
+ ret = -xfrog_bulkstat_single(&txfd, tstatbuf.st_ino, 0,
+ &tbstat);
if (ret) {
fsrprintf(_("unable to get bstat on temp file: %s\n"),
strerror(ret));
return 0;
}
- breq = xfrog_bulkstat_alloc_req(batch_size, startino);
- if (!breq) {
- perror("alloc bulkreq");
+ ret = -xfrog_bulkstat_alloc_req(batch_size, startino, &breq);
+ if (ret) {
+ xfrog_perror(ret, "alloc bulkreq");
exitcode = 1;
return 0;
}
set_xfd_flags(&xfd, ver);
- while ((ret = xfrog_bulkstat(&xfd, breq)) == 0) {
+ while ((ret = -xfrog_bulkstat(&xfd, breq)) == 0) {
if (debug)
printf(
_("bulkstat: startino=%lld flags=0x%x agno=%u ret=%d icount=%u ocount=%u\n"),
}
}
- ret = xfrog_bulkstat_single(&xfd, ino, flags, &bulkstat);
+ ret = -xfrog_bulkstat_single(&xfd, ino, flags, &bulkstat);
if (ret) {
xfrog_perror(ret, "xfrog_bulkstat_single");
continue;
return 0;
}
- ireq = xfrog_inumbers_alloc_req(batch_size, startino);
- if (!ireq) {
- perror("alloc inumbersreq");
+ ret = -xfrog_inumbers_alloc_req(batch_size, startino, &ireq);
+ if (ret) {
+ xfrog_perror(ret, "alloc inumbersreq");
exitcode = 1;
return 0;
}
set_xfd_flags(&xfd, ver);
- while ((ret = xfrog_inumbers(&xfd, ireq)) == 0) {
+ while ((ret = -xfrog_inumbers(&xfd, ireq)) == 0) {
if (debug)
printf(
_("bulkstat: startino=%"PRIu64" flags=0x%"PRIx32" agno=%"PRIu32" ret=%d icount=%"PRIu32" ocount=%"PRIu32"\n"),
else
nent = atoi(argv[1]);
- ireq = xfrog_inumbers_alloc_req(nent, 0);
- if (!ireq) {
- perror("alloc req");
+ error = -xfrog_inumbers_alloc_req(nent, 0, &ireq);
+ if (error) {
+ xfrog_perror(error, "alloc req");
return 0;
}
- while ((error = xfrog_inumbers(&xfd, ireq)) == 0 &&
+ while ((error = -xfrog_inumbers(&xfd, ireq)) == 0 &&
ireq->hdr.ocount > 0) {
for (i = 0; i < ireq->hdr.ocount; i++) {
printf(_("ino %10"PRIu64" count %2d mask %016"PRIx64"\n"),
struct xfs_inumbers_req *ireq;
uint32_t lastgrp = 0;
__u64 last_ino = 0;
+ int ret;
- ireq = xfrog_inumbers_alloc_req(IGROUP_NR, 0);
- if (!ireq) {
- perror("alloc req");
+ ret = -xfrog_inumbers_alloc_req(IGROUP_NR, 0, &ireq);
+ if (ret) {
+ xfrog_perror(ret, "alloc req");
return 0;
}
for (;;) {
- int ret;
-
- ret = xfrog_inumbers(&xfd, ireq);
+ ret = -xfrog_inumbers(&xfd, ireq);
if (ret) {
xfrog_perror(ret, "XFS_IOC_FSINUMBERS");
goto out;
* The -n option means that the caller wants to know the number
* of the next allocated inode, so we need to increment here.
*/
- breq = xfrog_bulkstat_alloc_req(1, userino + 1);
- if (!breq) {
- perror("alloc bulkstat");
+ ret = -xfrog_bulkstat_alloc_req(1, userino + 1, &breq);
+ if (ret) {
+ xfrog_perror(ret, "alloc bulkstat");
exitcode = 1;
return 0;
}
/* get next inode */
- ret = xfrog_bulkstat(&xfd, breq);
+ ret = -xfrog_bulkstat(&xfd, breq);
if (ret) {
xfrog_perror(ret, "bulkstat");
free(breq);
struct xfs_fd xfd = XFS_FD_INIT(file->fd);
/* get this inode */
- ret = xfrog_bulkstat_single(&xfd, userino, 0, &bulkstat);
+ ret = -xfrog_bulkstat_single(&xfd, userino, 0, &bulkstat);
if (ret == EINVAL) {
/* Not in use */
result_ino = 0;
goto out;
}
- error = xfrog_bulkstat_single(&fxfd, stat.st_ino, 0, &bulkstat);
+ error = -xfrog_bulkstat_single(&fxfd, stat.st_ino, 0, &bulkstat);
if (error) {
xfrog_perror(error, "bulkstat");
goto out;
}
- error = xfrog_bulkstat_v5_to_v1(&fxfd, &sx.sx_stat, &bulkstat);
+ error = -xfrog_bulkstat_v5_to_v1(&fxfd, &sx.sx_stat, &bulkstat);
if (error) {
xfrog_perror(error, "bulkstat conversion");
goto out;
if (xfd->fsgeom.blocksize > 0)
return 0;
- return -xfd_prepare_geometry(xfd);
+ return xfd_prepare_geometry(xfd);
}
/* Bulkstat a single inode using v5 ioctl. */
int ret;
if (flags & ~(XFS_BULK_IREQ_SPECIAL))
- return EINVAL;
+ return -EINVAL;
- req = xfrog_bulkstat_alloc_req(1, ino);
- if (!req)
- return ENOMEM;
+ ret = xfrog_bulkstat_alloc_req(1, ino, &req);
+ if (ret)
+ return ret;
req->hdr.flags = flags;
ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req);
if (ret) {
- ret = errno;
+ ret = -errno;
goto free;
}
if (req->hdr.ocount == 0) {
- ret = ENOENT;
+ ret = -ENOENT;
goto free;
}
int error;
if (flags)
- return EINVAL;
+ return -EINVAL;
error = xfrog_bulkstat_prep_v1_emulation(xfd);
if (error)
bulkreq.ubuffer = &bstat;
error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT_SINGLE, &bulkreq);
if (error)
- return errno;
+ return -errno;
xfrog_bulkstat_v1_to_v5(xfd, bulkstat, &bstat);
return 0;
}
-/* Bulkstat a single inode. Returns zero or a positive error code. */
+/* Bulkstat a single inode. Returns zero or a negative error code. */
int
xfrog_bulkstat_single(
struct xfs_fd *xfd,
/* If the v5 ioctl wasn't found, we punt to v1. */
switch (error) {
- case EOPNOTSUPP:
- case ENOTTY:
+ case -EOPNOTSUPP:
+ case -ENOTTY:
xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
break;
}
* kernels.
*
* Returns 0 if the emulation should proceed; ECANCELED if there are no
- * records; or a positive error code.
+ * records; or a negative error code.
*/
static int
xfrog_bulk_req_v1_setup(
if (hdr->ino == 0)
hdr->ino = cvt_agino_to_ino(xfd, hdr->agno, 0);
else if (agno < hdr->agno)
- return EINVAL;
+ return -EINVAL;
else if (agno > hdr->agno)
goto no_results;
}
buf = malloc(hdr->icount * rec_size);
if (!buf)
- return errno;
+ return -errno;
if (hdr->ino)
hdr->ino--;
no_results:
hdr->ocount = 0;
- return ECANCELED;
+ return -ECANCELED;
}
/*
void *v5_rec = v5_records;
unsigned int i;
- if (error == ECANCELED) {
+ if (error == -ECANCELED) {
error = 0;
goto free;
}
ret = ioctl(xfd->fd, XFS_IOC_BULKSTAT, req);
if (ret)
- return errno;
+ return -errno;
return 0;
}
error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq,
sizeof(struct xfs_bstat));
- if (error == ECANCELED)
+ if (error == -ECANCELED)
goto out_teardown;
if (error)
return error;
error = ioctl(xfd->fd, XFS_IOC_FSBULKSTAT, &bulkreq);
if (error)
- error = errno;
+ error = -errno;
out_teardown:
return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq,
/* If the v5 ioctl wasn't found, we punt to v1. */
switch (error) {
- case EOPNOTSUPP:
- case ENOTTY:
+ case -EOPNOTSUPP:
+ case -ENOTTY:
xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
break;
}
time_too_big(bs5->bs_atime) ||
time_too_big(bs5->bs_ctime) ||
time_too_big(bs5->bs_mtime))
- return ERANGE;
+ return -ERANGE;
bs1->bs_ino = bs5->bs_ino;
bs1->bs_mode = bs5->bs_mode;
bs5->bs_aextents = bs1->bs_aextents;
}
-/* Allocate a bulkstat request. On error returns NULL and sets errno. */
-struct xfs_bulkstat_req *
+/* Allocate a bulkstat request. Returns zero or a negative error code. */
+int
xfrog_bulkstat_alloc_req(
uint32_t nr,
- uint64_t startino)
+ uint64_t startino,
+ struct xfs_bulkstat_req **preq)
{
struct xfs_bulkstat_req *breq;
breq = calloc(1, XFS_BULKSTAT_REQ_SIZE(nr));
if (!breq)
- return NULL;
+ return -errno;
breq->hdr.icount = nr;
breq->hdr.ino = startino;
- return breq;
+ *preq = breq;
+ return 0;
}
/* Set a bulkstat cursor to iterate only a particular AG. */
ret = ioctl(xfd->fd, XFS_IOC_INUMBERS, req);
if (ret)
- return errno;
+ return -errno;
return 0;
}
error = xfrog_bulk_req_v1_setup(xfd, &req->hdr, &bulkreq,
sizeof(struct xfs_inogrp));
- if (error == ECANCELED)
+ if (error == -ECANCELED)
goto out_teardown;
if (error)
return error;
error = ioctl(xfd->fd, XFS_IOC_FSINUMBERS, &bulkreq);
if (error)
- error = errno;
+ error = -errno;
out_teardown:
return xfrog_bulk_req_v1_cleanup(xfd, &req->hdr, &bulkreq,
}
/*
- * Query inode allocation bitmask information. Returns zero or a positive
+ * Query inode allocation bitmask information. Returns zero or a negative
* error code.
*/
int
/* If the v5 ioctl wasn't found, we punt to v1. */
switch (error) {
- case EOPNOTSUPP:
- case ENOTTY:
+ case -EOPNOTSUPP:
+ case -ENOTTY:
xfd->flags |= XFROG_FLAG_BULKSTAT_FORCE_V1;
break;
}
return xfrog_inumbers1(xfd, req);
}
-/* Allocate a inumbers request. On error returns NULL and sets errno. */
-struct xfs_inumbers_req *
+/* Allocate a inumbers request. Returns zero or a negative error code. */
+int
xfrog_inumbers_alloc_req(
uint32_t nr,
- uint64_t startino)
+ uint64_t startino,
+ struct xfs_inumbers_req **preq)
{
struct xfs_inumbers_req *ireq;
ireq = calloc(1, XFS_INUMBERS_REQ_SIZE(nr));
if (!ireq)
- return NULL;
+ return -errno;
ireq->hdr.icount = nr;
ireq->hdr.ino = startino;
- return ireq;
+ *preq = ireq;
+ return 0;
}
/* Set an inumbers cursor to iterate only a particular AG. */
struct xfs_bulkstat *bulkstat);
int xfrog_bulkstat(struct xfs_fd *xfd, struct xfs_bulkstat_req *req);
-struct xfs_bulkstat_req *xfrog_bulkstat_alloc_req(uint32_t nr,
- uint64_t startino);
+int xfrog_bulkstat_alloc_req(uint32_t nr, uint64_t startino,
+ struct xfs_bulkstat_req **preq);
int xfrog_bulkstat_v5_to_v1(struct xfs_fd *xfd, struct xfs_bstat *bs1,
const struct xfs_bulkstat *bstat);
void xfrog_bulkstat_v1_to_v5(struct xfs_fd *xfd, struct xfs_bulkstat *bstat,
struct xfs_inogrp;
int xfrog_inumbers(struct xfs_fd *xfd, struct xfs_inumbers_req *req);
-struct xfs_inumbers_req *xfrog_inumbers_alloc_req(uint32_t nr,
- uint64_t startino);
+int xfrog_inumbers_alloc_req(uint32_t nr, uint64_t startino,
+ struct xfs_inumbers_req **preq);
void xfrog_inumbers_set_ag(struct xfs_inumbers_req *req, uint32_t agno);
void xfrog_inumbers_v5_to_v1(struct xfs_inogrp *ig1,
const struct xfs_inumbers *ig);
return;
}
- breq = xfrog_bulkstat_alloc_req(NBSTAT, 0);
- if (!breq) {
- perror("calloc");
+ ret = -xfrog_bulkstat_alloc_req(NBSTAT, 0, &breq);
+ if (ret) {
+ xfrog_perror(ret, "calloc");
xfd_close(&fsxfd);
return;
}
- while ((sts = xfrog_bulkstat(&fsxfd, breq)) == 0) {
+ while ((sts = -xfrog_bulkstat(&fsxfd, breq)) == 0) {
if (breq->hdr.ocount == 0)
break;
for (i = 0; i < breq->hdr.ocount; i++)
unsigned int i;
int error;
- ireq = xfrog_inumbers_alloc_req(64, 0);
- if (!ireq) {
- ci->error = errno;
+ error = -xfrog_inumbers_alloc_req(64, 0, &ireq);
+ if (error) {
+ ci->error = error;
return;
}
xfrog_inumbers_set_ag(ireq, agno);
- while (!ci->error && (error = xfrog_inumbers(&ctx->mnt, ireq)) == 0) {
+ while (!ci->error && (error = -xfrog_inumbers(&ctx->mnt, ireq)) == 0) {
if (ireq->hdr.ocount == 0)
break;
for (i = 0; i < ireq->hdr.ocount; i++)
/* First we try regular bulkstat, for speed. */
breq->hdr.ino = inumbers->xi_startino;
breq->hdr.icount = inumbers->xi_alloccount;
- error = xfrog_bulkstat(&ctx->mnt, breq);
+ error = -xfrog_bulkstat(&ctx->mnt, breq);
if (error) {
char errbuf[DESCR_BUFSZ];
}
/* Load the one inode. */
- error = xfrog_bulkstat_single(&ctx->mnt,
+ error = -xfrog_bulkstat_single(&ctx->mnt,
inumbers->xi_startino + i, 0, bs);
if (error || bs->bs_ino != inumbers->xi_startino + i) {
memset(bs, 0, sizeof(struct xfs_bulkstat));
sizeof(handle.ha_fid.fid_len);
handle.ha_fid.fid_pad = 0;
- breq = xfrog_bulkstat_alloc_req(XFS_INODES_PER_CHUNK, 0);
- if (!breq) {
- str_errno(ctx, descr);
+ error = -xfrog_bulkstat_alloc_req(XFS_INODES_PER_CHUNK, 0, &breq);
+ if (error) {
+ str_liberror(ctx, error, descr);
si->aborted = true;
return;
}
- ireq = xfrog_inumbers_alloc_req(1, 0);
- if (!ireq) {
- str_errno(ctx, descr);
+ error = -xfrog_inumbers_alloc_req(1, 0, &ireq);
+ if (error) {
+ str_liberror(ctx, error, descr);
free(breq);
si->aborted = true;
return;
xfrog_inumbers_set_ag(ireq, agno);
/* Find the inode chunk & alloc mask */
- error = xfrog_inumbers(&ctx->mnt, ireq);
+ error = -xfrog_inumbers(&ctx->mnt, ireq);
while (!error && !si->aborted && ireq->hdr.ocount > 0) {
/*
* We can have totally empty inode chunks on filesystems where
stale_count = 0;
igrp_retry:
- error = xfrog_inumbers(&ctx->mnt, ireq);
+ error = -xfrog_inumbers(&ctx->mnt, ireq);
}
err:
descr = d;
}
- ret = xfrog_bulkstat_single(&file->xfd, ino, 0, &bs);
+ ret = -xfrog_bulkstat_single(&file->xfd, ino, 0, &bs);
if (ret) {
xfrog_perror(ret, descr);
return 1;
uint32_t i;
int error;
- breq = xfrog_bulkstat_alloc_req(BULKSTAT_NR, 0);
- if (!breq) {
- perror("bulk alloc req");
+ error = -xfrog_bulkstat_alloc_req(BULKSTAT_NR, 0, &breq);
+ if (error) {
+ xfrog_perror(error, "bulk alloc req");
exitcode = 1;
return 1;
}
xfrog_bulkstat_set_ag(breq, agno);
do {
- error = xfrog_bulkstat(&file->xfd, breq);
+ error = -xfrog_bulkstat(&file->xfd, breq);
if (error)
break;
for (i = 0; i < breq->hdr.ocount; i++) {