+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libxfs.h"
DBM_FREE1, DBM_FREE2, DBM_FREELIST, DBM_INODE,
DBM_LOG, DBM_MISSING, DBM_QUOTA, DBM_RTBITMAP,
DBM_RTDATA, DBM_RTFREE, DBM_RTSUM, DBM_SB,
- DBM_SYMLINK,
+ DBM_SYMLINK, DBM_BTFINO, DBM_BTRMAP, DBM_BTREFC,
+ DBM_RLDATA, DBM_COWDATA,
DBM_NDBM
} dbm_t;
struct inodata *next;
nlink_t link_set;
nlink_t link_add;
- char isdir;
+ bool isdir;
+ bool isreflink;
char security;
char ilist;
xfs_ino_t ino;
static xfs_extlen_t agffreeblks;
static xfs_extlen_t agflongest;
-static __uint64_t agf_aggr_freeblks; /* aggregate count over all */
-static __uint32_t agfbtreeblks;
+static uint64_t agf_aggr_freeblks; /* aggregate count over all */
+static uint32_t agfbtreeblks;
static int lazycount;
static xfs_agino_t agicount;
static xfs_agino_t agifreecount;
static char **dbmap; /* really dbm_t:8 */
static dirhash_t **dirhash;
static int error;
-static __uint64_t fdblocks;
-static __uint64_t frextents;
-static __uint64_t icount;
-static __uint64_t ifree;
+static uint64_t fdblocks;
+static uint64_t frextents;
+static uint64_t icount;
+static uint64_t ifree;
static inodata_t ***inodata;
static int inodata_hash_size;
static inodata_t ***inomap;
"rtsum",
"sb",
"symlink",
+	"btfino",
+	"btrmap",
+	"btrefcnt",
+	"rldata",
+	"cowdata",
NULL
};
static int verbose;
static int blockuse_f(int argc, char **argv);
static int check_blist(xfs_fsblock_t bno);
static void check_dbmap(xfs_agnumber_t agno, xfs_agblock_t agbno,
- xfs_extlen_t len, dbm_t type);
+ xfs_extlen_t len, dbm_t type,
+ int ignore_reflink);
static int check_inomap(xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_extlen_t len, xfs_ino_t c_ino);
static void check_linkcounts(xfs_agnumber_t agno);
static void scanfunc_ino(struct xfs_btree_block *block, int level,
xfs_agf_t *agf, xfs_agblock_t bno,
int isroot);
+static void scanfunc_fino(struct xfs_btree_block *block, int level,
+ struct xfs_agf *agf, xfs_agblock_t bno,
+ int isroot);
+static void scanfunc_rmap(struct xfs_btree_block *block, int level,
+ struct xfs_agf *agf, xfs_agblock_t bno,
+ int isroot);
+static void scanfunc_refcnt(struct xfs_btree_block *block, int level,
+ struct xfs_agf *agf, xfs_agblock_t bno,
+ int isroot);
static void set_dbmap(xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_extlen_t len, dbm_t type,
xfs_agnumber_t c_agno, xfs_agblock_t c_agbno);
return 0;
}
- /*
- * XXX: check does not support CRC enabled filesystems. Return
- * immediately, silently, with success but without doing anything here
- * initially so that xfstests can run without modification on metadata
- * enabled filesystems.
- *
- * XXX: ultimately we need to dump an error message here that xfstests
- * filters out, or we need to actually do the work to make check support
- * crc enabled filesystems.
- */
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return 0;
-
if (!init(argc, argv)) {
if (serious_error)
exitcode = 3;
static void
blocktrash_b(
- xfs_agnumber_t agno,
- xfs_agblock_t agbno,
+ int bit_offset,
dbm_t type,
ltab_t *ltabp,
int mode)
int len;
int mask;
int newbit;
- int offset;
+ const struct xfs_buf_ops *stashed_ops;
static char *modestr[] = {
N_("zeroed"), N_("set"), N_("flipped"), N_("randomized")
};
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ agno = XFS_FSB_TO_AGNO(mp, XFS_DADDR_TO_FSB(mp, iocur_top->bb));
+ agbno = XFS_FSB_TO_AGBNO(mp, XFS_DADDR_TO_FSB(mp, iocur_top->bb));
+ if (iocur_top->len == 0) {
+ dbprintf(_("zero-length block %u/%u buffer to trash??\n"),
+ agno, agbno);
+ return;
+ }
len = (int)((random() % (ltabp->max - ltabp->min + 1)) + ltabp->min);
- offset = (int)(random() % (int)(mp->m_sb.sb_blocksize * NBBY));
+ /*
+ * bit_offset >= 0: start fuzzing at this exact bit_offset.
+	 * bit_offset < 0: pick an offset at least as high as -(bit_offset + 1).
+ */
+ if (bit_offset < 0) {
+ bit_offset = -(bit_offset + 1);
+ bit_offset += (int)(random() % (int)((iocur_top->len - bit_offset) * NBBY));
+ }
+ if (bit_offset + len >= iocur_top->len * NBBY)
+ len = (iocur_top->len * NBBY) - bit_offset;
newbit = 0;
- push_cur();
- set_cur(&typtab[DBM_UNKNOWN],
- XFS_AGB_TO_DADDR(mp, agno, agbno), blkbb, DB_RING_IGN, NULL);
+ stashed_ops = iocur_top->bp->b_ops;
+ iocur_top->bp->b_ops = NULL;
if ((buf = iocur_top->data) == NULL) {
dbprintf(_("can't read block %u/%u for trashing\n"), agno, agbno);
- pop_cur();
return;
}
for (bitno = 0; bitno < len; bitno++) {
- bit = (offset + bitno) % (mp->m_sb.sb_blocksize * NBBY);
+ bit = (bit_offset + bitno) % (mp->m_sb.sb_blocksize * NBBY);
byte = bit / NBBY;
bit %= NBBY;
mask = 1 << bit;
buf[byte] &= ~mask;
}
write_cur();
- pop_cur();
+ iocur_top->bp->b_ops = stashed_ops;
printf(_("blocktrash: %u/%u %s block %d bit%s starting %d:%d %s\n"),
agno, agbno, typename[type], len, len == 1 ? "" : "s",
- offset / NBBY, offset % NBBY, modestr[mode]);
+ bit_offset / NBBY, bit_offset % NBBY, modestr[mode]);
}
-int
+static int
blocktrash_f(
int argc,
char **argv)
uint seed;
int sopt;
int tmask;
+ bool this_block = false;
+ int bit_offset = -1;
- if (!dbmap) {
- dbprintf(_("must run blockget first\n"));
- return 0;
- }
optind = 0;
count = 1;
min = 1;
(1 << DBM_BTINO) |
(1 << DBM_DIR) |
(1 << DBM_INODE) |
+ (1 << DBM_LOG) |
(1 << DBM_QUOTA) |
(1 << DBM_RTBITMAP) |
(1 << DBM_RTSUM) |
+ (1 << DBM_SYMLINK) |
+ (1 << DBM_BTFINO) |
+ (1 << DBM_BTRMAP) |
+ (1 << DBM_BTREFC) |
(1 << DBM_SB);
- while ((c = getopt(argc, argv, "0123n:s:t:x:y:")) != EOF) {
+ while ((c = getopt(argc, argv, "0123n:o:s:t:x:y:z")) != EOF) {
switch (c) {
case '0':
mode = 0;
return 0;
}
break;
+ case 'o': {
+ int relative = 0;
+ if (optarg[0] == '+') {
+ optarg++;
+ relative = 1;
+ }
+ bit_offset = (int)strtol(optarg, &p, 0);
+ if (*p != '\0' || bit_offset < 0) {
+ dbprintf(_("bad blocktrash offset %s\n"), optarg);
+ return 0;
+ }
+ if (relative)
+ bit_offset = -bit_offset - 1;
+ break;
+ }
case 's':
seed = (uint)strtoul(optarg, &p, 0);
sopt = 1;
return 0;
}
break;
+ case 'z':
+ this_block = true;
+ break;
default:
dbprintf(_("bad option for blocktrash command\n"));
return 0;
}
}
+ if (!this_block && !dbmap) {
+ dbprintf(_("must run blockget first\n"));
+ return 0;
+ }
+ if (this_block && iocur_sp == 0) {
+ dbprintf(_("nothing on stack\n"));
+ return 0;
+ }
if (min > max) {
dbprintf(_("bad min/max for blocktrash command\n"));
return 0;
}
if (tmask == 0)
- tmask = goodmask;
+ tmask = goodmask & ~((1 << DBM_LOG) | (1 << DBM_SB));
lentab = xmalloc(sizeof(ltab_t));
lentab->min = lentab->max = min;
lentablen = 1;
} else
lentab[lentablen - 1].max = i;
}
+ if (!sopt)
+ dbprintf(_("blocktrash: seed %u\n"), seed);
+ srandom(seed);
+ if (this_block) {
+ blocktrash_b(bit_offset, DBM_UNKNOWN,
+ &lentab[random() % lentablen], mode);
+ goto out;
+ }
for (blocks = 0, agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
for (agbno = 0, p = dbmap[agno];
agbno < mp->m_sb.sb_agblocks;
dbprintf(_("blocktrash: no matching blocks\n"));
goto out;
}
- if (!sopt)
- dbprintf(_("blocktrash: seed %u\n"), seed);
- srandom(seed);
for (i = 0; i < count; i++) {
- randb = (xfs_rfsblock_t)((((__int64_t)random() << 32) |
+ randb = (xfs_rfsblock_t)((((int64_t)random() << 32) |
random()) % blocks);
for (bi = 0, agno = 0, done = 0;
!done && agno < mp->m_sb.sb_agcount;
continue;
if (bi++ < randb)
continue;
- blocktrash_b(agno, agbno, (dbm_t)*p,
+ push_cur();
+ set_cur(NULL,
+ XFS_AGB_TO_DADDR(mp, agno, agbno),
+ blkbb, DB_RING_IGN, NULL);
+ blocktrash_b(bit_offset, (dbm_t)*p,
&lentab[random() % lentablen], mode);
+ pop_cur();
done = 1;
break;
}
return 0;
}
-int
+static int
blockuse_f(
int argc,
char **argv)
xfs_agnumber_t agno,
xfs_agblock_t agbno,
xfs_extlen_t len,
- dbm_t type)
+ dbm_t type,
+ int ignore_reflink)
{
xfs_extlen_t i;
char *p;
+ dbm_t d;
for (i = 0, p = &dbmap[agno][agbno]; i < len; i++, p++) {
+ d = (dbm_t)*p;
+ if (ignore_reflink && (d == DBM_UNKNOWN || d == DBM_DATA ||
+ d == DBM_RLDATA))
+ continue;
if ((dbm_t)*p != type) {
- if (!sflag || CHECK_BLISTA(agno, agbno + i))
+ if (!sflag || CHECK_BLISTA(agno, agbno + i)) {
dbprintf(_("block %u/%u expected type %s got "
"%s\n"),
agno, agbno + i, typename[type],
typename[(dbm_t)*p]);
+ }
error++;
}
}
return 0;
}
for (i = 0, rval = 1, idp = &inomap[agno][agbno]; i < len; i++, idp++) {
- if (*idp) {
+ if (*idp && !(*idp)->isreflink) {
if (!sflag || (*idp)->ilist ||
CHECK_BLISTA(agno, agbno + i))
dbprintf(_("block %u/%u claimed by inode %lld, "
return 1;
}
+/*
+ * We don't check the accuracy of reference counts -- all we do is ensure
+ * that a data block never crosses with non-data blocks. repair can check
+ * those kinds of things.
+ *
+ * So with that in mind, if we're setting a block to be data or rldata,
+ * don't complain so long as the block is currently unknown, data, or rldata.
+ * Don't let blocks downgrade from rldata -> data.
+ */
+static bool
+is_reflink(
+	dbm_t type2)	/* dbmap type about to be assigned to the block */
+{
+	/* Sharing is only possible when the reflink feature is enabled. */
+	if (!xfs_sb_version_hasreflink(&mp->m_sb))
+		return false;
+	/* Only data blocks (shared or not) participate in reflink checks. */
+	if (type2 == DBM_DATA || type2 == DBM_RLDATA)
+		return true;
+	return false;
+}
+
static void
check_set_dbmap(
xfs_agnumber_t agno,
agbno, agbno + len - 1, c_agno, c_agbno);
return;
}
- check_dbmap(agno, agbno, len, type1);
+ check_dbmap(agno, agbno, len, type1, is_reflink(type2));
mayprint = verbose | blist_size;
for (i = 0, p = &dbmap[agno][agbno]; i < len; i++, p++) {
- *p = (char)type2;
+ if (*p == DBM_RLDATA && type2 == DBM_DATA)
+ ; /* do nothing */
+ else if (*p == DBM_DATA && type2 == DBM_DATA)
+ *p = (char)DBM_RLDATA;
+ else
+ *p = (char)type2;
if (mayprint && (verbose || CHECK_BLISTA(agno, agbno + i)))
dbprintf(_("setting block %u/%u to %s\n"), agno, agbno + i,
typename[type2]);
inomap = xmalloc((mp->m_sb.sb_agcount + rt) * sizeof(*inomap));
inodata = xmalloc(mp->m_sb.sb_agcount * sizeof(*inodata));
inodata_hash_size =
- (int)MAX(MIN(mp->m_sb.sb_icount /
+ (int)max(min(mp->m_sb.sb_icount /
(INODATA_AVG_HASH_LENGTH * mp->m_sb.sb_agcount),
MAX_INODATA_HASH_SIZE),
MIN_INODATA_HASH_SIZE);
int nex;
xfs_ino_t parent;
int v;
- int x;
+ int i;
nex = blkmap_getn(blkmap, 0, mp->m_dir_geo->fsbcount, &bmp);
v = id->ilist || verbose;
make_bbmap(&bbmap, nex, bmp);
set_cur(&typtab[TYP_DIR2], XFS_FSB_TO_DADDR(mp, bmp->startblock),
mp->m_dir_geo->fsbcount * blkbb, DB_RING_IGN, nex > 1 ? &bbmap : NULL);
- for (x = 0; !v && x < nex; x++) {
- for (b = bmp[x].startblock;
- !v && b < bmp[x].startblock + bmp[x].blockcount;
+ for (i = 0; !v && i < nex; i++) {
+ for (b = bmp[i].startblock;
+ !v && b < bmp[i].startblock + bmp[i].blockcount;
b++)
v = CHECK_BLIST(b);
}
return;
}
if (be16_to_cpu(dib->bb_numrecs) >
- xfs_bmdr_maxrecs(XFS_DFORK_SIZE(dip, mp, whichfork),
+ libxfs_bmdr_maxrecs(XFS_DFORK_SIZE(dip, mp, whichfork),
be16_to_cpu(dib->bb_level) == 0)) {
if (!sflag || id->ilist)
dbprintf(_("numrecs for ino %lld %s fork bmap root too "
}
if (be16_to_cpu(dib->bb_level) == 0) {
xfs_bmbt_rec_t *rp = XFS_BMDR_REC_ADDR(dib, 1);
- process_bmbt_reclist(rp, be16_to_cpu(dib->bb_numrecs), type,
+ process_bmbt_reclist(rp, be16_to_cpu(dib->bb_numrecs), type,
id, totd, blkmapp);
*nex += be16_to_cpu(dib->bb_numrecs);
return;
} else {
- pp = XFS_BMDR_PTR_ADDR(dib, 1, xfs_bmdr_maxrecs(
+ pp = XFS_BMDR_PTR_ADDR(dib, 1, libxfs_bmdr_maxrecs(
XFS_DFORK_SIZE(dip, mp, whichfork), 0));
for (i = 0; i < be16_to_cpu(dib->bb_numrecs); i++)
- scan_lbtree(be64_to_cpu(pp[i]),
- be16_to_cpu(dib->bb_level),
- scanfunc_bmap, type, id, totd, toti,
- nex, blkmapp, 1,
+ scan_lbtree(get_unaligned_be64(&pp[i]),
+ be16_to_cpu(dib->bb_level),
+ scanfunc_bmap, type, id, totd, toti,
+ nex, blkmapp, 1,
whichfork == XFS_DATA_FORK ?
TYP_BMAPBTD : TYP_BMAPBTA);
}
data = iocur_top->data;
block = iocur_top->data;
if (be32_to_cpu(block->magic) != XFS_DIR2_BLOCK_MAGIC &&
- be32_to_cpu(data->magic) != XFS_DIR2_DATA_MAGIC) {
+ be32_to_cpu(data->magic) != XFS_DIR2_DATA_MAGIC &&
+ be32_to_cpu(block->magic) != XFS_DIR3_BLOCK_MAGIC &&
+ be32_to_cpu(data->magic) != XFS_DIR3_DATA_MAGIC) {
if (!sflag || v)
dbprintf(_("bad directory data magic # %#x for dir ino "
"%lld block %d\n"),
db = xfs_dir2_da_to_db(mp->m_dir_geo, dabno);
bf = M_DIROPS(mp)->data_bestfree_p(data);
ptr = (char *)M_DIROPS(mp)->data_unused_p(data);
- if (be32_to_cpu(block->magic) == XFS_DIR2_BLOCK_MAGIC) {
+ if (be32_to_cpu(block->magic) == XFS_DIR2_BLOCK_MAGIC ||
+ be32_to_cpu(block->magic) == XFS_DIR3_BLOCK_MAGIC) {
btp = xfs_dir2_block_tail_p(mp->m_dir_geo, block);
lep = xfs_dir2_block_leaf_p(btp);
endptr = (char *)lep;
bf_err += (freeseen & (1 << i)) != 0;
freeseen |= 1 << i;
} else
- bf_err += be16_to_cpu(dup->length) >
+ bf_err += be16_to_cpu(dup->length) >
be16_to_cpu(bf[2].length);
ptr += be16_to_cpu(dup->length);
lastfree = 1;
(*dot)++;
}
}
- if (be32_to_cpu(data->magic) == XFS_DIR2_BLOCK_MAGIC) {
+ if (be32_to_cpu(data->magic) == XFS_DIR2_BLOCK_MAGIC ||
+ be32_to_cpu(data->magic) == XFS_DIR3_BLOCK_MAGIC) {
endptr = (char *)data + mp->m_dir_geo->blksize;
for (i = stale = 0; lep && i < be32_to_cpu(btp->count); i++) {
if ((char *)&lep[i] >= endptr) {
if (!sflag || v)
dbprintf(_("dir %lld block %d bad count "
- "%u\n"), id->ino, dabno,
+ "%u\n"), id->ino, dabno,
be32_to_cpu(btp->count));
error++;
break;
}
if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR)
stale++;
- else if (dir_hash_see(be32_to_cpu(lep[i].hashval),
+ else if (dir_hash_see(be32_to_cpu(lep[i].hashval),
be32_to_cpu(lep[i].address))) {
if (!sflag || v)
dbprintf(_("dir %lld block %d extra leaf "
- "entry %x %x\n"),
- id->ino, dabno,
+ "entry %x %x\n"),
+ id->ino, dabno,
be32_to_cpu(lep[i].hashval),
be32_to_cpu(lep[i].address));
error++;
id->ino, dabno);
error++;
}
- if (be32_to_cpu(data->magic) == XFS_DIR2_BLOCK_MAGIC &&
+ if ((be32_to_cpu(data->magic) == XFS_DIR2_BLOCK_MAGIC ||
+ be32_to_cpu(data->magic) == XFS_DIR3_BLOCK_MAGIC) &&
count != be32_to_cpu(btp->count) - be32_to_cpu(btp->stale)) {
if (!sflag || v)
dbprintf(_("dir %lld block %d bad block tail count %d "
- "(stale %d)\n"),
- id->ino, dabno, be32_to_cpu(btp->count),
+ "(stale %d)\n"),
+ id->ino, dabno, be32_to_cpu(btp->count),
be32_to_cpu(btp->stale));
error++;
}
- if (be32_to_cpu(data->magic) == XFS_DIR2_BLOCK_MAGIC &&
+ if ((be32_to_cpu(data->magic) == XFS_DIR2_BLOCK_MAGIC ||
+	     be32_to_cpu(data->magic) == XFS_DIR3_BLOCK_MAGIC) &&
stale != be32_to_cpu(btp->stale)) {
if (!sflag || v)
dbprintf(_("dir %lld block %d bad stale tail count %d\n"),
rp = (xfs_bmbt_rec_t *)XFS_DFORK_PTR(dip, whichfork);
*nex = XFS_DFORK_NEXTENTS(dip, whichfork);
- if (*nex < 0 || *nex > XFS_DFORK_SIZE(dip, mp, whichfork) /
+ if (*nex < 0 || *nex > XFS_DFORK_SIZE(dip, mp, whichfork) /
sizeof(xfs_bmbt_rec_t)) {
if (!sflag || id->ilist)
dbprintf(_("bad number of extents %d for inode %lld\n"),
{
blkmap_t *blkmap;
xfs_fsblock_t bno = 0;
- xfs_icdinode_t idic;
+ struct xfs_inode xino;
inodata_t *id = NULL;
xfs_ino_t ino;
xfs_extnum_t nextents = 0;
- int nlink;
int security;
xfs_rfsblock_t totblocks;
xfs_rfsblock_t totdblocks = 0;
xfs_qcnt_t rc = 0;
xfs_dqid_t dqprid;
int v = 0;
+ mode_t mode;
static char okfmts[] = {
0, /* type 0 unused */
1 << XFS_DINODE_FMT_DEV, /* FIFO */
"dev", "local", "extents", "btree", "uuid"
};
- libxfs_dinode_from_disk(&idic, dip);
+ libxfs_inode_from_disk(&xino, dip);
ino = XFS_AGINO_TO_INO(mp, be32_to_cpu(agf->agf_seqno), agino);
if (!isfree) {
blkmap = NULL;
}
v = (!sflag || (id && id->ilist) || CHECK_BLIST(bno));
- if (idic.di_magic != XFS_DINODE_MAGIC) {
+ if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
if (isfree || v)
dbprintf(_("bad magic number %#x for inode %lld\n"),
- idic.di_magic, ino);
+ be16_to_cpu(dip->di_magic), ino);
error++;
return;
}
- if (!XFS_DINODE_GOOD_VERSION(idic.di_version)) {
+ if (!libxfs_dinode_good_version(mp, xino.i_d.di_version)) {
if (isfree || v)
dbprintf(_("bad version number %#x for inode %lld\n"),
- idic.di_version, ino);
+ xino.i_d.di_version, ino);
error++;
return;
}
if (isfree) {
- if (idic.di_nblocks != 0) {
+ if (xino.i_d.di_nblocks != 0) {
if (v)
dbprintf(_("bad nblocks %lld for free inode "
"%lld\n"),
- idic.di_nblocks, ino);
+ xino.i_d.di_nblocks, ino);
error++;
}
- if (idic.di_version == 1)
- nlink = idic.di_onlink;
- else
- nlink = idic.di_nlink;
- if (nlink != 0) {
+ if (dip->di_nlink != 0) {
if (v)
dbprintf(_("bad nlink %d for free inode %lld\n"),
- nlink, ino);
+ be32_to_cpu(dip->di_nlink), ino);
error++;
}
- if (idic.di_mode != 0) {
+ if (dip->di_mode != 0) {
if (v)
dbprintf(_("bad mode %#o for free inode %lld\n"),
- idic.di_mode, ino);
+ be16_to_cpu(dip->di_mode), ino);
error++;
}
return;
/*
* di_mode is a 16-bit uint so no need to check the < 0 case
*/
- if ((((idic.di_mode & S_IFMT) >> 12) > 15) ||
- (!(okfmts[(idic.di_mode & S_IFMT) >> 12] & (1 << idic.di_format)))) {
+ mode = be16_to_cpu(dip->di_mode);
+ if ((((mode & S_IFMT) >> 12) > 15) ||
+ (!(okfmts[(mode & S_IFMT) >> 12] & (1 << xino.i_d.di_format)))) {
if (v)
dbprintf(_("bad format %d for inode %lld type %#o\n"),
- idic.di_format, id->ino, idic.di_mode & S_IFMT);
+ xino.i_d.di_format, id->ino, mode & S_IFMT);
error++;
return;
}
if ((unsigned int)XFS_DFORK_ASIZE(dip, mp) >=
- XFS_LITINO(mp, idic.di_version)) {
+ XFS_LITINO(mp, xino.i_d.di_version)) {
if (v)
dbprintf(_("bad fork offset %d for inode %lld\n"),
- idic.di_forkoff, id->ino);
+ xino.i_d.di_forkoff, id->ino);
error++;
return;
}
- if ((unsigned int)idic.di_aformat > XFS_DINODE_FMT_BTREE) {
+ if ((unsigned int)xino.i_d.di_aformat > XFS_DINODE_FMT_BTREE) {
if (v)
dbprintf(_("bad attribute format %d for inode %lld\n"),
- idic.di_aformat, id->ino);
+ xino.i_d.di_aformat, id->ino);
error++;
return;
}
dbprintf(_("inode %lld mode %#o fmt %s "
"afmt %s "
"nex %d anex %d nblk %lld sz %lld%s%s%s%s%s%s%s\n"),
- id->ino, idic.di_mode, fmtnames[(int)idic.di_format],
- fmtnames[(int)idic.di_aformat],
- idic.di_nextents,
- idic.di_anextents,
- idic.di_nblocks, idic.di_size,
- idic.di_flags & XFS_DIFLAG_REALTIME ? " rt" : "",
- idic.di_flags & XFS_DIFLAG_PREALLOC ? " pre" : "",
- idic.di_flags & XFS_DIFLAG_IMMUTABLE? " imm" : "",
- idic.di_flags & XFS_DIFLAG_APPEND ? " app" : "",
- idic.di_flags & XFS_DIFLAG_SYNC ? " syn" : "",
- idic.di_flags & XFS_DIFLAG_NOATIME ? " noa" : "",
- idic.di_flags & XFS_DIFLAG_NODUMP ? " nod" : "");
+ id->ino, mode, fmtnames[(int)xino.i_d.di_format],
+ fmtnames[(int)xino.i_d.di_aformat],
+ xino.i_d.di_nextents,
+ xino.i_d.di_anextents,
+ xino.i_d.di_nblocks, xino.i_d.di_size,
+ xino.i_d.di_flags & XFS_DIFLAG_REALTIME ? " rt" : "",
+ xino.i_d.di_flags & XFS_DIFLAG_PREALLOC ? " pre" : "",
+ xino.i_d.di_flags & XFS_DIFLAG_IMMUTABLE? " imm" : "",
+ xino.i_d.di_flags & XFS_DIFLAG_APPEND ? " app" : "",
+ xino.i_d.di_flags & XFS_DIFLAG_SYNC ? " syn" : "",
+ xino.i_d.di_flags & XFS_DIFLAG_NOATIME ? " noa" : "",
+ xino.i_d.di_flags & XFS_DIFLAG_NODUMP ? " nod" : "");
security = 0;
- switch (idic.di_mode & S_IFMT) {
+ switch (mode & S_IFMT) {
case S_IFDIR:
type = DBM_DIR;
- if (idic.di_format == XFS_DINODE_FMT_LOCAL)
+ if (xino.i_d.di_format == XFS_DINODE_FMT_LOCAL)
break;
- blkmap = blkmap_alloc(idic.di_nextents);
+ blkmap = blkmap_alloc(xino.i_d.di_nextents);
break;
case S_IFREG:
- if (idic.di_flags & XFS_DIFLAG_REALTIME)
+ if (xino.i_d.di_flags & XFS_DIFLAG_REALTIME)
type = DBM_RTDATA;
else if (id->ino == mp->m_sb.sb_rbmino) {
type = DBM_RTBITMAP;
- blkmap = blkmap_alloc(idic.di_nextents);
+ blkmap = blkmap_alloc(xino.i_d.di_nextents);
addlink_inode(id);
} else if (id->ino == mp->m_sb.sb_rsumino) {
type = DBM_RTSUM;
- blkmap = blkmap_alloc(idic.di_nextents);
+ blkmap = blkmap_alloc(xino.i_d.di_nextents);
addlink_inode(id);
}
else if (id->ino == mp->m_sb.sb_uquotino ||
id->ino == mp->m_sb.sb_gquotino ||
id->ino == mp->m_sb.sb_pquotino) {
type = DBM_QUOTA;
- blkmap = blkmap_alloc(idic.di_nextents);
+ blkmap = blkmap_alloc(xino.i_d.di_nextents);
addlink_inode(id);
}
else
type = DBM_DATA;
- if (idic.di_mode & (S_ISUID | S_ISGID))
+ if (mode & (S_ISUID | S_ISGID))
security = 1;
break;
case S_IFLNK:
type = DBM_UNKNOWN;
break;
}
- if (idic.di_version == 1)
- setlink_inode(id, idic.di_onlink, type == DBM_DIR, security);
- else {
- sbversion |= XFS_SB_VERSION_NLINKBIT;
- setlink_inode(id, idic.di_nlink, type == DBM_DIR, security);
- }
- switch (idic.di_format) {
+
+ id->isreflink = !!(xino.i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
+ setlink_inode(id, VFS_I(&xino)->i_nlink, type == DBM_DIR, security);
+
+ switch (xino.i_d.di_format) {
case XFS_DINODE_FMT_LOCAL:
process_lclinode(id, dip, type, &totdblocks, &totiblocks,
&nextents, &blkmap, XFS_DATA_FORK);
}
if (XFS_DFORK_Q(dip)) {
sbversion |= XFS_SB_VERSION_ATTRBIT;
- switch (idic.di_aformat) {
+ switch (xino.i_d.di_aformat) {
case XFS_DINODE_FMT_LOCAL:
process_lclinode(id, dip, DBM_ATTR, &atotdblocks,
&atotiblocks, &anextents, NULL, XFS_ATTR_FORK);
break;
}
if (ic) {
- dqprid = xfs_get_projid(&idic); /* dquot ID is u32 */
- quota_add(&dqprid, &idic.di_gid, &idic.di_uid,
+ dqprid = xfs_get_projid(&xino.i_d); /* dquot ID is u32 */
+ quota_add(&dqprid, &xino.i_d.di_gid, &xino.i_d.di_uid,
0, bc, ic, rc);
}
}
totblocks = totdblocks + totiblocks + atotdblocks + atotiblocks;
- if (totblocks != idic.di_nblocks) {
+ if (totblocks != xino.i_d.di_nblocks) {
if (v)
dbprintf(_("bad nblocks %lld for inode %lld, counted "
"%lld\n"),
- idic.di_nblocks, id->ino, totblocks);
+ xino.i_d.di_nblocks, id->ino, totblocks);
error++;
}
- if (nextents != idic.di_nextents) {
+ if (nextents != xino.i_d.di_nextents) {
if (v)
dbprintf(_("bad nextents %d for inode %lld, counted %d\n"),
- idic.di_nextents, id->ino, nextents);
+ xino.i_d.di_nextents, id->ino, nextents);
error++;
}
- if (anextents != idic.di_anextents) {
+ if (anextents != xino.i_d.di_anextents) {
if (v)
dbprintf(_("bad anextents %d for inode %lld, counted "
"%d\n"),
- idic.di_anextents, id->ino, anextents);
+ xino.i_d.di_anextents, id->ino, anextents);
error++;
}
if (type == DBM_DIR)
int t = 0;
int v;
int v2;
- int x;
v2 = verbose || id->ilist;
v = parent = 0;
while ((dbno = blkmap_next_off(blkmap, dbno, &t)) != NULLFILEOFF) {
nex = blkmap_getn(blkmap, dbno, mp->m_dir_geo->fsbcount, &bmp);
ASSERT(nex > 0);
- for (v = v2, x = 0; !v && x < nex; x++) {
- for (b = bmp[x].startblock;
- !v && b < bmp[x].startblock + bmp[x].blockcount;
+ for (v = v2, i = 0; !v && i < nex; i++) {
+ for (b = bmp[i].startblock;
+ !v && b < bmp[i].startblock + bmp[i].blockcount;
b++)
v = CHECK_BLIST(b);
}
if (v)
dbprintf(_("dir inode %lld block %u=%llu\n"), id->ino,
- (__uint32_t)dbno,
+ (uint32_t)dbno,
(xfs_fsblock_t)bmp->startblock);
push_cur();
if (nex > 1)
if (!sflag || v)
dbprintf(_("can't read block %u for directory "
"inode %lld\n"),
- (__uint32_t)dbno, id->ino);
+ (uint32_t)dbno, id->ino);
error++;
pop_cur();
dbno += mp->m_dir_geo->fsbcount - 1;
return parent;
}
+/*
+ * Cross-check one dir3 free block against the freetab built while walking
+ * the directory's data blocks: verify hdr.firstdb matches the block's
+ * position, that nvalid/nused are in bounds, and that each bests[] entry
+ * agrees with the corresponding freetab entry.  Increments the global
+ * 'error' count on mismatch.
+ */
+static void
+process_leaf_node_dir_v3_free(
+	inodata_t *id,
+	int v,
+	xfs_dablk_t dabno,
+	freetab_t *freetab)
+{
+	xfs_dir2_data_off_t ent;
+	struct xfs_dir3_free *free;
+	int i;
+	int maxent;
+	int used;
+
+	free = iocur_top->data;
+	maxent = M_DIROPS(mp)->free_max_bests(mp->m_dir_geo);
+	/* firstdb must equal this free block's index times entries/block. */
+	if (be32_to_cpu(free->hdr.firstdb) != xfs_dir2_da_to_db(mp->m_dir_geo,
+			dabno - mp->m_dir_geo->freeblk) * maxent) {
+		if (!sflag || v)
+			dbprintf(_("bad free block firstdb %d for dir ino %lld "
+				 "block %d\n"),
+				be32_to_cpu(free->hdr.firstdb), id->ino, dabno);
+		error++;
+		return;
+	}
+	/* nvalid and nused are capped by maxent, and nused <= nvalid. */
+	if (be32_to_cpu(free->hdr.nvalid) > maxent ||
+	    be32_to_cpu(free->hdr.nused) > maxent ||
+	    be32_to_cpu(free->hdr.nused) >
+			be32_to_cpu(free->hdr.nvalid)) {
+		if (!sflag || v)
+			dbprintf(_("bad free block nvalid/nused %d/%d for dir "
+				 "ino %lld block %d\n"),
+				be32_to_cpu(free->hdr.nvalid),
+				be32_to_cpu(free->hdr.nused), id->ino, dabno);
+		error++;
+		return;
+	}
+	for (used = i = 0; i < be32_to_cpu(free->hdr.nvalid); i++) {
+		/* Entries past the end of freetab are treated as unused. */
+		if (freetab->nents <= be32_to_cpu(free->hdr.firstdb) + i)
+			ent = NULLDATAOFF;
+		else
+			ent = freetab->ents[be32_to_cpu(free->hdr.firstdb) + i];
+		if (ent != be16_to_cpu(free->bests[i])) {
+			if (!sflag || v)
+				dbprintf(_("bad free block ent %d is %d should "
+					 "be %d for dir ino %lld block %d\n"),
+					i, be16_to_cpu(free->bests[i]), ent,
+					id->ino, dabno);
+			error++;
+		}
+		if (be16_to_cpu(free->bests[i]) != NULLDATAOFF)
+			used++;
+		/* Mark this freetab slot as consumed by a free block. */
+		if (ent != NULLDATAOFF)
+			freetab->ents[be32_to_cpu(free->hdr.firstdb) + i] =
+				NULLDATAOFF;
+	}
+	/* nused must equal the number of in-use bests we just counted. */
+	if (used != be32_to_cpu(free->hdr.nused)) {
+		if (!sflag || v)
+			dbprintf(_("bad free block nused %d should be %d for dir "
+				 "ino %lld block %d\n"),
+				be32_to_cpu(free->hdr.nused), used, id->ino,
+				dabno);
+		error++;
+	}
+}
+
static void
process_leaf_node_dir_v2_free(
inodata_t *id,
int used;
free = iocur_top->data;
- if (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC) {
+ if (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC &&
+ be32_to_cpu(free->hdr.magic) != XFS_DIR3_FREE_MAGIC) {
if (!sflag || v)
dbprintf(_("bad free block magic # %#x for dir ino %lld "
"block %d\n"),
error++;
return;
}
+ if (be32_to_cpu(free->hdr.magic) == XFS_DIR3_FREE_MAGIC) {
+ process_leaf_node_dir_v3_free(id, v, dabno, freetab);
+ return;
+ }
maxent = M_DIROPS(mp)->free_max_bests(mp->m_dir_geo);
- if (be32_to_cpu(free->hdr.firstdb) != xfs_dir2_da_to_db(mp->m_dir_geo,
+ if (be32_to_cpu(free->hdr.firstdb) != xfs_dir2_da_to_db(mp->m_dir_geo,
dabno - mp->m_dir_geo->freeblk) * maxent) {
if (!sflag || v)
dbprintf(_("bad free block firstdb %d for dir ino %lld "
error++;
return;
}
- if (be32_to_cpu(free->hdr.nvalid) > maxent ||
- be32_to_cpu(free->hdr.nvalid) < 0 ||
- be32_to_cpu(free->hdr.nused) > maxent ||
- be32_to_cpu(free->hdr.nused) < 0 ||
- be32_to_cpu(free->hdr.nused) >
+ if (be32_to_cpu(free->hdr.nvalid) > maxent ||
+ be32_to_cpu(free->hdr.nused) > maxent ||
+ be32_to_cpu(free->hdr.nused) >
be32_to_cpu(free->hdr.nvalid)) {
if (!sflag || v)
dbprintf(_("bad free block nvalid/nused %d/%d for dir "
"ino %lld block %d\n"),
- be32_to_cpu(free->hdr.nvalid),
+ be32_to_cpu(free->hdr.nvalid),
be32_to_cpu(free->hdr.nused), id->ino, dabno);
error++;
return;
if (!sflag || v)
dbprintf(_("bad free block ent %d is %d should "
"be %d for dir ino %lld block %d\n"),
- i, be16_to_cpu(free->bests[i]), ent,
+ i, be16_to_cpu(free->bests[i]), ent,
id->ino, dabno);
error++;
}
if (be16_to_cpu(free->bests[i]) != NULLDATAOFF)
used++;
if (ent != NULLDATAOFF)
- freetab->ents[be32_to_cpu(free->hdr.firstdb) + i] =
+ freetab->ents[be32_to_cpu(free->hdr.firstdb) + i] =
NULLDATAOFF;
}
if (used != be32_to_cpu(free->hdr.nused)) {
if (!sflag || v)
dbprintf(_("bad free block nused %d should be %d for dir "
"ino %lld block %d\n"),
- be32_to_cpu(free->hdr.nused), used, id->ino,
+ be32_to_cpu(free->hdr.nused), used, id->ino,
dabno);
error++;
}
}
+/*
+ * Return the number of leaf entries in a directory leaf block, reading
+ * the v3 header when the block magic identifies a dir3 leaf/leafn block
+ * and the v2 header otherwise.
+ */
+static inline int
+xfs_dir3_leaf_ents_count(struct xfs_dir2_leaf *lp)
+{
+	if (lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) ||
+	    lp->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
+		struct xfs_dir3_leaf *lp3 = (struct xfs_dir3_leaf *)lp;
+
+		return be16_to_cpu(lp3->hdr.count);
+	}
+	return be16_to_cpu(lp->hdr.count);
+}
+
static void
process_leaf_node_dir_v2_int(
inodata_t *id,
int i;
__be16 *lbp;
xfs_dir2_leaf_t *leaf;
+ struct xfs_dir3_leaf *leaf3 = NULL;
xfs_dir2_leaf_entry_t *lep;
xfs_dir2_leaf_tail_t *ltp;
xfs_da_intnode_t *node;
leaf = iocur_top->data;
switch (be16_to_cpu(leaf->hdr.info.magic)) {
+ case XFS_DIR3_LEAF1_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
+ leaf3 = iocur_top->data;
+ break;
+ }
+ switch (be16_to_cpu(leaf->hdr.info.magic)) {
case XFS_DIR2_LEAF1_MAGIC:
- if (be32_to_cpu(leaf->hdr.info.forw) ||
+ case XFS_DIR3_LEAF1_MAGIC:
+ if (be32_to_cpu(leaf->hdr.info.forw) ||
be32_to_cpu(leaf->hdr.info.back)) {
if (!sflag || v)
dbprintf(_("bad leaf block forw/back pointers "
"%d/%d for dir ino %lld block %d\n"),
be32_to_cpu(leaf->hdr.info.forw),
- be32_to_cpu(leaf->hdr.info.back),
+ be32_to_cpu(leaf->hdr.info.back),
id->ino, dabno);
error++;
}
ltp = xfs_dir2_leaf_tail_p(mp->m_dir_geo, leaf);
lbp = xfs_dir2_leaf_bests_p(ltp);
for (i = 0; i < be32_to_cpu(ltp->bestcount); i++) {
- if (freetab->nents <= i || freetab->ents[i] !=
+ if (freetab->nents <= i || freetab->ents[i] !=
be16_to_cpu(lbp[i])) {
if (!sflag || v)
dbprintf(_("bestfree %d for dir ino %lld "
}
break;
case XFS_DIR2_LEAFN_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
/* if it's at the root location then we can check the
* pointers are null XXX */
break;
case XFS_DA_NODE_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
node = iocur_top->data;
M_DIROPS(mp)->node_hdr_from_disk(&nodehdr, node);
if (nodehdr.level < 1 || nodehdr.level > XFS_DA_NODE_MAXDEPTH) {
if (!sflag || v)
dbprintf(_("bad node block level %d for dir ino "
"%lld block %d\n"),
- nodehdr.level, id->ino,
+ nodehdr.level, id->ino,
dabno);
error++;
}
if (!sflag || v)
dbprintf(_("bad directory data magic # %#x for dir ino "
"%lld block %d\n"),
- be16_to_cpu(leaf->hdr.info.magic), id->ino,
+ be16_to_cpu(leaf->hdr.info.magic), id->ino,
dabno);
error++;
return;
}
lep = M_DIROPS(mp)->leaf_ents_p(leaf);
- for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) {
+ for (i = stale = 0; i < xfs_dir3_leaf_ents_count(leaf); i++) {
if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR)
stale++;
- else if (dir_hash_see(be32_to_cpu(lep[i].hashval),
+ else if (dir_hash_see(be32_to_cpu(lep[i].hashval),
be32_to_cpu(lep[i].address))) {
if (!sflag || v)
dbprintf(_("dir %lld block %d extra leaf entry "
- "%x %x\n"), id->ino, dabno,
+ "%x %x\n"), id->ino, dabno,
be32_to_cpu(lep[i].hashval),
be32_to_cpu(lep[i].address));
error++;
}
}
- if (stale != be16_to_cpu(leaf->hdr.stale)) {
+ if (leaf3 && stale != be16_to_cpu(leaf3->hdr.stale)) {
+ if (!sflag || v)
+ dbprintf(_("dir3 %lld block %d stale mismatch "
+ "%d/%d\n"),
+ id->ino, dabno, stale,
+ be16_to_cpu(leaf3->hdr.stale));
+ error++;
+	} else if (!leaf3 && stale != be16_to_cpu(leaf->hdr.stale)) {
if (!sflag || v)
dbprintf(_("dir %lld block %d stale mismatch "
"%d/%d\n"),
int cb;
xfs_dqblk_t *dqb;
xfs_dqid_t dqid;
- u_int8_t exp_flags = 0;
+ uint8_t exp_flags = 0;
uint i;
uint perblock;
xfs_fileoff_t qbno;
M_DIROPS(mp)->sf_entsize(sf, sfe->namelen);
sfe = M_DIROPS(mp)->sf_nextentry(sf, sfe);
}
- if (i < 0 && (intptr_t)sfe - (intptr_t)sf !=
+ if (i < 0 && (intptr_t)sfe - (intptr_t)sf !=
be64_to_cpu(dip->di_size)) {
if (!sflag)
dbprintf(_("dir %llu size is %lld, should be %u\n"),
be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
1, scanfunc_cnt, TYP_CNTBT);
+ if (agf->agf_roots[XFS_BTNUM_RMAP]) {
+ scan_sbtree(agf,
+ be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]),
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]),
+ 1, scanfunc_rmap, TYP_RMAPBT);
+ }
+ if (agf->agf_refcount_root) {
+ scan_sbtree(agf,
+ be32_to_cpu(agf->agf_refcount_root),
+ be32_to_cpu(agf->agf_refcount_level),
+ 1, scanfunc_refcnt, TYP_REFCBT);
+ }
scan_sbtree(agf,
be32_to_cpu(agi->agi_root),
be32_to_cpu(agi->agi_level),
1, scanfunc_ino, TYP_INOBT);
+ if (agi->agi_free_root) {
+ scan_sbtree(agf,
+ be32_to_cpu(agi->agi_free_root),
+ be32_to_cpu(agi->agi_free_level),
+ 1, scanfunc_fino, TYP_FINOBT);
+ }
if (be32_to_cpu(agf->agf_freeblks) != agffreeblks) {
if (!sflag)
dbprintf(_("agf_freeblks %u, counted %u in ag %u\n"),
pop_cur();
}
+/* Accumulator passed through the AGFL walk callback below. */
+struct agfl_state {
+	xfs_agnumber_t agno;	/* AG whose free list is being scanned */
+	unsigned int count;	/* number of AGFL blocks seen so far */
+};
+
+/*
+ * libxfs_agfl_walk() callback: record one AGFL block in the dbmap as
+ * DBM_FREELIST and bump the running count.  Always returns 0 so the
+ * walk visits every free list entry.
+ */
+static int
+scan_agfl(
+	struct xfs_mount *mp,
+	xfs_agblock_t bno,
+	void *priv)
+{
+	struct agfl_state *as = priv;
+
+	set_dbmap(as->agno, bno, 1, DBM_FREELIST, as->agno, XFS_AGFL_BLOCK(mp));
+	as->count++;
+	return 0;
+}
+
+/*
+ * Check one AG's free block list (AGFL).  Every block on the list is
+ * marked DBM_FREELIST via the scan_agfl() callback, the number of
+ * blocks walked is compared against agf_flcount, and the walked
+ * blocks are added to the free-space tallies (fdblocks and the
+ * per-AG aggregate).
+ */
static void
scan_freelist(
- xfs_agf_t *agf)
+ xfs_agf_t *agf)
{
- xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
- xfs_agfl_t *agfl;
- xfs_agblock_t bno;
- uint count;
- int i;
- __be32 *freelist;
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ struct agfl_state state;
 if (XFS_SB_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
 XFS_AGF_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
 set_cur(&typtab[TYP_AGFL],
 XFS_AG_DADDR(mp, seqno, XFS_AGFL_DADDR(mp)),
 XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);
- if ((agfl = iocur_top->data) == NULL) {
+ if (iocur_top->data == NULL) {
 dbprintf(_("can't read agfl block for ag %u\n"), seqno);
 serious_error++;
 pop_cur();
 return;
 }
- i = be32_to_cpu(agf->agf_flfirst);
 /* verify agf values before proceeding */
- if (be32_to_cpu(agf->agf_flfirst) >= XFS_AGFL_SIZE(mp) ||
- be32_to_cpu(agf->agf_fllast) >= XFS_AGFL_SIZE(mp)) {
+ /* out-of-range first/last indexes would walk off the AGFL array */
+ if (be32_to_cpu(agf->agf_flfirst) >= libxfs_agfl_size(mp) ||
+ be32_to_cpu(agf->agf_fllast) >= libxfs_agfl_size(mp)) {
 dbprintf(_("agf %d freelist blocks bad, skipping "
- "freelist scan\n"), i);
+ "freelist scan\n"), seqno);
 pop_cur();
 return;
 }
 /* open coded XFS_BUF_TO_AGFL_BNO */
- freelist = xfs_sb_version_hascrc(&((mp)->m_sb)) ? &agfl->agfl_bno[0]
- : (__be32 *)agfl;
- count = 0;
- for (;;) {
- bno = be32_to_cpu(freelist[i]);
- set_dbmap(seqno, bno, 1, DBM_FREELIST, seqno,
- XFS_AGFL_BLOCK(mp));
- count++;
- if (i == be32_to_cpu(agf->agf_fllast))
- break;
- if (++i == XFS_AGFL_SIZE(mp))
- i = 0;
- }
- if (count != be32_to_cpu(agf->agf_flcount)) {
+ /* let the libxfs walker visit each block; scan_agfl() does the accounting */
+ state.count = 0;
+ state.agno = seqno;
+ libxfs_agfl_walk(mp, agf, iocur_top->bp, scan_agfl, &state);
+ if (state.count != be32_to_cpu(agf->agf_flcount)) {
 if (!sflag)
 dbprintf(_("freeblk count %u != flcount %u in ag %u\n"),
- count, be32_to_cpu(agf->agf_flcount),
- seqno);
+ state.count,
+ be32_to_cpu(agf->agf_flcount),
+ seqno);
 error++;
 }
- fdblocks += count;
- agf_aggr_freeblks += count;
+ /* AGFL blocks count as free space in the summary counters */
+ fdblocks += state.count;
+ agf_aggr_freeblks += state.count;
 pop_cur();
}
agno = XFS_FSB_TO_AGNO(mp, bno);
agbno = XFS_FSB_TO_AGBNO(mp, bno);
- if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC) {
+ if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC &&
+ be32_to_cpu(block->bb_magic) != XFS_BMAP_CRC_MAGIC) {
if (!sflag || id->ilist || CHECK_BLIST(bno))
dbprintf(_("bad magic # %#x in inode %lld bmbt block "
"%u/%u\n"),
}
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[0]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
- scan_lbtree(be64_to_cpu(pp[i]), level, scanfunc_bmap, type, id,
+ scan_lbtree(be64_to_cpu(pp[i]), level, scanfunc_bmap, type, id,
totd, toti, nex, blkmapp, 0, btype);
}
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
xfs_agblock_t lastblock;
- if (be32_to_cpu(block->bb_magic) != XFS_ABTB_MAGIC) {
+ if (be32_to_cpu(block->bb_magic) != XFS_ABTB_MAGIC &&
+ be32_to_cpu(block->bb_magic) != XFS_ABTB_CRC_MAGIC) {
dbprintf(_("bad magic # %#x in btbno block %u/%u\n"),
be32_to_cpu(block->bb_magic), seqno, bno);
serious_error++;
xfs_alloc_rec_t *rp;
xfs_extlen_t lastcount;
- if (be32_to_cpu(block->bb_magic) != XFS_ABTC_MAGIC) {
+ if (be32_to_cpu(block->bb_magic) != XFS_ABTC_MAGIC &&
+ be32_to_cpu(block->bb_magic) != XFS_ABTC_CRC_MAGIC) {
dbprintf(_("bad magic # %#x in btcnt block %u/%u\n"),
be32_to_cpu(block->bb_magic), seqno, bno);
serious_error++;
 int i;
 int isfree;
 int j;
+ int freecount;
 int nfree;
 int off;
 xfs_inobt_ptr_t *pp;
 xfs_inobt_rec_t *rp;
-
- if (be32_to_cpu(block->bb_magic) != XFS_IBT_MAGIC) {
+ xfs_agblock_t agbno;
+ xfs_agblock_t end_agbno;
+ struct xfs_dinode *dip;
+ int blks_per_buf;
+ int inodes_per_buf;
+ int ioff;
+
+ /*
+ * Sparse-inode filesystems allocate chunks a cluster at a time,
+ * so read them back one cluster buffer per iteration; otherwise a
+ * single buffer covers the whole inode allocation chunk.
+ */
+ if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ blks_per_buf = xfs_icluster_size_fsb(mp);
+ else
+ blks_per_buf = mp->m_ialloc_blks;
+ inodes_per_buf = min(blks_per_buf << mp->m_sb.sb_inopblog,
+ XFS_INODES_PER_CHUNK);
+
+ /* accept both the v4 and v5 (CRC) inobt magics */
+ if (be32_to_cpu(block->bb_magic) != XFS_IBT_MAGIC &&
+ be32_to_cpu(block->bb_magic) != XFS_IBT_CRC_MAGIC) {
 dbprintf(_("bad magic # %#x in inobt block %u/%u\n"),
 be32_to_cpu(block->bb_magic), seqno, bno);
 serious_error++;
 rp = XFS_INOBT_REC_ADDR(mp, block, 1);
 for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
 agino = be32_to_cpu(rp[i].ir_startino);
- off = XFS_INO_TO_OFFSET(mp, agino);
+ agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+ off = XFS_AGINO_TO_OFFSET(mp, agino);
+ end_agbno = agbno + mp->m_ialloc_blks;
 if (off == 0) {
 if ((sbversion & XFS_SB_VERSION_ALIGNBIT) &&
 mp->m_sb.sb_inoalignmt &&
 (XFS_INO_TO_AGBNO(mp, agino) %
 mp->m_sb.sb_inoalignmt))
 sbversion &= ~XFS_SB_VERSION_ALIGNBIT;
- set_dbmap(seqno, XFS_AGINO_TO_AGBNO(mp, agino),
- (xfs_extlen_t)MAX(1,
- XFS_INODES_PER_CHUNK >>
- mp->m_sb.sb_inopblog),
- DBM_INODE, seqno, bno);
 }
- icount += XFS_INODES_PER_CHUNK;
- agicount += XFS_INODES_PER_CHUNK;
- ifree += be32_to_cpu(rp[i].ir_u.f.ir_freecount);
- agifreecount += be32_to_cpu(rp[i].ir_u.f.ir_freecount);
+
 push_cur();
- set_cur(&typtab[TYP_INODE],
- XFS_AGB_TO_DADDR(mp, seqno,
- XFS_AGINO_TO_AGBNO(mp, agino)),
- (int)XFS_FSB_TO_BB(mp, mp->m_ialloc_blks),
- DB_RING_IGN, NULL);
- if (iocur_top->data == NULL) {
- if (!sflag)
- dbprintf(_("can't read inode block "
- "%u/%u\n"),
- seqno,
- XFS_AGINO_TO_AGBNO(mp, agino));
- error++;
- pop_cur();
- continue;
- }
- for (j = 0, nfree = 0; j < XFS_INODES_PER_CHUNK; j++) {
- isfree = XFS_INOBT_IS_FREE_DISK(&rp[i], j);
- if (isfree)
- nfree++;
- process_inode(agf, agino + j,
- (xfs_dinode_t *)((char *)iocur_top->data + ((off + j) << mp->m_sb.sb_inodelog)),
- isfree);
+
+ /*
+ * Walk the chunk one buffer at a time; sparse regions of
+ * the chunk are skipped and contribute no inodes.
+ */
+ ioff = 0;
+ nfree = 0;
+ while (agbno < end_agbno &&
+ ioff < XFS_INODES_PER_CHUNK) {
+ if (xfs_inobt_is_sparse_disk(&rp[i], ioff))
+ goto next_buf;
+
+ if (off < XFS_INODES_PER_CHUNK)
+ set_dbmap(seqno, agbno, blks_per_buf,
+ DBM_INODE, seqno, bno);
+
+ icount += inodes_per_buf;
+ agicount += inodes_per_buf;
+
+ set_cur(&typtab[TYP_INODE],
+ XFS_AGB_TO_DADDR(mp, seqno, agbno),
+ XFS_FSB_TO_BB(mp, blks_per_buf),
+ DB_RING_IGN, NULL);
+ if (iocur_top->data == NULL) {
+ if (!sflag)
+ dbprintf(_("can't read inode block "
+ "%u/%u\n"), seqno,
+ agbno);
+ error++;
+ goto next_buf;
+ }
+
+ for (j = 0; j < inodes_per_buf; j++) {
+ isfree = XFS_INOBT_IS_FREE_DISK(&rp[i], ioff + j);
+ if (isfree)
+ nfree++;
+ dip = (xfs_dinode_t *)((char *)iocur_top->data +
+ ((off + j) << mp->m_sb.sb_inodelog));
+ process_inode(agf, agino + ioff + j, dip, isfree);
+ }
+
+next_buf:
+ agbno += blks_per_buf;
+ ioff += inodes_per_buf;
 }
- if (nfree != be32_to_cpu(rp[i].ir_u.f.ir_freecount)) {
+
+ /*
+ * Sparse-format records carry their free count in
+ * ir_u.sp.ir_freecount (read as-is, no endian conversion
+ * in this code); full records use the be32 ir_freecount.
+ */
+ if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ freecount = rp[i].ir_u.sp.ir_freecount;
+ else
+ freecount = be32_to_cpu(rp[i].ir_u.f.ir_freecount);
+
+ ifree += freecount;
+ agifreecount += freecount;
+
+ if (nfree != freecount) {
 if (!sflag)
 dbprintf(_("ir_freecount/free mismatch, "
 "inode chunk %u/%u, freecount "
 "%d nfree %d\n"),
- seqno, agino,
- be32_to_cpu(rp[i].ir_u.f.ir_freecount), nfree);
+ seqno, agino, freecount, nfree);
 error++;
 }
 pop_cur();
 scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_ino, TYP_INOBT);
}
+/*
+ * Scan one block of the free inode btree (finobt).  Checks the magic
+ * number, level and record counts, marks the block itself DBM_BTFINO,
+ * and verifies via check_set_dbmap() that every inode chunk referenced
+ * from a leaf record was already mapped DBM_INODE by the inobt scan.
+ * Interior blocks recurse through scan_sbtree().
+ */
+static void
+scanfunc_fino(
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_agf *agf,
+ xfs_agblock_t bno,
+ int isroot)
+{
+ xfs_agino_t agino;
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ int i;
+ int off;
+ xfs_inobt_ptr_t *pp;
+ struct xfs_inobt_rec *rp;
+ xfs_agblock_t agbno;
+ xfs_agblock_t end_agbno;
+ int blks_per_buf;
+ int inodes_per_buf;
+ int ioff;
+
+ /* chunk geometry: cluster-sized buffers when sparse inodes are enabled */
+ if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ blks_per_buf = xfs_icluster_size_fsb(mp);
+ else
+ blks_per_buf = mp->m_ialloc_blks;
+ inodes_per_buf = min(blks_per_buf << mp->m_sb.sb_inopblog,
+ XFS_INODES_PER_CHUNK);
+
+ if (be32_to_cpu(block->bb_magic) != XFS_FIBT_MAGIC &&
+ be32_to_cpu(block->bb_magic) != XFS_FIBT_CRC_MAGIC) {
+ dbprintf(_("bad magic # %#x in finobt block %u/%u\n"),
+ be32_to_cpu(block->bb_magic), seqno, bno);
+ serious_error++;
+ return;
+ }
+ if (be16_to_cpu(block->bb_level) != level) {
+ if (!sflag)
+ dbprintf(_("expected level %d got %d in finobt block "
+ "%u/%u\n"),
+ level, be16_to_cpu(block->bb_level), seqno, bno);
+ error++;
+ }
+ set_dbmap(seqno, bno, 1, DBM_BTFINO, seqno, bno);
+ if (level == 0) {
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[0] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_inobt_mnr[0])) {
+ dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in "
+ "finobt block %u/%u\n"),
+ be16_to_cpu(block->bb_numrecs), mp->m_inobt_mnr[0],
+ mp->m_inobt_mxr[0], seqno, bno);
+ serious_error++;
+ return;
+ }
+ rp = XFS_INOBT_REC_ADDR(mp, block, 1);
+ for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
+ agino = be32_to_cpu(rp[i].ir_startino);
+ agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+ off = XFS_AGINO_TO_OFFSET(mp, agino);
+ end_agbno = agbno + mp->m_ialloc_blks;
+ if (off == 0) {
+ if ((sbversion & XFS_SB_VERSION_ALIGNBIT) &&
+ mp->m_sb.sb_inoalignmt &&
+ (XFS_INO_TO_AGBNO(mp, agino) %
+ mp->m_sb.sb_inoalignmt))
+ sbversion &= ~XFS_SB_VERSION_ALIGNBIT;
+ }
+
+ /*
+ * Walk the chunk buffer-by-buffer, skipping sparse
+ * holes; each resident buffer must already be marked
+ * DBM_INODE from the inobt pass.
+ */
+ ioff = 0;
+ while (agbno < end_agbno &&
+ ioff < XFS_INODES_PER_CHUNK) {
+ if (xfs_inobt_is_sparse_disk(&rp[i], ioff))
+ goto next_buf;
+
+ check_set_dbmap(seqno, agbno,
+ (xfs_extlen_t)max(1,
+ inodes_per_buf >>
+ mp->m_sb.sb_inopblog),
+ DBM_INODE, DBM_INODE, seqno, bno);
+
+next_buf:
+ agbno += blks_per_buf;
+ ioff += inodes_per_buf;
+ }
+
+ }
+ return;
+ }
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_inobt_mxr[1] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_inobt_mnr[1])) {
+ dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in finobt block "
+ "%u/%u\n"),
+ be16_to_cpu(block->bb_numrecs), mp->m_inobt_mnr[1],
+ mp->m_inobt_mxr[1], seqno, bno);
+ serious_error++;
+ return;
+ }
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
+ for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
+ scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_fino, TYP_FINOBT);
+}
+
+
+/*
+ * Scan one block of the reverse-mapping btree (rmapbt).  Only the CRC
+ * (v5) magic is accepted.  Checks level and record counts, marks the
+ * block DBM_BTRMAP, and verifies leaf records are sorted by
+ * rm_startblock.  Non-root blocks are counted into fdblocks and
+ * agfbtreeblks, like the other AGF btree blocks.  Interior blocks
+ * recurse through scan_sbtree().
+ */
+static void
+scanfunc_rmap(
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_agf *agf,
+ xfs_agblock_t bno,
+ int isroot)
+{
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ int i;
+ xfs_rmap_ptr_t *pp;
+ struct xfs_rmap_rec *rp;
+ xfs_agblock_t lastblock;
+
+ if (be32_to_cpu(block->bb_magic) != XFS_RMAP_CRC_MAGIC) {
+ dbprintf(_("bad magic # %#x in rmapbt block %u/%u\n"),
+ be32_to_cpu(block->bb_magic), seqno, bno);
+ serious_error++;
+ return;
+ }
+ if (be16_to_cpu(block->bb_level) != level) {
+ if (!sflag)
+ dbprintf(_("expected level %d got %d in rmapbt block "
+ "%u/%u\n"),
+ level, be16_to_cpu(block->bb_level), seqno, bno);
+ error++;
+ }
+ if (!isroot) {
+ fdblocks++;
+ agfbtreeblks++;
+ }
+ set_dbmap(seqno, bno, 1, DBM_BTRMAP, seqno, bno);
+ if (level == 0) {
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_rmap_mxr[0] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_rmap_mnr[0])) {
+ dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in "
+ "rmapbt block %u/%u\n"),
+ be16_to_cpu(block->bb_numrecs), mp->m_rmap_mnr[0],
+ mp->m_rmap_mxr[0], seqno, bno);
+ serious_error++;
+ return;
+ }
+ /* leaf records must be sorted by rm_startblock */
+ rp = XFS_RMAP_REC_ADDR(block, 1);
+ lastblock = 0;
+ for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
+ if (be32_to_cpu(rp[i].rm_startblock) < lastblock) {
+ /*
+ * NOTE(review): rm_startblock is printed for
+ * both %u fields here — confirm whether the
+ * second was meant to be a different field.
+ */
+ dbprintf(_(
+ "out-of-order rmap btree record %d (%u %u) block %u/%u\n"),
+ i, be32_to_cpu(rp[i].rm_startblock),
+ be32_to_cpu(rp[i].rm_startblock),
+ be32_to_cpu(agf->agf_seqno), bno);
+ } else {
+ lastblock = be32_to_cpu(rp[i].rm_startblock);
+ }
+ }
+ return;
+ }
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_rmap_mxr[1] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_rmap_mnr[1])) {
+ dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in rmapbt "
+ "block %u/%u\n"),
+ be16_to_cpu(block->bb_numrecs), mp->m_rmap_mnr[1],
+ mp->m_rmap_mxr[1], seqno, bno);
+ serious_error++;
+ return;
+ }
+ pp = XFS_RMAP_PTR_ADDR(block, 1, mp->m_rmap_mxr[1]);
+ for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
+ scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_rmap,
+ TYP_RMAPBT);
+}
+
+
+/*
+ * Scan one block of the refcount btree (v5 reflink).  Checks the CRC
+ * magic, level and record counts, and marks the block DBM_BTREFC.
+ * Leaf records with refcount > 1 describe shared (reflinked) data and
+ * are mapped DBM_RLDATA; records with refcount == 1 are CoW staging
+ * extents (encoded at agbno + XFS_REFC_COW_START), reported as
+ * leftovers and mapped DBM_COWDATA.  Interior blocks recurse through
+ * scan_sbtree().
+ */
+static void
+scanfunc_refcnt(
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_agf *agf,
+ xfs_agblock_t bno,
+ int isroot)
+{
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ int i;
+ xfs_refcount_ptr_t *pp;
+ struct xfs_refcount_rec *rp;
+ xfs_agblock_t lastblock;
+
+ if (be32_to_cpu(block->bb_magic) != XFS_REFC_CRC_MAGIC) {
+ dbprintf(_("bad magic # %#x in refcntbt block %u/%u\n"),
+ be32_to_cpu(block->bb_magic), seqno, bno);
+ serious_error++;
+ return;
+ }
+ if (be16_to_cpu(block->bb_level) != level) {
+ if (!sflag)
+ dbprintf(_("expected level %d got %d in refcntbt block "
+ "%u/%u\n"),
+ level, be16_to_cpu(block->bb_level), seqno, bno);
+ error++;
+ }
+ set_dbmap(seqno, bno, 1, DBM_BTREFC, seqno, bno);
+ if (level == 0) {
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_refc_mxr[0] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_refc_mnr[0])) {
+ dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in "
+ "refcntbt block %u/%u\n"),
+ be16_to_cpu(block->bb_numrecs), mp->m_refc_mnr[0],
+ mp->m_refc_mxr[0], seqno, bno);
+ serious_error++;
+ return;
+ }
+ rp = XFS_REFCOUNT_REC_ADDR(block, 1);
+ lastblock = 0;
+ for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
+ /* refcount == 1 records track CoW staging extents */
+ if (be32_to_cpu(rp[i].rc_refcount) == 1) {
+ xfs_agblock_t agbno;
+ char *msg;
+
+ agbno = be32_to_cpu(rp[i].rc_startblock);
+ if (agbno >= XFS_REFC_COW_START) {
+ agbno -= XFS_REFC_COW_START;
+ msg = _(
+ "leftover CoW extent (%u/%u) len %u\n");
+ } else {
+ msg = _(
+ "leftover CoW extent at unexpected address (%u/%u) len %u\n");
+ }
+ dbprintf(msg,
+ seqno,
+ agbno,
+ be32_to_cpu(rp[i].rc_blockcount));
+ set_dbmap(seqno,
+ agbno,
+ be32_to_cpu(rp[i].rc_blockcount),
+ DBM_COWDATA, seqno, bno);
+ } else {
+ set_dbmap(seqno,
+ be32_to_cpu(rp[i].rc_startblock),
+ be32_to_cpu(rp[i].rc_blockcount),
+ DBM_RLDATA, seqno, bno);
+ }
+ /* records must be sorted and non-overlapping */
+ if (be32_to_cpu(rp[i].rc_startblock) < lastblock) {
+ /*
+ * NOTE(review): rc_startblock is printed for
+ * both %u fields — confirm the second was not
+ * meant to be another field.
+ */
+ dbprintf(_(
+ "out-of-order refcnt btree record %d (%u %u) block %u/%u\n"),
+ i, be32_to_cpu(rp[i].rc_startblock),
+ be32_to_cpu(rp[i].rc_startblock),
+ be32_to_cpu(agf->agf_seqno), bno);
+ } else {
+ lastblock = be32_to_cpu(rp[i].rc_startblock) +
+ be32_to_cpu(rp[i].rc_blockcount);
+ }
+ }
+ return;
+ }
+ if (be16_to_cpu(block->bb_numrecs) > mp->m_refc_mxr[1] ||
+ (isroot == 0 && be16_to_cpu(block->bb_numrecs) < mp->m_refc_mnr[1])) {
+ dbprintf(_("bad btree nrecs (%u, min=%u, max=%u) in refcntbt "
+ "block %u/%u\n"),
+ be16_to_cpu(block->bb_numrecs), mp->m_refc_mnr[1],
+ mp->m_refc_mxr[1], seqno, bno);
+ serious_error++;
+ return;
+ }
+ pp = XFS_REFCOUNT_PTR_ADDR(block, 1, mp->m_refc_mxr[1]);
+ for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
+ scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_refcnt,
+ TYP_REFCBT);
+}
+
+
static void
set_dbmap(
xfs_agnumber_t agno,