Convert the xfs_sb_version_hasfoo() checks to the corresponding xfs_has_foo() checks against mp->m_features.
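For reference, the xfs_has_foo() helpers test feature bits that libxfs caches in mp->m_features at mount time (populated from the on-disk superblock by xfs_sb_version_to_features()), so each check becomes a single mask test rather than re-decoding sb_versionnum/sb_features2. A minimal sketch of the helper pattern, assuming the libxfs sync carries over the kernel's __XFS_HAS_FEAT()/XFS_FEAT_* definitions; the bit value shown is illustrative only:

    /* feature bit cached in struct xfs_mount at mount time (value illustrative) */
    #define XFS_FEAT_CRC		(1ULL << 13)	/* metadata CRCs */

    /* generates static inline bool xfs_has_<name>(struct xfs_mount *mp) */
    #define __XFS_HAS_FEAT(name, NAME) \
    static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
    { \
    	return mp->m_features & XFS_FEAT_ ## NAME; \
    }

    __XFS_HAS_FEAT(crc, CRC)	/* xfs_sb_version_hascrc() becomes xfs_has_crc() */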
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
* we must copy the original sb_uuid to the sb_meta_uuid slot and set
* the incompat flag for the feature on this copy.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
- !xfs_sb_version_hasmetauuid(&mp->m_sb) &&
+ if (xfs_has_crc(mp) && !xfs_has_metauuid(mp) &&
!uuid_equal(&tcarg->uuid, &mp->m_sb.sb_uuid)) {
uint32_t feat;
platform_uuid_copy(&ag_hdr->xfs_sb->sb_uuid, &tcarg->uuid);
/* We may have changed the UUID, so update the superblock CRC */
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
xfs_update_cksum((char *)ag_hdr->xfs_sb, mp->m_sb.sb_sectsize,
XFS_SB_CRC_OFF);
}
pos - btree_buf.position);
if (be32_to_cpu(block->bb_magic) !=
- (xfs_sb_version_hascrc(&mp->m_sb) ?
+ (xfs_has_crc(mp) ?
XFS_ABTB_CRC_MAGIC : XFS_ABTB_MAGIC)) {
do_log(_("Bad btree magic 0x%x\n"),
be32_to_cpu(block->bb_magic));
}
offset = libxfs_log_header(p, &buf->owner->uuid,
- xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
+ xfs_has_logv2(mp) ? 2 : 1,
mp->m_sb.sb_logsunit, XLOG_FMT, NULLCOMMITLSN,
NULLCOMMITLSN, next_log_chunk, buf);
do_write(buf->owner, NULL);
* all existing metadata LSNs are valid (behind the current LSN) on the
* target fs.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
cycle = mp->m_log->l_curr_cycle + 1;
/*
* write fails, mark the target inactive so the failure is reported.
*/
libxfs_log_clear(NULL, buf->data, logstart, length, &buf->owner->uuid,
- xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
+ xfs_has_logv2(mp) ? 2 : 1,
mp->m_sb.sb_logsunit, XLOG_FMT, cycle, true);
if (do_write(buf->owner, buf))
target[tcarg->id].state = INACTIVE;
wbuf logbuf;
int logsize;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
logsize = XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks);
if (!wbuf_init(&logbuf, logsize, w_buf.data_align,
w_buf.min_io_size, w_buf.id))
}
for (i = 0, tcarg = targ; i < num_targets; i++) {
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
format_log(mp, tcarg, &logbuf);
else
clear_log(mp, tcarg);
tcarg++;
}
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
free(logbuf.data);
return 0;
}
/* Magic is invalid/unknown. Guess based on iocur type */
- crc = xfs_sb_version_hascrc(&mp->m_sb);
+ crc = xfs_has_crc(mp);
switch (iocur_top->typ->typnm) {
case TYP_BMAPBTA:
case TYP_BMAPBTD:
if (attrfork)
prefix = "a.bmbt";
- else if (xfs_sb_version_hascrc(&mp->m_sb))
+ else if (xfs_has_crc(mp))
prefix = "u3.bmbt";
else
prefix = "u.bmbt";
{
bool aflag = false;
bool iflag = false;
- bool crc = xfs_sb_version_hascrc(&mp->m_sb);
+ bool crc = xfs_has_crc(mp);
int c;
if (cur_typ == NULL) {
error++;
}
if ((sbversion & XFS_SB_VERSION_ATTRBIT) &&
- !xfs_sb_version_hasattr(&mp->m_sb)) {
+ !xfs_has_attr(mp)) {
if (!sflag)
dbprintf(_("sb versionnum missing attr bit %x\n"),
XFS_SB_VERSION_ATTRBIT);
error++;
}
if ((sbversion & XFS_SB_VERSION_QUOTABIT) &&
- !xfs_sb_version_hasquota(&mp->m_sb)) {
+ !xfs_has_quota(mp)) {
if (!sflag)
dbprintf(_("sb versionnum missing quota bit %x\n"),
XFS_SB_VERSION_QUOTABIT);
error++;
}
if (!(sbversion & XFS_SB_VERSION_ALIGNBIT) &&
- xfs_sb_version_hasalign(&mp->m_sb)) {
+ xfs_has_align(mp)) {
if (!sflag)
dbprintf(_("sb versionnum extra align bit %x\n"),
XFS_SB_VERSION_ALIGNBIT);
is_reflink(
dbm_t type2)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return false;
if (type2 == DBM_DATA || type2 == DBM_RLDATA)
return true;
* at least one full inode record per block. Check this case explicitly.
*/
if (mp->m_sb.sb_inoalignmt ||
- (xfs_sb_version_hasalign(&mp->m_sb) &&
+ (xfs_has_align(mp) &&
mp->m_sb.sb_inopblock >= XFS_INODES_PER_CHUNK))
sbversion |= XFS_SB_VERSION_ALIGNBIT;
if ((mp->m_sb.sb_uquotino && mp->m_sb.sb_uquotino != NULLFSINO) ||
uid = be32_to_cpu(dip->di_uid);
gid = be32_to_cpu(dip->di_gid);
diflags = be16_to_cpu(dip->di_flags);
- if (xfs_sb_version_has_v3inode(&mp->m_sb))
+ if (xfs_has_v3inodes(mp))
diflags2 = be64_to_cpu(dip->di_flags2);
if (isfree) {
if (be64_to_cpu(dip->di_nblocks) != 0) {
int ioff;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
blks_per_buf = igeo->blocks_per_cluster;
else
blks_per_buf = igeo->ialloc_blks;
ioff += inodes_per_buf;
}
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
freecount = rp[i].ir_u.sp.ir_freecount;
else
freecount = be32_to_cpu(rp[i].ir_u.f.ir_freecount);
int ioff;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
blks_per_buf = igeo->blocks_per_cluster;
else
blks_per_buf = igeo->ialloc_blks;
void
crc_init(void)
{
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
add_command(&crc_cmd);
}
int ioff;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
blks_per_buf = igeo->blocks_per_cluster;
else
blks_per_buf = igeo->ialloc_blks;
xfs_fsblock_t start_fsb = 0;
xfs_fsblock_t end_fsb = NULLFSBLOCK;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (!xfs_has_rmapbt(mp)) {
dbprintf(_("Filesystem does not support reverse mapping btree.\n"));
return 0;
}
if (invalid_data &&
iocur_top->typ->crc_off == TYP_F_NO_CRC_OFF &&
- xfs_sb_version_hascrc(&mp->m_sb)) {
+ xfs_has_crc(mp)) {
dbprintf(_("Cannot recalculate CRCs on this type of object\n"));
return 0;
}
local_ops.verify_read = stashed_ops->verify_read;
iocur_top->bp->b_ops = &local_ops;
- if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_has_crc(mp)) {
local_ops.verify_write = xfs_dummy_verify;
} else if (corrupt) {
local_ops.verify_write = xfs_dummy_verify;
* xfs_check needs corrected incore superblock values
*/
if (sbp->sb_rootino != NULLFSINO &&
- xfs_sb_version_haslazysbcount(&mp->m_sb)) {
+ xfs_has_lazysbcount(mp)) {
int error = -libxfs_initialize_perag_data(mp, sbp->sb_agcount);
if (error) {
fprintf(stderr,
}
}
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
type_set_tab_spcrc();
- else if (xfs_sb_version_hascrc(&mp->m_sb))
+ else if (xfs_has_crc(mp))
type_set_tab_crc();
push_cur();
ASSERT((char *)XFS_DFORK_DPTR(dip) - (char *)dip == byteize(startoff));
return dip->di_format == XFS_DINODE_FMT_LOCAL &&
(be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFDIR &&
- !xfs_sb_version_hasftype(&mp->m_sb);
+ !xfs_has_ftype(mp);
}
static int
ASSERT((char *)XFS_DFORK_DPTR(dip) - (char *)dip == byteize(startoff));
return dip->di_format == XFS_DINODE_FMT_LOCAL &&
(be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFDIR &&
- xfs_sb_version_hasftype(&mp->m_sb);
+ xfs_has_ftype(mp);
}
int
if ((iocur_top->mode & S_IFMT) == S_IFDIR)
iocur_top->dirino = ino;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
iocur_top->ino_crc_ok = libxfs_verify_cksum((char *)dip,
mp->m_sb.sb_inodesize,
XFS_DINODE_CRC_OFF);
return;
}
- if (!xfs_sb_version_hascrc(&mp->m_sb) ||
+ if (!xfs_has_crc(mp) ||
!iocur_top->bp->b_ops ||
iocur_top->bp->b_ops->verify_write == xfs_dummy_verify)
skip_crc = true;
write_cur_buf();
/* If we didn't write the crc automatically, re-check inode validity */
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
skip_crc && iocur_top->ino_buf) {
iocur_top->ino_crc_ok = libxfs_verify_cksum(iocur_top->data,
mp->m_sb.sb_inodesize,
int error;
int c;
- logversion = xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1;
+ logversion = xfs_has_logv2(mp) ? 2 : 1;
while ((c = getopt(argc, argv, "c:s:")) != EOF) {
switch (c) {
mp->m_log->l_logBBsize = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
mp->m_log->l_logBBstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart);
mp->m_log->l_sectBBsize = BBSIZE;
- if (xfs_sb_version_hassector(&mp->m_sb))
+ if (xfs_has_sector(mp))
mp->m_log->l_sectBBsize <<= (mp->m_sb.sb_logsectlog - BBSHIFT);
mp->m_log->l_sectBBsize = BTOBB(mp->m_log->l_sectBBsize);
xfs_agblock_t root;
int levels;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 1;
root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
xfs_agblock_t root;
int levels;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return 1;
root = be32_to_cpu(agf->agf_refcount_root);
blp = (xfs_dir2_leaf_entry_t *)btp;
end_of_data = (char *)blp - block;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
wantmagic = XFS_DIR3_BLOCK_MAGIC;
else
wantmagic = XFS_DIR2_BLOCK_MAGIC;
} else { /* leaf/node format */
end_of_data = mp->m_dir_geo->fsbcount << mp->m_sb.sb_blocklog;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
wantmagic = XFS_DIR3_DATA_MAGIC;
else
wantmagic = XFS_DIR2_DATA_MAGIC;
}
link = iocur_top->data;
- if (xfs_sb_version_hascrc(&(mp)->m_sb))
+ if (xfs_has_crc(mp))
link += sizeof(struct xfs_dsymlink_hdr);
if (obfuscate)
linklen = strlen(link);
zlen = mp->m_sb.sb_blocksize - linklen;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
zlen -= sizeof(struct xfs_dsymlink_hdr);
if (zlen < mp->m_sb.sb_blocksize)
memset(link + linklen, 0, zlen);
* Also make sure that we don't process more than the single record
* we've been passed (large block sizes can hold multiple inode chunks).
*/
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
blks_per_buf = igeo->blocks_per_cluster;
else
blks_per_buf = igeo->ialloc_blks;
if ((mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK && off != 0) ||
(mp->m_sb.sb_inopblock > XFS_INODES_PER_CHUNK &&
off % XFS_INODES_PER_CHUNK != 0) ||
- (xfs_sb_version_hasalign(&mp->m_sb) &&
+ (xfs_has_align(mp) &&
mp->m_sb.sb_inoalignmt != 0 &&
agbno % mp->m_sb.sb_inoalignmt != 0)) {
if (show_warnings)
if (!scan_btree(agno, root, levels, TYP_INOBT, &finobt, scanfunc_ino))
return 0;
- if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ if (xfs_has_finobt(mp)) {
root = be32_to_cpu(agi->agi_free_root);
levels = be32_to_cpu(agi->agi_free_level);
logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart);
logblocks = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
- logversion = xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ logversion = xfs_has_logv2(mp) ? 2 : 1;
+ if (xfs_has_crc(mp))
cycle = log.l_curr_cycle + 1;
libxfs_log_clear(NULL, iocur_top->data, logstart, logblocks,
struct xfs_mount *mp,
uint8_t filetype)
{
- if (!xfs_sb_version_hasftype(&mp->m_sb))
+ if (!xfs_has_ftype(mp))
return filetype_strings[XFS_DIR3_FT_UNKNOWN];
if (filetype >= XFS_DIR3_FT_MAX)
* The log must always move forward on v5 superblocks. Bump it to the
* next cycle.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
cycle = mp->m_log->l_curr_cycle + 1;
dbprintf(_("Clearing log and setting UUID\n"));
XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
(xfs_extlen_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks),
uuidp,
- xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
+ xfs_has_logv2(mp) ? 2 : 1,
mp->m_sb.sb_logsunit, XLOG_FMT, cycle, true);
if (error) {
dbprintf(_("ERROR: cannot clear the log\n"));
* We assume the state of these features now, so macros don't exist for
* them any more.
*/
if (mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)
strcat(s, ",NLINK");
if (mp->m_sb.sb_versionnum & XFS_SB_VERSION_SHAREDBIT)
strcat(s, ",SHARED");
if (mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT)
strcat(s, ",DIRV2");
- if (xfs_sb_version_hasattr(&mp->m_sb))
+ if (xfs_has_attr(mp))
strcat(s, ",ATTR");
- if (xfs_sb_version_hasquota(&mp->m_sb))
+ if (xfs_has_quota(mp))
strcat(s, ",QUOTA");
- if (xfs_sb_version_hasalign(&mp->m_sb))
+ if (xfs_has_align(mp))
strcat(s, ",ALIGN");
- if (xfs_sb_version_hasdalign(&mp->m_sb))
+ if (xfs_has_dalign(mp))
strcat(s, ",DALIGN");
- if (xfs_sb_version_haslogv2(&mp->m_sb))
+ if (xfs_has_logv2(mp))
strcat(s, ",LOGV2");
/* This feature is required now as well */
if (mp->m_sb.sb_versionnum & XFS_SB_VERSION_EXTFLGBIT)
strcat(s, ",EXTFLG");
- if (xfs_sb_version_hassector(&mp->m_sb))
+ if (xfs_has_sector(mp))
strcat(s, ",SECTOR");
- if (xfs_sb_version_hasasciici(&mp->m_sb))
+ if (xfs_has_asciici(mp))
strcat(s, ",ASCII_CI");
- if (xfs_sb_version_hasmorebits(&mp->m_sb))
+ if (mp->m_sb.sb_versionnum & XFS_SB_VERSION_MOREBITSBIT)
strcat(s, ",MOREBITS");
- if (xfs_sb_version_hasattr2(&mp->m_sb))
+ if (xfs_has_attr2(mp))
strcat(s, ",ATTR2");
- if (xfs_sb_version_haslazysbcount(&mp->m_sb))
+ if (xfs_has_lazysbcount(mp))
strcat(s, ",LAZYSBCOUNT");
- if (xfs_sb_version_hasprojid32(&mp->m_sb))
+ if (xfs_has_projid32(mp))
strcat(s, ",PROJID32BIT");
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
strcat(s, ",CRC");
- if (xfs_sb_version_hasftype(&mp->m_sb))
+ if (xfs_has_ftype(mp))
strcat(s, ",FTYPE");
- if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (xfs_has_finobt(mp))
strcat(s, ",FINOBT");
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
strcat(s, ",SPARSE_INODES");
- if (xfs_sb_version_hasmetauuid(&mp->m_sb))
+ if (xfs_has_metauuid(mp))
strcat(s, ",META_UUID");
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
strcat(s, ",RMAPBT");
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
strcat(s, ",REFLINK");
- if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
+ if (xfs_has_inobtcounts(mp))
strcat(s, ",INOBTCNT");
- if (xfs_sb_version_hasbigtime(&mp->m_sb))
+ if (xfs_has_bigtime(mp))
strcat(s, ",BIGTIME");
- if (xfs_sb_version_needsrepair(&mp->m_sb))
+ if (xfs_has_needsrepair(mp))
strcat(s, ",NEEDSREPAIR");
return s;
}
version = 0x0034 | XFS_SB_VERSION_LOGV2BIT;
break;
case XFS_SB_VERSION_4:
- if (xfs_sb_version_haslogv2(&mp->m_sb))
+ if (xfs_has_logv2(mp))
dbprintf(
_("version 2 log format is already in use\n"));
else
return 0;
} else if (!strcasecmp(argv[1], "attr1")) {
- if (xfs_sb_version_hasattr2(&mp->m_sb)) {
+ if (xfs_has_attr2(mp)) {
if (!(mp->m_sb.sb_features2 &=
~XFS_SB_VERSION2_ATTR2BIT))
mp->m_sb.sb_versionnum &=
}
if (whatkind == SHOW_AUTO) {
- if (xfs_sb_version_hasbigtime(&mp->m_sb))
+ if (xfs_has_bigtime(mp))
whatkind = SHOW_BIGTIME;
else
whatkind = SHOW_CLASSIC;
if (invalid_data &&
iocur_top->typ->crc_off == TYP_F_NO_CRC_OFF &&
- xfs_sb_version_hascrc(&mp->m_sb)) {
+ xfs_has_crc(mp)) {
dbprintf(_("Cannot recalculate CRCs on this type of object\n"));
return 0;
}
local_ops.verify_read = stashed_ops->verify_read;
iocur_top->bp->b_ops = &local_ops;
- if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_has_crc(mp)) {
local_ops.verify_write = xfs_dummy_verify;
} else if (corrupt) {
local_ops.verify_write = xfs_dummy_verify;
/*
* Set whether we're using stripe alignment.
*/
- if (xfs_sb_version_hasdalign(&mp->m_sb)) {
+ if (xfs_has_dalign(mp)) {
mp->m_dalign = sbp->sb_unit;
mp->m_swidth = sbp->sb_width;
}
xfs_da_mount(mp);
- if (xfs_sb_version_hasattr2(&mp->m_sb))
+ if (xfs_has_attr2(mp))
mp->m_flags |= LIBXFS_MOUNT_ATTR2;
/* Initialize the precomputed transaction reservations values */
struct xfs_mount *mp = bp->b_mount;
int idx;
- idx = xfs_sb_version_hascrc(&mp->m_sb);
+ idx = xfs_has_crc(mp);
if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])))
return false;
return dmagic == bp->b_ops->magic[idx];
struct xfs_mount *mp = bp->b_mount;
int idx;
- idx = xfs_sb_version_hascrc(&mp->m_sb);
+ idx = xfs_has_crc(mp);
if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])))
return false;
return dmagic == bp->b_ops->magic16[idx];
int iclog_size;
uint num_headers;
- if (xfs_sb_version_haslogv2(&mp->m_sb)) {
+ if (xfs_has_logv2(mp)) {
iclog_size = XLOG_MAX_RECORD_BSIZE;
iclog_header_size = BBTOB(iclog_size / XLOG_HEADER_CYCLE_SIZE);
} else {
unit_bytes += iclog_header_size;
/* for roundoff padding for transaction data and one for commit record */
- if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
+ if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1) {
/* log su roundoff */
unit_bytes += 2 * mp->m_sb.sb_logsunit;
} else {
}
} else {
if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
- xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
+ xfs_has_realtime(ip->i_mount))
di_flags |= XFS_DIFLAG_REALTIME;
if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
di_flags |= XFS_DIFLAG_EXTSIZE;
ip->i_extsize = pip ? 0 : fsx->fsx_extsize;
ip->i_diflags = pip ? 0 : xfs_flags2diflags(ip, fsx->fsx_xflags);
- if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
+ if (xfs_has_v3inodes(ip->i_mount)) {
VFS_I(ip)->i_version = 1;
ip->i_diflags2 = pip ? ip->i_mount->m_ino_geo.new_diflags2 :
xfs_flags2diflags2(ip, fsx->fsx_xflags);
ASSERT(ip->i_forkoff <= mp->m_sb.sb_inodesize);
/* bump the change count on v3 inodes */
- if (xfs_sb_version_has_v3inode(&mp->m_sb))
+ if (xfs_has_v3inodes(mp))
VFS_I(ip)->i_version++;
/*
x.logBBsize = XFS_FSB_TO_BB(mp, sb->sb_logblocks);
x.logBBstart = XFS_FSB_TO_DADDR(mp, sb->sb_logstart);
x.lbsize = BBSIZE;
- if (xfs_sb_version_hassector(&mp->m_sb))
+ if (xfs_has_sector(mp))
x.lbsize <<= (sb->sb_logsectlog - BBSHIFT);
if (!x.logname && sb->sb_logstart == 0) {
irec->ir_count = inocnt;
irec->ir_freecount = finocnt;
- if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
+ if (xfs_has_sparseinodes(cur->bc_mp)) {
uint64_t sparse;
int spmask;
uint16_t holemask;
bool finobt;
int error;
- finobt = xfs_sb_version_hasfinobt(&sc->mp->m_sb);
+ finobt = xfs_has_finobt(sc->mp);
init_rebuild(sc, &XFS_RMAP_OINFO_INOBT, free_space, btr_ino);
/* Compute inode statistics. */
/* Since we're not writing the AGI yet, no need to commit the cursor */
libxfs_btree_del_cursor(btr_ino->cur, 0);
- if (!xfs_sb_version_hasfinobt(&sc->mp->m_sb))
+ if (!xfs_has_finobt(sc->mp))
return;
/* Add all observed finobt records. */
xfs_agnumber_t agno = pag->pag_agno;
int error;
- if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb))
+ if (!xfs_has_rmapbt(sc->mp))
return;
init_rebuild(sc, &XFS_RMAP_OINFO_AG, free_space, btr);
xfs_agnumber_t agno = pag->pag_agno;
int error;
- if (!xfs_sb_version_hasreflink(&sc->mp->m_sb))
+ if (!xfs_has_reflink(sc->mp))
return;
init_rebuild(sc, &XFS_RMAP_OINFO_REFC, free_space, btr);
/* don't check freespace btrees -- will be checked by caller */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return retval;
if (platform_uuid_compare(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid)) {
/* don't check inode btree -- will be checked by caller */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return retval;
if (platform_uuid_compare(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid)) {
* superblocks. If it is anything other than 0 it is considered garbage
* data beyond the valid sb and explicitly zeroed above.
*/
- if (xfs_sb_version_haspquotino(&mp->m_sb) &&
+ if (xfs_has_pquotino(mp) &&
sb->sb_inprogress == 1 && sb->sb_pquotino != NULLFSINO) {
if (!no_modify) {
sb->sb_pquotino = 0;
int hdrsize = 0;
int error;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
hdrsize = sizeof(struct xfs_attr3_rmt_hdr);
/* ASSUMPTION: valuelen is a valid number, so use it for looping */
if (cluster_count == 0)
cluster_count = 1;
- if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
+ if (xfs_has_sparseinodes(mp) &&
M_IGEO(mp)->inodes_per_cluster >= XFS_INODES_PER_HOLEMASK_BIT)
can_punch_sparse = true;
{
memset(dinoc, 0, sizeof(*dinoc));
dinoc->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
dinoc->di_version = 3;
else
dinoc->di_version = 2;
case XR_E_INUSE:
case XR_E_MULT:
if (type == XR_INO_DATA &&
- xfs_sb_version_hasreflink(&mp->m_sb))
+ xfs_has_reflink(mp))
break;
do_warn(
_("%s fork in %s inode %" PRIu64 " claims used block %" PRIu64 "\n"),
}
*dipp = xfs_make_iptr(mp, bp, agino - cluster_agino);
- ASSERT(!xfs_sb_version_hascrc(&mp->m_sb) ||
+ ASSERT(!xfs_has_crc(mp) ||
XFS_AGINO_TO_INO(mp, agno, agino) ==
be64_to_cpu((*dipp)->di_ino));
return bp;
*tot = 0;
*nex = 0;
- magic = xfs_sb_version_hascrc(&mp->m_sb) ? XFS_BMAP_CRC_MAGIC
+ magic = xfs_has_crc(mp) ? XFS_BMAP_CRC_MAGIC
: XFS_BMAP_MAGIC;
level = be16_to_cpu(dib->bb_level);
int bad_dqb = 0;
/* We only print the first problem we find */
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!libxfs_verify_cksum((char *)dqb,
sizeof(*dqb),
XFS_DQUOT_CRC_OFF)) {
byte_cnt = min(pathlen, byte_cnt);
src = bp->b_addr;
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (!libxfs_symlink_hdr_ok(lino, offset,
byte_cnt, bp)) {
do_warn(
* Of course if we make any modifications after this, the inode gets
* rewritten, and the CRC is updated automagically.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
+ if (xfs_has_crc(mp) &&
!libxfs_verify_cksum((char *)dino, mp->m_sb.sb_inodesize,
XFS_DINODE_CRC_OFF)) {
retval = 1;
if (!no_modify) {
do_warn(_(" resetting version number\n"));
dino->di_version =
- xfs_sb_version_hascrc(&mp->m_sb) ? 3 : 2;
+ xfs_has_crc(mp) ? 3 : 2;
*dirty = 1;
} else
do_warn(_(" would reset version number\n"));
* we are called here that the inode has not already been modified in
* memory and hence invalidated the CRC.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
if (be64_to_cpu(dino->di_ino) != lino) {
if (!uncertain)
do_warn(
}
if ((flags2 & XFS_DIFLAG2_REFLINK) &&
- !xfs_sb_version_hasreflink(&mp->m_sb)) {
+ !xfs_has_reflink(mp)) {
if (!uncertain) {
do_warn(
_("inode %" PRIu64 " is marked reflinked but file system does not support reflink\n"),
}
if ((flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
- !xfs_sb_version_hasreflink(&mp->m_sb)) {
+ !xfs_has_reflink(mp)) {
if (!uncertain) {
do_warn(
_("inode %" PRIu64 " has CoW extent size hint but file system does not support reflink\n"),
}
if (xfs_dinode_has_bigtime(dino) &&
- !xfs_sb_version_hasbigtime(&mp->m_sb)) {
+ !xfs_has_bigtime(mp)) {
if (!uncertain) {
do_warn(
_("inode %" PRIu64 " is marked bigtime but file system does not support large timestamps\n"),
struct xfs_mount *mp,
struct xfs_inobt_rec *rp)
{
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
return rp->ir_u.sp.ir_freecount;
return be32_to_cpu(rp->ir_u.f.ir_freecount);
}
struct xfs_inobt_rec *rp,
int freecount)
{
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
rp->ir_u.sp.ir_freecount = freecount;
else
rp->ir_u.f.ir_freecount = cpu_to_be32(freecount);
{
uint8_t *ptr;
- if (!xfs_sb_version_hasftype(&mp->m_sb))
+ if (!xfs_has_ftype(mp))
return NULL;
ptr = calloc(XFS_INODES_PER_CHUNK, sizeof(*ptr));
x.logBBsize = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
x.logBBstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart);
x.lbsize = BBSIZE;
- if (xfs_sb_version_hassector(&mp->m_sb))
+ if (xfs_has_sector(mp))
x.lbsize <<= (mp->m_sb.sb_logsectlog - BBSHIFT);
log->l_dev = mp->m_logdev_targp;
log->l_logBBstart = x.logBBstart;
log->l_sectBBsize = BTOBB(x.lbsize);
log->l_mp = mp;
- if (xfs_sb_version_hassector(&mp->m_sb)) {
+ if (xfs_has_sector(mp)) {
log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
/* for larger sector sizes, must have v2 or external log */
ASSERT(log->l_sectbb_log == 0 ||
log->l_logBBstart == 0 ||
- xfs_sb_version_haslogv2(&mp->m_sb));
+ xfs_has_logv2(mp));
ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
}
log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart),
(xfs_extlen_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks),
&mp->m_sb.sb_uuid,
- xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1,
+ xfs_has_logv2(mp) ? 2 : 1,
mp->m_sb.sb_logsunit, XLOG_FMT, XLOG_INIT_CYCLE, true);
/* update the log data structure with new state */
* Finally, seed the max LSN from the current state of the log if this
* is a v5 filesystem.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
libxfs_max_lsn = atomic64_read(&log->l_last_sync_lsn);
}
set_inobtcount(
struct xfs_mount *mp)
{
- if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_has_crc(mp)) {
printf(
_("Inode btree count feature only supported on V5 filesystems.\n"));
exit(0);
}
- if (!xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ if (!xfs_has_finobt(mp)) {
printf(
_("Inode btree count feature requires free inode btree.\n"));
exit(0);
}
- if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
+ if (xfs_has_inobtcounts(mp)) {
printf(_("Filesystem already has inode btree counts.\n"));
exit(0);
}
set_bigtime(
struct xfs_mount *mp)
{
- if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_has_crc(mp)) {
printf(
_("Large timestamp feature only supported on V5 filesystems.\n"));
exit(0);
}
- if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
+ if (xfs_has_bigtime(mp)) {
printf(_("Filesystem already supports large timestamps.\n"));
exit(0);
}
queue_work(&wq, check_rmap_btrees, i, NULL);
destroy_work_queue(&wq);
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return;
create_work_queue(&wq, mp, platform_nproc());
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
agi->agi_unlinked[i] = cpu_to_be32(NULLAGINO);
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
platform_uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
- if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ if (xfs_has_finobt(mp)) {
agi->agi_free_root =
cpu_to_be32(btr_fino->newbt.afake.af_root);
agi->agi_free_level =
cpu_to_be32(btr_fino->newbt.afake.af_levels);
}
- if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
+ if (xfs_has_inobtcounts(mp)) {
agi->agi_iblocks = cpu_to_be32(btr_ino->newbt.afake.af_blocks);
agi->agi_fblocks = cpu_to_be32(btr_fino->newbt.afake.af_blocks);
}
cpu_to_be32(btr_cnt->newbt.afake.af_levels);
agf->agf_freeblks = cpu_to_be32(btr_bno->freeblks);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (xfs_has_rmapbt(mp)) {
agf->agf_roots[XFS_BTNUM_RMAP] =
cpu_to_be32(btr_rmap->newbt.afake.af_root);
agf->agf_levels[XFS_BTNUM_RMAP] =
cpu_to_be32(btr_rmap->newbt.afake.af_blocks);
}
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
agf->agf_refcount_root =
cpu_to_be32(btr_refc->newbt.afake.af_root);
agf->agf_refcount_level =
/*
* Count and record the number of btree blocks consumed if required.
*/
- if (xfs_sb_version_haslazysbcount(&mp->m_sb)) {
+ if (xfs_has_lazysbcount(mp)) {
unsigned int blks;
/*
* Don't count the root blocks as they are already
*/
blks = btr_bno->newbt.afake.af_blocks +
btr_cnt->newbt.afake.af_blocks - 2;
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
blks += btr_rmap->newbt.afake.af_blocks - 1;
agf->agf_btreeblks = cpu_to_be32(blks);
#ifdef XR_BLD_FREE_TRACE
XFS_BTNUM_CNT);
#endif
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
/* initialise the AGFL, then fill it if there are blocks left over. */
/* setting to 0xff results in initialisation to NULLAGBLOCK */
memset(agfl, 0xff, mp->m_sb.sb_sectsize);
freelist = xfs_buf_to_agfl_bno(agfl_buf);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (xfs_has_crc(mp)) {
agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
agfl->agfl_seqno = cpu_to_be32(agno);
platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
freelist = xfs_buf_to_agfl_bno(agfl_buf);
fill_agfl(btr_bno, freelist, &agfl_idx);
fill_agfl(btr_cnt, freelist, &agfl_idx);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
fill_agfl(btr_rmap, freelist, &agfl_idx);
/* Set the AGF counters for the AGFL. */
#endif
ASSERT(btr_bno.freeblks == btr_cnt.freeblks);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (xfs_has_rmapbt(mp)) {
build_rmap_tree(&sc, agno, &btr_rmap);
sb_fdblocks_ag[agno] += btr_rmap.newbt.afake.af_blocks - 1;
}
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
build_refcount_tree(&sc, agno, &btr_refc);
/*
finish_rebuild(mp, &btr_bno, lost_blocks);
finish_rebuild(mp, &btr_cnt, lost_blocks);
finish_rebuild(mp, &btr_ino, lost_blocks);
- if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ if (xfs_has_finobt(mp))
finish_rebuild(mp, &btr_fino, lost_blocks);
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_has_rmapbt(mp))
finish_rebuild(mp, &btr_rmap, lost_blocks);
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
finish_rebuild(mp, &btr_refc, lost_blocks);
/*
set_nlink(VFS_I(ip), 1); /* account for sb ptr */
times = XFS_ICHGTIME_CHG | XFS_ICHGTIME_MOD;
- if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (xfs_has_v3inodes(mp)) {
VFS_I(ip)->i_version = 1;
ip->i_diflags2 = 0;
times |= XFS_ICHGTIME_CREATE;
set_nlink(VFS_I(ip), 1); /* account for sb ptr */
times = XFS_ICHGTIME_CHG | XFS_ICHGTIME_MOD;
- if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (xfs_has_v3inodes(mp)) {
VFS_I(ip)->i_version = 1;
ip->i_diflags2 = 0;
times |= XFS_ICHGTIME_CREATE;
set_nlink(VFS_I(ip), 2); /* account for . and .. */
times = XFS_ICHGTIME_CHG | XFS_ICHGTIME_MOD;
- if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
+ if (xfs_has_v3inodes(mp)) {
VFS_I(ip)->i_version = 1;
ip->i_diflags2 = 0;
times |= XFS_ICHGTIME_CREATE;
endptr = (char *)blp;
if (endptr > (char *)btp)
endptr = (char *)btp;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
wantmagic = XFS_DIR3_BLOCK_MAGIC;
else
wantmagic = XFS_DIR2_BLOCK_MAGIC;
} else {
endptr = (char *)d + mp->m_dir_geo->blksize;
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
wantmagic = XFS_DIR3_DATA_MAGIC;
else
wantmagic = XFS_DIR2_DATA_MAGIC;
continue;
/* validate ftype field if supported */
- if (xfs_sb_version_hasftype(&mp->m_sb)) {
+ if (xfs_has_ftype(mp)) {
uint8_t dir_ftype;
uint8_t ino_ftype;
}
/* validate ftype field if supported */
- if (xfs_sb_version_hasftype(&mp->m_sb)) {
+ if (xfs_has_ftype(mp)) {
uint8_t dir_ftype;
uint8_t ino_ftype;
* expect an exact match for user dquots and for non-root group and
* project dquots.
*/
- if (xfs_sb_version_hascrc(&mp->m_sb) || type == XFS_DQTYPE_USER || id)
+ if (xfs_has_crc(mp) || type == XFS_DQTYPE_USER || id)
return ddq_type == type;
/*
}
if ((ddq->d_type & XFS_DQTYPE_BIGTIME) &&
- !xfs_sb_version_hasbigtime(&mp->m_sb)) {
+ !xfs_has_bigtime(mp)) {
do_warn(
_("%s id %u is marked bigtime but file system does not support large timestamps\n"),
qflags_typestr(dquots->type), id);
rmap_needs_work(
struct xfs_mount *mp)
{
- return xfs_sb_version_hasreflink(&mp->m_sb) ||
- xfs_sb_version_hasrmapbt(&mp->m_sb);
+ return xfs_has_reflink(mp) || xfs_has_rmapbt(mp);
}
/*
/* inodes */
ino_rec = findfirst_inode_rec(agno);
for (; ino_rec != NULL; ino_rec = next_ino_rec(ino_rec)) {
- if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
+ if (xfs_has_sparseinodes(mp)) {
startidx = find_first_zero_bit(ino_rec->ir_sparse);
nr = XFS_INODES_PER_CHUNK - popcnt(ino_rec->ir_sparse);
} else {
struct bitmap *own_ag_bitmap = NULL;
int error = 0;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 0;
/* Release the ar_rmaps; they were put into the rmapbt during p5. */
size_t old_stack_nr;
int error;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return 0;
rmaps = ag_rmaps[agno].ar_rmaps;
int have;
int error;
- if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (!xfs_has_rmapbt(mp))
return 0;
if (rmapbt_suspect) {
if (no_modify && agno == 0)
* the regular lookup doesn't find anything or if it doesn't
* match the observed rmap.
*/
- if (xfs_sb_version_hasreflink(&bt_cur->bc_mp->m_sb) &&
+ if (xfs_has_reflink(bt_cur->bc_mp) &&
(!have || !rmap_is_good(rm_rec, &tmp))) {
error = rmap_lookup_overlapped(bt_cur, rm_rec,
&tmp, &have);
int i;
int error;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_has_reflink(mp))
return 0;
if (refcbt_suspect) {
if (no_modify && agno == 0)
struct xfs_inobt_rec *rp,
int offset)
{
- if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (!xfs_has_sparseinodes(mp))
return false;
return xfs_inobt_is_sparse_disk(rp, offset);
* multiple inode owners are ok with
* reflink enabled
*/
- if (xfs_sb_version_hasreflink(&mp->m_sb) &&
+ if (xfs_has_reflink(mp) &&
!XFS_RMAP_NON_INODE_OWNER(owner))
break;
fallthrough;
} else {
bool bad;
- if (xfs_sb_version_hasreflink(&mp->m_sb))
+ if (xfs_has_reflink(mp))
bad = !rmap_in_order(b, laststartblock,
owner, lastowner,
offset, lastoffset);
}
/* verify sparse record formats have a valid inode count */
- if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
+ if (xfs_has_sparseinodes(mp) &&
ninodes != rp->ir_u.sp.ir_count) {
do_warn(
_("invalid inode count, inode chunk %d/%u, count %d ninodes %d\n"),
}
/* verify sparse record formats have a valid inode count */
- if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
+ if (xfs_has_sparseinodes(mp) &&
ninodes != rp->ir_u.sp.ir_count) {
do_warn(
_("invalid inode count, inode chunk %d/%u, count %d ninodes %d\n"),
* ir_count holds the inode count for all
* records on fs' with sparse inode support
*/
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_has_sparseinodes(mp))
icount = rp[i].ir_u.sp.ir_count;
agcnts->agicount += icount;
bno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
if (libxfs_verify_agbno(mp, agno, bno)) {
- magic = xfs_sb_version_hascrc(&mp->m_sb) ? XFS_ABTB_CRC_MAGIC
+ magic = xfs_has_crc(mp) ? XFS_ABTB_CRC_MAGIC
: XFS_ABTB_MAGIC;
scan_sbtree(bno, be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
agno, 0, scan_allocbt, 1, magic, agcnts,
bno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
if (libxfs_verify_agbno(mp, agno, bno)) {
- magic = xfs_sb_version_hascrc(&mp->m_sb) ? XFS_ABTC_CRC_MAGIC
+ magic = xfs_has_crc(mp) ? XFS_ABTC_CRC_MAGIC
: XFS_ABTC_MAGIC;
scan_sbtree(bno, be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
agno, 0, scan_allocbt, 1, magic, agcnts,
bno, agno);
}
- if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (xfs_has_rmapbt(mp)) {
struct rmap_priv priv;
unsigned int levels;
}
}
- if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (xfs_has_reflink(mp)) {
unsigned int levels;
levels = be32_to_cpu(agf->agf_refcount_level);
be32_to_cpu(agf->agf_longest), agcnts->agflongest, agno);
}
- if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
+ if (xfs_has_lazysbcount(mp) &&
be32_to_cpu(agf->agf_btreeblks) != agcnts->agfbtreeblks) {
do_warn(_("agf_btreeblks %u, counted %" PRIu64 " in ag %u\n"),
be32_to_cpu(agf->agf_btreeblks), agcnts->agfbtreeblks, agno);
bno = be32_to_cpu(agi->agi_root);
if (libxfs_verify_agbno(mp, agno, bno)) {
- magic = xfs_sb_version_hascrc(&mp->m_sb) ? XFS_IBT_CRC_MAGIC
+ magic = xfs_has_crc(mp) ? XFS_IBT_CRC_MAGIC
: XFS_IBT_MAGIC;
scan_sbtree(bno, be32_to_cpu(agi->agi_level),
agno, 0, scan_inobt, 1, magic, &priv,
be32_to_cpu(agi->agi_root), agno);
}
- if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ if (xfs_has_finobt(mp)) {
bno = be32_to_cpu(agi->agi_free_root);
if (libxfs_verify_agbno(mp, agno, bno)) {
- magic = xfs_sb_version_hascrc(&mp->m_sb) ?
+ magic = xfs_has_crc(mp) ?
XFS_FIBT_CRC_MAGIC : XFS_FIBT_MAGIC;
scan_sbtree(bno, be32_to_cpu(agi->agi_free_level),
agno, 0, scan_inobt, 1, magic, &priv,
}
}
- if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
+ if (xfs_has_inobtcounts(mp)) {
if (be32_to_cpu(agi->agi_iblocks) != priv.ino_blocks)
do_warn(_("bad inobt block count %u, saw %u\n"),
be32_to_cpu(agi->agi_iblocks),
be32_to_cpu(agi->agi_freecount), agcnts->agifreecount, agno);
}
- if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
+ if (xfs_has_finobt(mp) &&
be32_to_cpu(agi->agi_freecount) != agcnts->fibtfreecount) {
do_warn(_("agi_freecount %u, counted %u in ag %u finobt\n"),
be32_to_cpu(agi->agi_freecount), agcnts->fibtfreecount,
update_sb_version(
struct xfs_mount *mp)
{
- if (fs_attributes && !xfs_sb_version_hasattr(&mp->m_sb))
+ if (fs_attributes && !xfs_has_attr(mp))
xfs_sb_version_addattr(&mp->m_sb);
- if (fs_attributes2 && !xfs_sb_version_hasattr2(&mp->m_sb))
+ if (fs_attributes2 && !xfs_has_attr2(mp))
xfs_sb_version_addattr2(&mp->m_sb);
/* V2 inode conversion is now always going to happen */
* have quotas.
*/
if (fs_quotas) {
- if (!xfs_sb_version_hasquota(&mp->m_sb))
+ if (!xfs_has_quota(mp))
xfs_sb_version_addquota(&mp->m_sb);
/*
} else {
mp->m_sb.sb_qflags = 0;
- if (xfs_sb_version_hasquota(&mp->m_sb)) {
+ if (xfs_has_quota(mp)) {
lost_quotas = 1;
mp->m_sb.sb_versionnum &= ~XFS_SB_VERSION_QUOTABIT;
}
}
- if (!fs_aligned_inodes && xfs_sb_version_hasalign(&mp->m_sb))
+ if (!fs_aligned_inodes && xfs_has_align(mp))
mp->m_sb.sb_versionnum &= ~XFS_SB_VERSION_ALIGNBIT;
mp->m_features &= ~(XFS_FEAT_QUOTA | XFS_FEAT_ALIGN);
return 1;
}
- if (xfs_sb_version_hasattr(&mp->m_sb))
+ if (xfs_has_attr(mp))
fs_attributes = 1;
- if (xfs_sb_version_hasattr2(&mp->m_sb))
+ if (xfs_has_attr2(mp))
fs_attributes2 = 1;
if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
}
}
- if (xfs_sb_version_hasquota(&mp->m_sb)) {
+ if (xfs_has_quota(mp)) {
fs_quotas = 1;
if (mp->m_sb.sb_uquotino != 0 && mp->m_sb.sb_uquotino != NULLFSINO)
have_pquotino = 1;
}
- if (xfs_sb_version_hasalign(&mp->m_sb)) {
+ if (xfs_has_align(mp)) {
fs_aligned_inodes = 1;
fs_ino_alignment = mp->m_sb.sb_inoalignmt;
}
xfs_daddr_t logblocks;
int logversion;
- if (!xfs_sb_version_hascrc(&mp->m_sb))
+ if (!xfs_has_crc(mp))
return;
/*
new_cycle = max_cycle + 3;
logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart);
logblocks = XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
- logversion = xfs_sb_version_haslogv2(&mp->m_sb) ? 2 : 1;
+ logversion = xfs_has_logv2(mp) ? 2 : 1;
do_warn(_("Maximum metadata LSN (%d:%d) is ahead of log (%d:%d).\n"),
max_cycle, max_block, log->l_curr_cycle, log->l_curr_block);
struct xfs_buf *bp;
int error;
- if (!xfs_sb_version_hascrc(&mp->m_sb) ||
+ if (!xfs_has_crc(mp) ||
xfs_sb_version_needsrepair(&mp->m_sb))
return;
mp->m_flags |= LIBXFS_MOUNT_WANT_CORRUPTED;
/* Capture the first writeback so that we can set needsrepair. */
- if (xfs_sb_version_hascrc(&mp->m_sb))
+ if (xfs_has_crc(mp))
mp->m_buf_writeback_fn = repair_capture_writeback;
/*