#
# Tracing flags:
-# -DXR_BMAP_DBG incore block bitmap debugging
# -DXR_INODE_TRACE inode processing
-# -DXR_BMAP_TRACE bmap btree processing
# -DXR_DIR_TRACE directory processing
# -DXR_DUP_TRACE duplicate extent processing
# -DXR_BCNT_TRACE incore bcnt freespace btree building
pthread_mutex_lock(&ag_locks[agno]);
- switch (state = get_agbno_state(mp, agno, agbno)) {
+ state = get_bmap(agno, agbno);
+ switch (state) {
case XR_E_INO:
do_warn(
_("uncertain inode block %d/%d already known\n"),
case XR_E_UNKNOWN:
case XR_E_FREE1:
case XR_E_FREE:
- set_agbno_state(mp, agno, agbno, XR_E_INO);
+ set_bmap(agno, agbno, XR_E_INO);
break;
case XR_E_MULT:
case XR_E_INUSE:
do_warn(
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, agbno, state);
- set_agbno_state(mp, agno, agbno, XR_E_MULT);
+ set_bmap(agno, agbno, XR_E_MULT);
pthread_mutex_unlock(&ag_locks[agno]);
return(0);
default:
do_warn(
_("inode block %d/%d bad state, (state %d)\n"),
agno, agbno, state);
- set_agbno_state(mp, agno, agbno, XR_E_INO);
+ set_bmap(agno, agbno, XR_E_INO);
break;
}
pthread_mutex_lock(&ag_locks[agno]);
for (j = 0, cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno; cur_agbno++) {
- switch (state = get_agbno_state(mp, agno, cur_agbno)) {
+ state = get_bmap(agno, cur_agbno);
+ switch (state) {
case XR_E_MULT:
case XR_E_INUSE:
case XR_E_INUSE_FS:
do_warn(
_("inode block %d/%d multiply claimed, (state %d)\n"),
agno, cur_agbno, state);
- set_agbno_state(mp, agno, cur_agbno, XR_E_MULT);
+ set_bmap(agno, cur_agbno, XR_E_MULT);
j = 1;
break;
case XR_E_INO:
for (cur_agbno = chunk_start_agbno;
cur_agbno < chunk_stop_agbno; cur_agbno++) {
- switch (state = get_agbno_state(mp, agno, cur_agbno)) {
+ state = get_bmap(agno, cur_agbno);
+ switch (state) {
case XR_E_INO:
do_error(
_("uncertain inode block %llu already known\n"),
case XR_E_UNKNOWN:
case XR_E_FREE1:
case XR_E_FREE:
- set_agbno_state(mp, agno, cur_agbno, XR_E_INO);
+ set_bmap(agno, cur_agbno, XR_E_INO);
break;
case XR_E_MULT:
case XR_E_INUSE:
do_warn(
_("inode block %d/%d bad state, (state %d)\n"),
agno, cur_agbno, state);
- set_agbno_state(mp, agno, cur_agbno, XR_E_INO);
+ set_bmap(agno, cur_agbno, XR_E_INO);
break;
}
}
* mark block as an inode block in the incore bitmap
*/
pthread_mutex_lock(&ag_locks[agno]);
- switch (state = get_agbno_state(mp, agno, agbno)) {
- case XR_E_INO: /* already marked */
- break;
- case XR_E_UNKNOWN:
- case XR_E_FREE:
- case XR_E_FREE1:
- set_agbno_state(mp, agno, agbno, XR_E_INO);
- break;
- case XR_E_BAD_STATE:
- do_error(_("bad state in block map %d\n"), state);
- break;
- default:
- set_agbno_state(mp, agno, agbno, XR_E_MULT);
- do_warn(_("inode block %llu multiply claimed, state was %d\n"),
- XFS_AGB_TO_FSB(mp, agno, agbno), state);
- break;
+ state = get_bmap(agno, agbno);
+ switch (state) {
+ case XR_E_INO: /* already marked */
+ break;
+ case XR_E_UNKNOWN:
+ case XR_E_FREE:
+ case XR_E_FREE1:
+ set_bmap(agno, agbno, XR_E_INO);
+ break;
+ case XR_E_BAD_STATE:
+ do_error(_("bad state in block map %d\n"), state);
+ break;
+ default:
+ set_bmap(agno, agbno, XR_E_MULT);
+ do_warn(_("inode block %llu multiply claimed, state was %d\n"),
+ XFS_AGB_TO_FSB(mp, agno, agbno), state);
+ break;
}
pthread_mutex_unlock(&ag_locks[agno]);
agbno++;
pthread_mutex_lock(&ag_locks[agno]);
- switch (state = get_agbno_state(mp, agno, agbno)) {
+ state = get_bmap(agno, agbno);
+ switch (state) {
case XR_E_INO: /* already marked */
break;
case XR_E_UNKNOWN:
case XR_E_FREE:
case XR_E_FREE1:
- set_agbno_state(mp, agno, agbno, XR_E_INO);
+ set_bmap(agno, agbno, XR_E_INO);
break;
case XR_E_BAD_STATE:
do_error(_("bad state in block map %d\n"),
state);
break;
default:
- set_agbno_state(mp, agno, agbno, XR_E_MULT);
+ set_bmap(agno, agbno, XR_E_MULT);
do_warn(_("inode block %llu multiply claimed, "
"state was %d\n"),
XFS_AGB_TO_FSB(mp, agno, agbno), state);
continue;
}
- state = get_rtbno_state(mp, ext);
-
+ state = get_rtbmap(ext);
switch (state) {
- case XR_E_FREE:
- case XR_E_UNKNOWN:
- set_rtbno_state(mp, ext, XR_E_INUSE);
+ case XR_E_FREE:
+ case XR_E_UNKNOWN:
+ set_rtbmap(ext, XR_E_INUSE);
+ break;
+ case XR_E_BAD_STATE:
+ do_error(_("bad state in rt block map %llu\n"), ext);
+ case XR_E_FS_MAP:
+ case XR_E_INO:
+ case XR_E_INUSE_FS:
+ do_error(_("data fork in rt inode %llu found "
+ "metadata block %llu in rt bmap\n"),
+ ino, ext);
+ case XR_E_INUSE:
+ if (pwe)
break;
-
- case XR_E_BAD_STATE:
- do_error(_("bad state in rt block map %llu\n"),
- ext);
-
- case XR_E_FS_MAP:
- case XR_E_INO:
- case XR_E_INUSE_FS:
- do_error(_("data fork in rt inode %llu found "
- "metadata block %llu in rt bmap\n"),
+ case XR_E_MULT:
+ set_rtbmap(ext, XR_E_MULT);
+ do_warn(_("data fork in rt inode %llu claims "
+ "used rt block %llu\n"),
ino, ext);
-
- case XR_E_INUSE:
- if (pwe)
- break;
-
- case XR_E_MULT:
- set_rtbno_state(mp, ext, XR_E_MULT);
- do_warn(_("data fork in rt inode %llu claims "
- "used rt block %llu\n"),
- ino, ext);
- return 1;
-
- case XR_E_FREE1:
- default:
- do_error(_("illegal state %d in rt block map "
- "%llu\n"), state, b);
+ return 1;
+ case XR_E_FREE1:
+ default:
+ do_error(_("illegal state %d in rt block map "
+ "%llu\n"), state, b);
}
}
}
- state = get_agbno_state(mp, agno, agbno);
-
+ state = get_bmap(agno, agbno);
switch (state) {
case XR_E_FREE:
case XR_E_FREE1:
forkname, ino, (__uint64_t) b);
/* fall through ... */
case XR_E_UNKNOWN:
- set_agbno_state(mp, agno, agbno, XR_E_INUSE);
+ set_bmap(agno, agbno, XR_E_INUSE);
break;
case XR_E_BAD_STATE:
case XR_E_INUSE:
case XR_E_MULT:
- set_agbno_state(mp, agno, agbno, XR_E_MULT);
+ set_bmap(agno, agbno, XR_E_MULT);
do_warn(_("%s fork in %s inode %llu claims "
"used block %llu\n"),
forkname, ftype, ino, (__uint64_t) b);
logend = mp->m_sb.sb_logstart + mp->m_sb.sb_logblocks;
for (i = mp->m_sb.sb_logstart; i < logend ; i++) {
- set_fsbno_state(mp, i, XR_E_INUSE_FS);
+ set_bmap(XFS_FSB_TO_AGNO(mp, i),
+ XFS_FSB_TO_AGBNO(mp, i), XR_E_INUSE_FS);
}
return;
for (i = 0; i < mp->m_sb.sb_agcount; i++)
for (j = 0; j < end; j++)
- set_agbno_state(mp, i, j, XR_E_INUSE_FS);
+ set_bmap(i, j, XR_E_INUSE_FS);
return;
}
* account for btree roots
*/
for (j = begin; j < end; j++)
- set_agbno_state(mp, i, j, XR_E_INUSE_FS);
+ set_bmap(i, j, XR_E_INUSE_FS);
}
return;
return;
}
-
-#if defined(XR_BMAP_TRACE) || defined(XR_BMAP_DBG)
-int
-get_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
- xfs_agblock_t ag_blockno)
-{
- __uint64_t *addr;
-
- addr = ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM;
-
- return((*addr >> (((ag_blockno)%XR_BB_NUM)*XR_BB)) & XR_BB_MASK);
-}
-
-void set_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
- xfs_agblock_t ag_blockno, int state)
-{
- __uint64_t *addr;
-
- addr = ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM;
-
- *addr = (((*addr) &
- (~((__uint64_t) XR_BB_MASK << (((ag_blockno)%XR_BB_NUM)*XR_BB)))) |
- (((__uint64_t) (state)) << (((ag_blockno)%XR_BB_NUM)*XR_BB)));
-}
-
-int
-get_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno)
-{
- return(get_agbno_state(mp, XFS_FSB_TO_AGNO(mp, blockno),
- XFS_FSB_TO_AGBNO(mp, blockno)));
-}
-
-void
-set_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno, int state)
-{
- set_agbno_state(mp, XFS_FSB_TO_AGNO(mp, blockno),
- XFS_FSB_TO_AGBNO(mp, blockno), state);
-
- return;
-}
-#endif
* you want to use the regular block map.
*/
-#if defined(XR_BMAP_TRACE) || defined(XR_BMAP_DBG)
-/*
- * implemented as functions for debugging purposes
- */
-int get_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
- xfs_agblock_t ag_blockno);
-void set_agbno_state(xfs_mount_t *mp, xfs_agnumber_t agno,
- xfs_agblock_t ag_blockno, int state);
-
-int get_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno);
-void set_fsbno_state(xfs_mount_t *mp, xfs_dfsbno_t blockno, int state);
-#else
-/*
- * implemented as macros for performance purposes
- */
-
-#define get_agbno_state(mp, agno, ag_blockno) \
+#define get_bmap(agno, ag_blockno) \
((int) (*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) \
>> (((ag_blockno)%XR_BB_NUM)*XR_BB)) \
& XR_BB_MASK)
-#define set_agbno_state(mp, agno, ag_blockno, state) \
+#define set_bmap(agno, ag_blockno, state) \
*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) = \
((*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM) & \
(~((__uint64_t) XR_BB_MASK << (((ag_blockno)%XR_BB_NUM)*XR_BB)))) | \
(((__uint64_t) (state)) << (((ag_blockno)%XR_BB_NUM)*XR_BB)))
-#define get_fsbno_state(mp, blockno) \
- get_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
- XFS_FSB_TO_AGBNO(mp, (blockno)))
-#define set_fsbno_state(mp, blockno, state) \
- set_agbno_state(mp, XFS_FSB_TO_AGNO(mp, (blockno)), \
- XFS_FSB_TO_AGBNO(mp, (blockno)), (state))
-
-
-#define get_agbno_rec(mp, agno, ag_blockno) \
- (*(ba_bmap[(agno)] + (ag_blockno)/XR_BB_NUM))
-#endif /* XR_BMAP_TRACE */
-
/*
* these work in real-time extents (e.g. fsbno == rt extent number)
*/
-#define get_rtbno_state(mp, fsbno) \
+#define get_rtbmap(fsbno) \
((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) >> \
(((fsbno)%XR_BB_NUM)*XR_BB)) & XR_BB_MASK)
-#define set_rtbno_state(mp, fsbno, state) \
+#define set_rtbmap(fsbno, state) \
*(rt_ba_bmap + (fsbno)/XR_BB_NUM) = \
((*(rt_ba_bmap + (fsbno)/XR_BB_NUM) & \
(~((__uint64_t) XR_BB_MASK << (((fsbno)%XR_BB_NUM)*XR_BB)))) | \
* also mark blocks
*/
for (b = 0; b < mp->m_ialloc_blks; b++) {
- set_agbno_state(mp, 0,
+ set_bmap(0,
b + XFS_INO_TO_AGBNO(mp, mp->m_sb.sb_rootino),
XR_E_INO);
}
agbno = XFS_AGINO_TO_AGBNO(mp, current_ino);
pthread_mutex_lock(&ag_locks[agno]);
- switch (state = get_agbno_state(mp,
- agno, agbno)) {
- case XR_E_UNKNOWN:
- case XR_E_FREE:
- case XR_E_FREE1:
- set_agbno_state(mp, agno, agbno,
- XR_E_INO);
- break;
+ state = get_bmap(agno, agbno);
+ switch (state) {
+ case XR_E_UNKNOWN:
+ case XR_E_FREE:
+ case XR_E_FREE1:
+ set_bmap(agno, agbno, XR_E_INO);
+ break;
case XR_E_BAD_STATE:
do_error(_(
"bad state in block map %d\n"),
* anyway, hopefully without
* losing too much other data
*/
- set_agbno_state(mp, agno, agbno,
- XR_E_INO);
+ set_bmap(agno, agbno, XR_E_INO);
break;
}
pthread_mutex_unlock(&ag_locks[agno]);
}
}
- bstate = get_agbno_state(mp, i, j);
-
+ bstate = get_bmap(i, j);
switch (bstate) {
case XR_E_BAD_STATE:
default:
rt_len = 0;
for (bno = 0; bno < mp->m_sb.sb_rextents; bno++) {
-
- bstate = get_rtbno_state(mp, bno);
-
+ bstate = get_rtbmap(bno);
switch (bstate) {
case XR_E_BAD_STATE:
default:
roundup((mp->m_sb.sb_agblocks+(NBBY/XR_BB)-1)/(NBBY/XR_BB),
sizeof(__uint64_t)));
for (j = 0; j < ag_hdr_block; j++)
- set_agbno_state(mp, i, j, XR_E_INUSE_FS);
+ set_bmap(i, j, XR_E_INUSE_FS);
}
set_bmap_rt(mp->m_sb.sb_rextents);
set_bmap_log(mp);
for (agbno = 0; agbno < ag_end; agbno++) {
#if 0
old_state = state;
- state = get_agbno_state(mp, agno, agbno);
+ state = get_bmap(agno, agbno);
if (state != old_state) {
fprintf(stderr, "agbno %u - new state is %d\n",
agbno, state);
}
}
- if (get_agbno_state(mp, agno, agbno) < XR_E_INUSE) {
+ if (get_bmap(agno, agbno) < XR_E_INUSE) {
free_blocks++;
if (in_extent == 0) {
/*
bits = 0;
for (i = 0; i < sizeof(xfs_rtword_t) * NBBY &&
extno < mp->m_sb.sb_rextents; i++, extno++) {
- if (get_rtbno_state(mp, extno) == XR_E_FREE) {
+ if (get_rtbmap(extno) == XR_E_FREE) {
sb_frextents++;
bits |= freebit;
bit < bitsperblock && extno < mp->m_sb.sb_rextents;
bit++, extno++) {
if (xfs_isset(words, bit)) {
- set_rtbno_state(mp, extno, XR_E_FREE);
+ set_rtbmap(extno, XR_E_FREE);
sb_frextents++;
if (prevbit == 0) {
start_bmbno = bmbno;
xfs_dfiloff_t last_key;
char *forkname;
int numrecs;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ int state;
if (whichfork == XFS_DATA_FORK)
forkname = _("data");
bm_cursor->level[level].right_fsbno =
be64_to_cpu(block->bb_u.l.bb_rightsib);
- switch (get_fsbno_state(mp, bno)) {
+ agno = XFS_FSB_TO_AGNO(mp, bno);
+ agbno = XFS_FSB_TO_AGBNO(mp, bno);
+
+ state = get_bmap(agno, agbno);
+ switch (state) {
case XR_E_UNKNOWN:
case XR_E_FREE1:
case XR_E_FREE:
- set_fsbno_state(mp, bno, XR_E_INUSE);
+ set_bmap(agno, agbno, XR_E_INUSE);
break;
case XR_E_FS_MAP:
case XR_E_INUSE:
* we made it here, the block probably
* contains btree data.
*/
- set_fsbno_state(mp, bno, XR_E_MULT);
+ set_bmap(agno, agbno, XR_E_MULT);
do_warn(
_("inode 0x%llx bmap block 0x%llx claimed, state is %d\n"),
- ino, (__uint64_t) bno,
- get_fsbno_state(mp, bno));
+ ino, (__uint64_t) bno, state);
break;
case XR_E_MULT:
case XR_E_INUSE_FS:
- set_fsbno_state(mp, bno, XR_E_MULT);
+ set_bmap(agno, agbno, XR_E_MULT);
do_warn(
_("inode 0x%llx bmap block 0x%llx claimed, state is %d\n"),
- ino, (__uint64_t) bno,
- get_fsbno_state(mp, bno));
+ ino, (__uint64_t) bno, state);
/*
* if we made it to here, this is probably a bmap block
* that is being used by *another* file as a bmap block
default:
do_warn(
_("bad state %d, inode 0x%llx bmap block 0x%llx\n"),
- get_fsbno_state(mp, bno),
- ino, (__uint64_t) bno);
+ state, ino, (__uint64_t) bno);
break;
}
} else {
/*
* check for btree blocks multiply claimed
*/
- state = get_agbno_state(mp, agno, bno);
-
- switch (state) {
- case XR_E_UNKNOWN:
- set_agbno_state(mp, agno, bno, XR_E_FS_MAP);
- break;
- default:
- set_agbno_state(mp, agno, bno, XR_E_MULT);
+ state = get_bmap(agno, bno);
+ if (state != XR_E_UNKNOWN) {
+ set_bmap(agno, bno, XR_E_MULT);
do_warn(
_("%s freespace btree block claimed (state %d), agno %d, bno %d, suspect %d\n"),
name, state, agno, bno, suspect);
return;
}
+ set_bmap(agno, bno, XR_E_FS_MAP);
numrecs = be16_to_cpu(block->bb_numrecs);
continue;
for ( ; b < end; b++) {
- state = get_agbno_state(mp, agno, b);
+ state = get_bmap(agno, b);
switch (state) {
case XR_E_UNKNOWN:
- set_agbno_state(mp, agno, b,
- XR_E_FREE1);
+ set_bmap(agno, b, XR_E_FREE1);
break;
case XR_E_FREE1:
/*
* FREE1 blocks later
*/
if (magic == XFS_ABTC_MAGIC) {
- set_agbno_state(mp, agno, b,
- XR_E_FREE);
+ set_bmap(agno, b, XR_E_FREE);
break;
}
default:
j < XFS_INODES_PER_CHUNK;
j += mp->m_sb.sb_inopblock) {
agbno = XFS_AGINO_TO_AGBNO(mp, ino + j);
- state = get_agbno_state(mp, agno, agbno);
+
+ state = get_bmap(agno, agbno);
if (state == XR_E_UNKNOWN) {
- set_agbno_state(mp, agno, agbno, XR_E_INO);
+ set_bmap(agno, agbno, XR_E_INO);
} else if (state == XR_E_INUSE_FS && agno == 0 &&
ino + j >= first_prealloc_ino &&
ino + j < last_prealloc_ino) {
- set_agbno_state(mp, agno, agbno, XR_E_INO);
+ set_bmap(agno, agbno, XR_E_INO);
} else {
do_warn(
_("inode chunk claims used block, inobt block - agno %d, bno %d, inopb %d\n"),
* check for btree blocks multiply claimed, any unknown/free state
* is ok in the bitmap block.
*/
- state = get_agbno_state(mp, agno, bno);
-
+ state = get_bmap(agno, bno);
switch (state) {
case XR_E_UNKNOWN:
case XR_E_FREE1:
case XR_E_FREE:
- set_agbno_state(mp, agno, bno, XR_E_FS_MAP);
+ set_bmap(agno, bno, XR_E_FS_MAP);
break;
default:
- set_agbno_state(mp, agno, bno, XR_E_MULT);
+ set_bmap(agno, bno, XR_E_MULT);
do_warn(
_("inode btree block claimed (state %d), agno %d, bno %d, suspect %d\n"),
state, agno, bno, suspect);
if (XFS_SB_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
XFS_AGF_BLOCK(mp) != XFS_AGFL_BLOCK(mp) &&
XFS_AGI_BLOCK(mp) != XFS_AGFL_BLOCK(mp))
- set_agbno_state(mp, agno, XFS_AGFL_BLOCK(mp), XR_E_FS_MAP);
+ set_bmap(agno, XFS_AGFL_BLOCK(mp), XR_E_FS_MAP);
if (be32_to_cpu(agf->agf_flcount) == 0)
return;
for (;;) {
bno = be32_to_cpu(agfl->agfl_bno[i]);
if (verify_agbno(mp, agno, bno))
- set_agbno_state(mp, agno, bno, XR_E_FREE);
+ set_bmap(agno, bno, XR_E_FREE);
else
do_warn(_("bad agbno %u in agfl, agno %d\n"),
bno, agno);