+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libxfs.h"
xfs_agino_t freecount;
};
-static __uint64_t *sb_icount_ag; /* allocated inodes per ag */
-static __uint64_t *sb_ifree_ag; /* free inodes per ag */
-static __uint64_t *sb_fdblocks_ag; /* free data blocks per ag */
+static uint64_t *sb_icount_ag; /* allocated inodes per ag */
+static uint64_t *sb_ifree_ag; /* free inodes per ag */
+static uint64_t *sb_fdblocks_ag; /* free data blocks per ag */
static int
mk_incore_fstree(xfs_mount_t *mp, xfs_agnumber_t agno)
* as they will be *after* accounting for the free space
* we've used up will need fewer blocks to to represent
* than we've allocated. We can use the AGFL to hold
- * XFS_AGFL_SIZE (sector/xfs_agfl_t) blocks but that's it.
- * Thus we limit things to XFS_AGFL_SIZE/2 for each of the 2 btrees.
+ * xfs_agfl_size (sector/xfs_agfl_t) blocks but that's it.
+ * Thus we limit things to xfs_agfl_size/2 for each of the 2 btrees.
* if the number of extra blocks is more than that,
* we'll have to be called again.
*/
!= btree_curs->level[0].num_blocks) {
/*
* yes -- recalculate the cursor. If the number of
- * excess (overallocated) blocks is < XFS_AGFL_SIZE/2, we're ok.
+ * excess (overallocated) blocks is < xfs_agfl_size/2, we're ok.
* we can put those into the AGFL. we don't try
* and get things to converge exactly (reach a
* state with zero excess blocks) because there
return(extra_blocks);
}
+/*
+ * Map btnum to buffer ops for the types that need it.
+ *
+ * bnobt/cntbt share the allocbt verifier and inobt/finobt share the
+ * inobt verifier; rmap and refcount each have their own.  Any other
+ * btree type is unexpected by the callers in this file, so ASSERT
+ * and return NULL.
+ */
+static const struct xfs_buf_ops *
+btnum_to_ops(
+	xfs_btnum_t	btnum)
+{
+	switch (btnum) {
+	case XFS_BTNUM_BNO:
+	case XFS_BTNUM_CNT:
+		return &xfs_allocbt_buf_ops;
+	case XFS_BTNUM_INO:
+	case XFS_BTNUM_FINO:
+		return &xfs_inobt_buf_ops;
+	case XFS_BTNUM_RMAP:
+		return &xfs_rmapbt_buf_ops;
+	case XFS_BTNUM_REFC:
+		return &xfs_refcountbt_buf_ops;
+	default:
+		ASSERT(0);
+		return NULL;
+	}
+}
+
static void
prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
bt_status_t *btree_curs, xfs_agblock_t startblock,
- xfs_extlen_t blockcount, int level, __uint32_t magic)
+ xfs_extlen_t blockcount, int level, xfs_btnum_t btnum)
{
struct xfs_btree_block *bt_hdr;
xfs_alloc_key_t *bt_key;
xfs_alloc_ptr_t *bt_ptr;
xfs_agblock_t agbno;
bt_stat_level_t *lptr;
- __uint32_t crc_magic;
+ const struct xfs_buf_ops *ops = btnum_to_ops(btnum);
- if (magic == XFS_ABTB_MAGIC)
- crc_magic = XFS_ABTB_CRC_MAGIC;
- else
- crc_magic = XFS_ABTC_CRC_MAGIC;
+ ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
level++;
* left-hand side of the tree.
*/
prop_freespace_cursor(mp, agno, btree_curs, startblock,
- blockcount, level, magic);
+ blockcount, level, btnum);
}
if (be16_to_cpu(bt_hdr->bb_numrecs) ==
/*
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- libxfs_btree_init_block(mp, lptr->buf_p, crc_magic, level,
- 0, agno, XFS_BTREE_CRC_BLOCKS);
- else
- libxfs_btree_init_block(mp, lptr->buf_p, magic, level,
- 0, agno, 0);
+ libxfs_btree_init_block(mp, lptr->buf_p, btnum, level,
+ 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
* propagate extent record for first extent in new block up
*/
prop_freespace_cursor(mp, agno, btree_curs, startblock,
- blockcount, level, magic);
+ blockcount, level, btnum);
}
/*
* add extent info to current block
}
/*
- * rebuilds a freespace tree given a cursor and magic number of type
+ * rebuilds a freespace tree given a cursor and type
* of tree to build (bno or bcnt). returns the number of free blocks
* represented by the tree.
*/
static xfs_extlen_t
build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
- bt_status_t *btree_curs, __uint32_t magic)
+ bt_status_t *btree_curs, xfs_btnum_t btnum)
{
xfs_agnumber_t i;
xfs_agblock_t j;
extent_tree_node_t *ext_ptr;
bt_stat_level_t *lptr;
xfs_extlen_t freeblks;
- __uint32_t crc_magic;
+ const struct xfs_buf_ops *ops = btnum_to_ops(btnum);
+
+ ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
#ifdef XR_BLD_FREE_TRACE
fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
freeblks = 0;
ASSERT(level > 0);
- if (magic == XFS_ABTB_MAGIC)
- crc_magic = XFS_ABTB_CRC_MAGIC;
- else
- crc_magic = XFS_ABTC_CRC_MAGIC;
/*
* initialize the first block on each btree level
/*
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- libxfs_btree_init_block(mp, lptr->buf_p, crc_magic, i,
- 0, agno, XFS_BTREE_CRC_BLOCKS);
- else
- libxfs_btree_init_block(mp, lptr->buf_p, magic, i,
- 0, agno, 0);
+ libxfs_btree_init_block(mp, lptr->buf_p, btnum, i, 0, agno, 0);
}
/*
* run along leaf, setting up records. as we have to switch
* pointers for the parent. that can recurse up to the root
* if required. set the sibling pointers for leaf level here.
*/
- if (magic == XFS_ABTB_MAGIC)
+ if (btnum == XFS_BTNUM_BNO)
ext_ptr = findfirst_bno_extent(agno);
else
ext_ptr = findfirst_bcnt_extent(agno);
/*
* block initialization, lay in block header
*/
- lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- libxfs_btree_init_block(mp, lptr->buf_p, crc_magic, 0,
- 0, agno, XFS_BTREE_CRC_BLOCKS);
- else
- libxfs_btree_init_block(mp, lptr->buf_p, magic, 0,
- 0, agno, 0);
+ libxfs_btree_init_block(mp, lptr->buf_p, btnum, 0, 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
prop_freespace_cursor(mp, agno, btree_curs,
ext_ptr->ex_startblock,
ext_ptr->ex_blockcount,
- 0, magic);
+ 0, btnum);
bt_rec = (xfs_alloc_rec_t *)
((char *)bt_hdr + XFS_ALLOC_BLOCK_LEN(mp));
bt_rec[j].ar_blockcount = cpu_to_be32(
ext_ptr->ex_blockcount);
freeblks += ext_ptr->ex_blockcount;
- if (magic == XFS_ABTB_MAGIC)
+ if (btnum == XFS_BTNUM_BNO)
ext_ptr = findnext_bno_extent(ext_ptr);
else
ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
*/
static void
init_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
- __uint64_t *num_inos, __uint64_t *num_free_inos, int finobt)
+ uint64_t *num_inos, uint64_t *num_free_inos, int finobt)
{
- __uint64_t ninos;
- __uint64_t nfinos;
+ uint64_t ninos;
+ uint64_t nfinos;
int rec_nfinos;
int rec_ninos;
ino_tree_node_t *ino_rec;
static void
prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
- xfs_agino_t startino, int level)
+ xfs_btnum_t btnum, xfs_agino_t startino, int level)
{
struct xfs_btree_block *bt_hdr;
xfs_inobt_key_t *bt_key;
xfs_inobt_ptr_t *bt_ptr;
xfs_agblock_t agbno;
bt_stat_level_t *lptr;
+ const struct xfs_buf_ops *ops = btnum_to_ops(btnum);
level++;
* first path up the left side of the tree
* where the agbno's are already set up
*/
- prop_ino_cursor(mp, agno, btree_curs, startino, level);
+ prop_ino_cursor(mp, agno, btree_curs, btnum, startino, level);
}
if (be16_to_cpu(bt_hdr->bb_numrecs) ==
/*
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_CRC_MAGIC,
- level, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
- else
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_MAGIC,
- level, 0, agno, 0);
+ libxfs_btree_init_block(mp, lptr->buf_p, btnum,
+ level, 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
/*
* propagate extent record for first extent in new block up
*/
- prop_ino_cursor(mp, agno, btree_curs, startino, level);
+ prop_ino_cursor(mp, agno, btree_curs, btnum, startino, level);
}
/*
* add inode info to current block
*/
static void
build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
- bt_status_t *btree_curs, __uint32_t magic,
- struct agi_stat *agi_stat, int finobt)
+ bt_status_t *btree_curs, xfs_btnum_t btnum,
+ struct agi_stat *agi_stat)
{
xfs_agnumber_t i;
xfs_agblock_t j;
xfs_inobt_rec_t *bt_rec;
ino_tree_node_t *ino_rec;
bt_stat_level_t *lptr;
+ const struct xfs_buf_ops *ops = btnum_to_ops(btnum);
xfs_agino_t count = 0;
xfs_agino_t freecount = 0;
int inocnt;
uint64_t sparse;
uint16_t holemask;
+ ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);
+
for (i = 0; i < level; i++) {
lptr = &btree_curs->level[i];
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- libxfs_btree_init_block(mp, lptr->buf_p, magic,
- i, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
- else
- libxfs_btree_init_block(mp, lptr->buf_p, magic,
- i, 0, agno, 0);
+ libxfs_btree_init_block(mp, lptr->buf_p, btnum, i, 0, agno, 0);
}
/*
* pointers for the parent. that can recurse up to the root
* if required. set the sibling pointers for leaf level here.
*/
- if (finobt)
+ if (btnum == XFS_BTNUM_FINO)
ino_rec = findfirst_free_inode_rec(agno);
else
ino_rec = findfirst_inode_rec(agno);
/*
* block initialization, lay in block header
*/
- lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- libxfs_btree_init_block(mp, lptr->buf_p, magic,
- 0, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
- else
- libxfs_btree_init_block(mp, lptr->buf_p, magic,
- 0, 0, agno, 0);
+ libxfs_btree_init_block(mp, lptr->buf_p, btnum, 0, 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
lptr->modulo--;
if (lptr->num_recs_pb > 0)
- prop_ino_cursor(mp, agno, btree_curs,
+ prop_ino_cursor(mp, agno, btree_curs, btnum,
ino_rec->ino_startnum, 0);
bt_rec = (xfs_inobt_rec_t *)
freecount += finocnt;
count += inocnt;
- if (finobt)
+ if (btnum == XFS_BTNUM_FINO)
ino_rec = next_free_ino_rec(ino_rec);
else
ino_rec = next_ino_rec(ino_rec);
xfs_rmap_ptr_t *bt_ptr;
xfs_agblock_t agbno;
struct bt_stat_level *lptr;
+ const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_RMAP);
level++;
/*
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_rmapbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_RMAP_CRC_MAGIC,
- level, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
+ libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
+ level, 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
bt_key->rm_offset = cpu_to_be64(
libxfs_rmap_irec_offset_pack(&high_key));
- for (i = 1; i < numrecs - 1; i++) {
+ for (i = 1; i <= numrecs; i++) {
bt_key = XFS_RMAP_HIGH_KEY_ADDR(bt_hdr, i);
key.rm_startblock = be32_to_cpu(bt_key->rm_startblock);
key.rm_owner = be64_to_cpu(bt_key->rm_owner);
struct xfs_rmap_irec highest_key = {0};
struct xfs_rmap_irec hi_key = {0};
struct bt_stat_level *lptr;
+ const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_RMAP);
+ int numrecs;
int level = btree_curs->num_levels;
int error;
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_rmapbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_RMAP_CRC_MAGIC,
- i, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
+ libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
+ i, 0, agno, 0);
}
/*
rm_rec = pop_slab_cursor(rmap_cur);
lptr = &btree_curs->level[0];
- for (i = 0; i < lptr->num_blocks && rm_rec != NULL; i++) {
+ for (i = 0; i < lptr->num_blocks; i++) {
+ numrecs = lptr->num_recs_pb + (lptr->modulo > 0);
+ ASSERT(rm_rec != NULL || numrecs == 0);
+
/*
* block initialization, lay in block header
*/
- lptr->buf_p->b_ops = &xfs_rmapbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_RMAP_CRC_MAGIC,
- 0, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
+ libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_RMAP,
+ 0, 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
- bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
- (lptr->modulo > 0));
+ bt_hdr->bb_numrecs = cpu_to_be16(numrecs);
if (lptr->modulo > 0)
lptr->modulo--;
xfs_refcount_ptr_t *bt_ptr;
xfs_agblock_t agbno;
struct bt_stat_level *lptr;
+ const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_REFC);
level++;
/*
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_refcountbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_REFC_CRC_MAGIC,
- level, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
+ libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
+ level, 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
struct xfs_slab_cursor *refc_cur;
struct xfs_refcount_rec *bt_rec;
struct bt_stat_level *lptr;
+ const struct xfs_buf_ops *ops = btnum_to_ops(XFS_BTNUM_REFC);
+ int numrecs;
int level = btree_curs->num_levels;
int error;
* initialize block header
*/
- lptr->buf_p->b_ops = &xfs_refcountbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_REFC_CRC_MAGIC,
- i, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
+ libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
+ i, 0, agno, 0);
}
/*
lptr = &btree_curs->level[0];
for (i = 0; i < lptr->num_blocks; i++) {
+ numrecs = lptr->num_recs_pb + (lptr->modulo > 0);
+ ASSERT(refc_rec != NULL || numrecs == 0);
+
/*
* block initialization, lay in block header
*/
- lptr->buf_p->b_ops = &xfs_refcountbt_buf_ops;
+ lptr->buf_p->b_ops = ops;
bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
- libxfs_btree_init_block(mp, lptr->buf_p, XFS_REFC_CRC_MAGIC,
- 0, 0, agno,
- XFS_BTREE_CRC_BLOCKS);
+ libxfs_btree_init_block(mp, lptr->buf_p, XFS_BTNUM_REFC,
+ 0, 0, agno, 0);
bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
- bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
- (lptr->modulo > 0));
+ bt_hdr->bb_numrecs = cpu_to_be16(numrecs);
if (lptr->modulo > 0)
lptr->modulo--;
memset(agf, 0, mp->m_sb.sb_sectsize);
#ifdef XR_BLD_FREE_TRACE
- fprintf(stderr, "agf = 0x%p, agf_buf->b_addr = 0x%p\n",
+ fprintf(stderr, "agf = %p, agf_buf->b_addr = %p\n",
agf, agf_buf->b_addr);
#endif
agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
agfl->agfl_seqno = cpu_to_be32(agno);
platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
- for (i = 0; i < XFS_AGFL_SIZE(mp); i++)
+ for (i = 0; i < libxfs_agfl_size(mp); i++)
agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
}
freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);
* yes, now grab as many blocks as we can
*/
i = 0;
- while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
+ while (bno_bt->num_free_blocks > 0 && i < libxfs_agfl_size(mp))
+ {
freelist[i] = cpu_to_be32(
get_next_blockaddr(agno, 0, bno_bt));
i++;
}
- while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
+ while (bcnt_bt->num_free_blocks > 0 && i < libxfs_agfl_size(mp))
+ {
freelist[i] = cpu_to_be32(
get_next_blockaddr(agno, 0, bcnt_bt));
i++;
} else {
agf->agf_flfirst = 0;
- agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
+ agf->agf_fllast = cpu_to_be32(libxfs_agfl_size(mp) - 1);
agf->agf_flcount = 0;
}
xfs_agnumber_t agno,
struct xfs_slab *lost_fsb)
{
- __uint64_t num_inos;
- __uint64_t num_free_inos;
- __uint64_t finobt_num_inos;
- __uint64_t finobt_num_free_inos;
+ uint64_t num_inos;
+ uint64_t num_free_inos;
+ uint64_t finobt_num_inos;
+ uint64_t finobt_num_free_inos;
bt_status_t bno_btree_curs;
bt_status_t bcnt_btree_curs;
bt_status_t ino_btree_curs;
xfs_extlen_t freeblks2;
#endif
xfs_agblock_t num_extents;
- __uint32_t magic;
struct agi_stat agi_stat = {0,};
int error;
/*
* see if we can fit all the extra blocks into the AGFL
*/
- extra_blocks = (extra_blocks - XFS_AGFL_SIZE(mp) > 0)
- ? extra_blocks - XFS_AGFL_SIZE(mp)
+ extra_blocks = (extra_blocks - libxfs_agfl_size(mp) > 0)
+ ? extra_blocks - libxfs_agfl_size(mp)
: 0;
if (extra_blocks > 0)
* now rebuild the freespace trees
*/
freeblks1 = build_freespace_tree(mp, agno,
- &bno_btree_curs, XFS_ABTB_MAGIC);
+ &bno_btree_curs, XFS_BTNUM_BNO);
#ifdef XR_BLD_FREE_TRACE
fprintf(stderr, "# of free blocks == %d\n", freeblks1);
#endif
#ifdef DEBUG
freeblks2 = build_freespace_tree(mp, agno,
- &bcnt_btree_curs, XFS_ABTC_MAGIC);
+ &bcnt_btree_curs, XFS_BTNUM_CNT);
#else
(void) build_freespace_tree(mp, agno,
- &bcnt_btree_curs, XFS_ABTC_MAGIC);
+ &bcnt_btree_curs, XFS_BTNUM_CNT);
#endif
write_cursor(&bcnt_btree_curs);
/*
* build inode allocation tree.
*/
- magic = xfs_sb_version_hascrc(&mp->m_sb) ?
- XFS_IBT_CRC_MAGIC : XFS_IBT_MAGIC;
- build_ino_tree(mp, agno, &ino_btree_curs, magic, &agi_stat, 0);
+ build_ino_tree(mp, agno, &ino_btree_curs, XFS_BTNUM_INO,
+ &agi_stat);
write_cursor(&ino_btree_curs);
/*
* build free inode tree
*/
if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
- magic = xfs_sb_version_hascrc(&mp->m_sb) ?
- XFS_FIBT_CRC_MAGIC : XFS_FIBT_MAGIC;
- build_ino_tree(mp, agno, &fino_btree_curs, magic,
- NULL, 1);
+ build_ino_tree(mp, agno, &fino_btree_curs,
+ XFS_BTNUM_FINO, NULL);
write_cursor(&fino_btree_curs);
}
struct xfs_trans *tp = NULL;
struct xfs_slab_cursor *cur = NULL;
xfs_fsblock_t *fsb;
- struct xfs_trans_res tres = {0};
- struct xfs_owner_info oinfo;
int error;
- libxfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
error = init_slab_cursor(lost_fsbs, NULL, &cur);
if (error)
return error;
while ((fsb = pop_slab_cursor(cur)) != NULL) {
- error = -libxfs_trans_alloc(mp, &tres, 16, 0, 0, &tp);
+ error = -libxfs_trans_alloc_rollable(mp, 16, &tp);
if (error)
goto out_cancel;
- error = -libxfs_free_extent(tp, *fsb, 1, &oinfo,
+ error = -libxfs_free_extent(tp, *fsb, 1, &XFS_RMAP_OINFO_AG,
XFS_AG_RESV_NONE);
if (error)
goto out_cancel;
int error;
do_log(_("Phase 5 - rebuild AG headers and trees...\n"));
- set_progress_msg(PROG_FMT_REBUILD_AG, (__uint64_t )glob_agcount);
+ set_progress_msg(PROG_FMT_REBUILD_AG, (uint64_t)glob_agcount);
#ifdef XR_BLD_FREE_TRACE
fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
keep_fsinos(mp);
/* allocate per ag counters */
- sb_icount_ag = calloc(mp->m_sb.sb_agcount, sizeof(__uint64_t));
+ sb_icount_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
if (sb_icount_ag == NULL)
do_error(_("cannot alloc sb_icount_ag buffers\n"));
- sb_ifree_ag = calloc(mp->m_sb.sb_agcount, sizeof(__uint64_t));
+ sb_ifree_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
if (sb_ifree_ag == NULL)
do_error(_("cannot alloc sb_ifree_ag buffers\n"));
- sb_fdblocks_ag = calloc(mp->m_sb.sb_agcount, sizeof(__uint64_t));
+ sb_fdblocks_ag = calloc(mp->m_sb.sb_agcount, sizeof(uint64_t));
if (sb_fdblocks_ag == NULL)
do_error(_("cannot alloc sb_fdblocks_ag buffers\n"));