Merge of master-melb:xfs-cmds:32503a by kenmcd.
Sync up libxfs to latest kernel code
xfs_agnumber_t num_ags, agno;
xfs_agblock_t bno;
xfs_daddr_t begin, next_begin, ag_begin, new_begin, ag_end;
- xfs_alloc_block_t *block;
+ struct xfs_btree_block *block;
xfs_alloc_ptr_t *ptr;
xfs_alloc_rec_t *rec_ptr;
extern char *optarg;
- (__uint64_t)mp->m_sb.sb_fdblocks + 10 * num_ags));
kids = num_targets;
- block = (xfs_alloc_block_t *) btree_buf.data;
+ block = (struct xfs_btree_block *) btree_buf.data;
for (agno = 0; agno < num_ags && kids > 0; agno++) {
/* read in first blocks of the ag */
btree_buf.length = source_blocksize;
read_wbuf(source_fd, &btree_buf, mp);
- block = (xfs_alloc_block_t *) ((char *) btree_buf.data
- + pos - btree_buf.position);
+ block = (struct xfs_btree_block *)
+ ((char *)btree_buf.data +
+ pos - btree_buf.position);
ASSERT(be32_to_cpu(block->bb_magic) == XFS_ABTB_MAGIC);
if (be16_to_cpu(block->bb_level) == 0)
break;
- ptr = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1,
+ ptr = XFS_ALLOC_PTR_ADDR(mp, block, 1,
mp->m_alloc_mxr[1]);
bno = be32_to_cpu(ptr[0]);
}
exit(1);
}
- rec_ptr = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ rec_ptr = XFS_ALLOC_REC_ADDR(mp, block, 1);
for (i = 0; i < be16_to_cpu(block->bb_numrecs);
i++, rec_ptr++) {
/* calculate in daddr's */
w_buf.min_io_size >> BBSHIFT);
}
- if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK)
+ if (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK)
break;
/* read in next btree record block */
btree_buf.position = pos = (xfs_off_t)
XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(
- block->bb_rightsib)) << BBSHIFT;
+ block->bb_u.s.bb_rightsib)) << BBSHIFT;
btree_buf.length = source_blocksize;
/* let read_wbuf handle alignment */
read_wbuf(source_fd, &btree_buf, mp);
- block = (xfs_alloc_block_t *) ((char *) btree_buf.data
- + pos - btree_buf.position);
+ block = (struct xfs_btree_block *)
+ ((char *) btree_buf.data +
+ pos - btree_buf.position);
ASSERT(be32_to_cpu(block->bb_magic) == XFS_ABTB_MAGIC);
}
int *nexp,
bmap_ext_t *bep)
{
- xfs_bmbt_block_t *block;
+ struct xfs_btree_block *block;
xfs_fsblock_t bno;
xfs_dfiloff_t curoffset;
xfs_dinode_t *dip;
bno = NULLFSBLOCK;
rblock = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
fsize = XFS_DFORK_SIZE(dip, mp, whichfork);
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, rblock, 1,
- XFS_BTREE_BLOCK_MAXRECS(fsize, xfs_bmdr, 0));
- kp = XFS_BTREE_KEY_ADDR(xfs_bmdr, rblock, 1);
+ pp = XFS_BMDR_PTR_ADDR(rblock, 1, xfs_bmdr_maxrecs(mp, fsize, 0));
+ kp = XFS_BMDR_KEY_ADDR(rblock, 1);
bno = select_child(curoffset, kp, pp,
be16_to_cpu(rblock->bb_numrecs));
for (;;) {
set_cur(&typtab[typ], XFS_FSB_TO_DADDR(mp, bno),
blkbb, DB_RING_IGN, NULL);
- block = (xfs_bmbt_block_t *)iocur_top->data;
+ block = (struct xfs_btree_block *)iocur_top->data;
if (be16_to_cpu(block->bb_level) == 0)
break;
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1,
- XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize,
- xfs_bmbt, 0));
- kp = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, 1);
+ pp = XFS_BMDR_PTR_ADDR(block, 1,
+ xfs_bmbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0));
+ kp = XFS_BMDR_KEY_ADDR(block, 1);
bno = select_child(curoffset, kp, pp,
be16_to_cpu(block->bb_numrecs));
}
for (;;) {
- nextbno = be64_to_cpu(block->bb_rightsib);
+ nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
nextents = be16_to_cpu(block->bb_numrecs);
- xp = (xfs_bmbt_rec_64_t *)XFS_BTREE_REC_ADDR(xfs_bmbt,
- block, 1);
+ xp = (xfs_bmbt_rec_64_t *)
+ XFS_BMBT_REC_ADDR(mp, block, 1);
for (ep = xp; ep < &xp[nextents] && n < nex; ep++) {
if (!bmap_one_extent(ep, &curoffset, eoffset,
&n, bep)) {
break;
set_cur(&typtab[typ], XFS_FSB_TO_DADDR(mp, bno),
blkbb, DB_RING_IGN, NULL);
- block = (xfs_bmbt_block_t *)iocur_top->data;
+ block = (struct xfs_btree_block *)iocur_top->data;
}
pop_cur();
}
block = (xfs_bmdr_block_t *)((char *)obj + byteize(startoff));
ASSERT(XFS_DFORK_Q(dip) && (char *)block == XFS_DFORK_APTR(dip));
ASSERT(be16_to_cpu(block->bb_level) > 0);
- kp = XFS_BTREE_KEY_ADDR(xfs_bmdr, block, idx);
+ kp = XFS_BMDR_KEY_ADDR(block, idx);
return bitize((int)((char *)kp - (char *)block));
}
block = (xfs_bmdr_block_t *)((char *)obj + byteize(startoff));
ASSERT(XFS_DFORK_Q(dip) && (char *)block == XFS_DFORK_APTR(dip));
ASSERT(be16_to_cpu(block->bb_level) > 0);
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, block, idx,
- XFS_BTREE_BLOCK_MAXRECS(XFS_DFORK_ASIZE(dip, mp), xfs_bmdr, 0));
+ pp = XFS_BMDR_PTR_ADDR(block, idx,
+ xfs_bmdr_maxrecs(mp, XFS_DFORK_ASIZE(dip, mp), 0));
return bitize((int)((char *)pp - (char *)block));
}
dip = obj;
block = (xfs_bmdr_block_t *)((char *)obj + byteize(startoff));
ASSERT(be16_to_cpu(block->bb_level) > 0);
- kp = XFS_BTREE_KEY_ADDR(xfs_bmdr, block, idx);
+ kp = XFS_BMDR_KEY_ADDR(block, idx);
return bitize((int)((char *)kp - (char *)block));
}
dip = obj;
block = (xfs_bmdr_block_t *)((char *)obj + byteize(startoff));
ASSERT(be16_to_cpu(block->bb_level) > 0);
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, block, idx,
- XFS_BTREE_BLOCK_MAXRECS(XFS_DFORK_DSIZE(dip, mp), xfs_bmdr, 0));
+ pp = XFS_BMDR_PTR_ADDR(block, idx,
+ xfs_bmdr_maxrecs(mp, XFS_DFORK_DSIZE(dip, mp), 0));
return bitize((int)((char *)pp - (char *)block));
}
size_t ptr_len;
} btrees[] = {
[/*0x424d415*/0] = { /* BMAP */
- sizeof(struct xfs_btree_lblock),
+ XFS_BTREE_LBLOCK_LEN,
sizeof(xfs_bmbt_key_t),
sizeof(xfs_bmbt_rec_t),
sizeof(__be64),
},
[/*0x4142544*/2] = { /* ABTB */
- sizeof(struct xfs_btree_sblock),
+ XFS_BTREE_SBLOCK_LEN,
sizeof(xfs_alloc_key_t),
sizeof(xfs_alloc_rec_t),
sizeof(__be32),
},
[/*0x4142544*/3] = { /* ABTC */
- sizeof(struct xfs_btree_sblock),
+ XFS_BTREE_SBLOCK_LEN,
sizeof(xfs_alloc_key_t),
sizeof(xfs_alloc_rec_t),
sizeof(__be32),
},
[/*0x4941425*/4] = { /* IABT */
- sizeof(struct xfs_btree_sblock),
+ XFS_BTREE_SBLOCK_LEN,
sizeof(xfs_inobt_key_t),
sizeof(xfs_inobt_rec_t),
sizeof(__be32),
{ NULL }
};
-#define OFF(f) bitize(offsetof(xfs_bmbt_block_t, bb_ ## f))
+#define OFF(f) bitize(offsetof(struct xfs_btree_block, bb_ ## f))
const field_t bmapbta_flds[] = {
{ "magic", FLDT_UINT32X, OI(OFF(magic)), C1, 0, TYP_NONE },
{ "level", FLDT_UINT16D, OI(OFF(level)), C1, 0, TYP_NONE },
{ "numrecs", FLDT_UINT16D, OI(OFF(numrecs)), C1, 0, TYP_NONE },
- { "leftsib", FLDT_DFSBNO, OI(OFF(leftsib)), C1, 0, TYP_BMAPBTA },
- { "rightsib", FLDT_DFSBNO, OI(OFF(rightsib)), C1, 0, TYP_BMAPBTA },
+ { "leftsib", FLDT_DFSBNO, OI(OFF(u.l.bb_leftsib)), C1, 0, TYP_BMAPBTA },
+ { "rightsib", FLDT_DFSBNO, OI(OFF(u.l.bb_rightsib)), C1, 0, TYP_BMAPBTA },
{ "recs", FLDT_BMAPBTAREC, btblock_rec_offset, btblock_rec_count,
FLD_ARRAY|FLD_ABASE1|FLD_COUNT|FLD_OFFSET, TYP_NONE },
{ "keys", FLDT_BMAPBTAKEY, btblock_key_offset, btblock_key_count,
{ "magic", FLDT_UINT32X, OI(OFF(magic)), C1, 0, TYP_NONE },
{ "level", FLDT_UINT16D, OI(OFF(level)), C1, 0, TYP_NONE },
{ "numrecs", FLDT_UINT16D, OI(OFF(numrecs)), C1, 0, TYP_NONE },
- { "leftsib", FLDT_DFSBNO, OI(OFF(leftsib)), C1, 0, TYP_BMAPBTD },
- { "rightsib", FLDT_DFSBNO, OI(OFF(rightsib)), C1, 0, TYP_BMAPBTD },
+ { "leftsib", FLDT_DFSBNO, OI(OFF(u.s.bb_leftsib)), C1, 0, TYP_BMAPBTD },
+ { "rightsib", FLDT_DFSBNO, OI(OFF(u.s.bb_rightsib)), C1, 0, TYP_BMAPBTD },
{ "recs", FLDT_BMAPBTDREC, btblock_rec_offset, btblock_rec_count,
FLD_ARRAY|FLD_ABASE1|FLD_COUNT|FLD_OFFSET, TYP_NONE },
{ "keys", FLDT_BMAPBTDKEY, btblock_key_offset, btblock_key_count,
{ NULL }
};
-#define OFF(f) bitize(offsetof(struct xfs_btree_sblock, bb_ ## f))
+#define OFF(f) bitize(offsetof(struct xfs_btree_block, bb_ ## f))
const field_t inobt_flds[] = {
{ "magic", FLDT_UINT32X, OI(OFF(magic)), C1, 0, TYP_NONE },
{ "level", FLDT_UINT16D, OI(OFF(level)), C1, 0, TYP_NONE },
{ "numrecs", FLDT_UINT16D, OI(OFF(numrecs)), C1, 0, TYP_NONE },
- { "leftsib", FLDT_AGBLOCK, OI(OFF(leftsib)), C1, 0, TYP_INOBT },
- { "rightsib", FLDT_AGBLOCK, OI(OFF(rightsib)), C1, 0, TYP_INOBT },
+ { "leftsib", FLDT_AGBLOCK, OI(OFF(u.s.bb_leftsib)), C1, 0, TYP_INOBT },
+ { "rightsib", FLDT_AGBLOCK, OI(OFF(u.s.bb_rightsib)), C1, 0, TYP_INOBT },
{ "recs", FLDT_INOBTREC, btblock_rec_offset, btblock_rec_count,
FLD_ARRAY|FLD_ABASE1|FLD_COUNT|FLD_OFFSET, TYP_NONE },
{ "keys", FLDT_INOBTKEY, btblock_key_offset, btblock_key_count,
{ NULL }
};
-#define OFF(f) bitize(offsetof(xfs_alloc_block_t, bb_ ## f))
+#define OFF(f) bitize(offsetof(struct xfs_btree_block, bb_ ## f))
const field_t bnobt_flds[] = {
{ "magic", FLDT_UINT32X, OI(OFF(magic)), C1, 0, TYP_NONE },
{ "level", FLDT_UINT16D, OI(OFF(level)), C1, 0, TYP_NONE },
{ "numrecs", FLDT_UINT16D, OI(OFF(numrecs)), C1, 0, TYP_NONE },
- { "leftsib", FLDT_AGBLOCK, OI(OFF(leftsib)), C1, 0, TYP_BNOBT },
- { "rightsib", FLDT_AGBLOCK, OI(OFF(rightsib)), C1, 0, TYP_BNOBT },
+ { "leftsib", FLDT_AGBLOCK, OI(OFF(u.s.bb_leftsib)), C1, 0, TYP_BNOBT },
+ { "rightsib", FLDT_AGBLOCK, OI(OFF(u.s.bb_rightsib)), C1, 0, TYP_BNOBT },
{ "recs", FLDT_BNOBTREC, btblock_rec_offset, btblock_rec_count,
FLD_ARRAY|FLD_ABASE1|FLD_COUNT|FLD_OFFSET, TYP_NONE },
{ "keys", FLDT_BNOBTKEY, btblock_key_offset, btblock_key_count,
{ NULL }
};
-#define OFF(f) bitize(offsetof(xfs_alloc_block_t, bb_ ## f))
+#define OFF(f) bitize(offsetof(struct xfs_btree_block, bb_ ## f))
const field_t cntbt_flds[] = {
{ "magic", FLDT_UINT32X, OI(OFF(magic)), C1, 0, TYP_NONE },
{ "level", FLDT_UINT16D, OI(OFF(level)), C1, 0, TYP_NONE },
{ "numrecs", FLDT_UINT16D, OI(OFF(numrecs)), C1, 0, TYP_NONE },
- { "leftsib", FLDT_AGBLOCK, OI(OFF(leftsib)), C1, 0, TYP_CNTBT },
- { "rightsib", FLDT_AGBLOCK, OI(OFF(rightsib)), C1, 0, TYP_CNTBT },
+ { "leftsib", FLDT_AGBLOCK, OI(OFF(u.s.bb_leftsib)), C1, 0, TYP_CNTBT },
+ { "rightsib", FLDT_AGBLOCK, OI(OFF(u.s.bb_rightsib)), C1, 0, TYP_CNTBT },
{ "recs", FLDT_CNTBTREC, btblock_rec_offset, btblock_rec_count,
FLD_ARRAY|FLD_ABASE1|FLD_COUNT|FLD_OFFSET, TYP_NONE },
{ "keys", FLDT_CNTBTKEY, btblock_key_offset, btblock_key_count,
#define CHECK_BLISTA(a,b) \
(blist_size && check_blist(XFS_AGB_TO_FSB(mp, a, b)))
-typedef void (*scan_lbtree_f_t)(xfs_btree_lblock_t *block,
+typedef void (*scan_lbtree_f_t)(struct xfs_btree_block *block,
int level,
dbm_t type,
xfs_fsblock_t bno,
int isroot,
typnm_t btype);
-typedef void (*scan_sbtree_f_t)(xfs_btree_sblock_t *block,
+typedef void (*scan_sbtree_f_t)(struct xfs_btree_block *block,
int level,
xfs_agf_t *agf,
xfs_agblock_t bno,
static void scan_sbtree(xfs_agf_t *agf, xfs_agblock_t root,
int nlevels, int isroot,
scan_sbtree_f_t func, typnm_t btype);
-static void scanfunc_bmap(xfs_btree_lblock_t *ablock, int level,
- dbm_t type, xfs_fsblock_t bno,
+static void scanfunc_bmap(struct xfs_btree_block *block,
+ int level, dbm_t type, xfs_fsblock_t bno,
inodata_t *id, xfs_drfsbno_t *totd,
xfs_drfsbno_t *toti, xfs_extnum_t *nex,
blkmap_t **blkmapp, int isroot,
typnm_t btype);
-static void scanfunc_bno(xfs_btree_sblock_t *ablock, int level,
+static void scanfunc_bno(struct xfs_btree_block *block, int level,
xfs_agf_t *agf, xfs_agblock_t bno,
int isroot);
-static void scanfunc_cnt(xfs_btree_sblock_t *ablock, int level,
+static void scanfunc_cnt(struct xfs_btree_block *block, int level,
xfs_agf_t *agf, xfs_agblock_t bno,
int isroot);
-static void scanfunc_ino(xfs_btree_sblock_t *ablock, int level,
+static void scanfunc_ino(struct xfs_btree_block *block, int level,
xfs_agf_t *agf, xfs_agblock_t bno,
int isroot);
static void set_dbmap(xfs_agnumber_t agno, xfs_agblock_t agbno,
error++;
return;
}
- if (be16_to_cpu(dib->bb_numrecs) > XFS_BTREE_BLOCK_MAXRECS(
- XFS_DFORK_SIZE(dip, mp, whichfork), xfs_bmdr,
- be16_to_cpu(dib->bb_level) == 0)) {
+ if (be16_to_cpu(dib->bb_numrecs) >
+ xfs_bmdr_maxrecs(mp, XFS_DFORK_SIZE(dip, mp, whichfork),
+ be16_to_cpu(dib->bb_level) == 0)) {
if (!sflag || id->ilist)
dbprintf("numrecs for ino %lld %s fork bmap root too "
"large (%u)\n",
return;
}
if (be16_to_cpu(dib->bb_level) == 0) {
- rp = (xfs_bmbt_rec_32_t *)XFS_BTREE_REC_ADDR(xfs_bmdr, dib, 1);
+ rp = (xfs_bmbt_rec_32_t *)XFS_BMDR_REC_ADDR(dib, 1);
process_bmbt_reclist(rp, be16_to_cpu(dib->bb_numrecs), type,
id, totd, blkmapp);
*nex += be16_to_cpu(dib->bb_numrecs);
return;
} else {
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dib, 1,
- XFS_BTREE_BLOCK_MAXRECS(XFS_DFORK_SIZE(dip, mp,
- whichfork),
- xfs_bmdr, 0));
+ pp = XFS_BMDR_PTR_ADDR(dib, 1, xfs_bmdr_maxrecs(mp,
+ XFS_DFORK_SIZE(dip, mp, whichfork), 0));
for (i = 0; i < be16_to_cpu(dib->bb_numrecs); i++)
scan_lbtree(be64_to_cpu(pp[i]),
be16_to_cpu(dib->bb_level),
static void
scanfunc_bmap(
- xfs_btree_lblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
dbm_t type,
xfs_fsblock_t bno,
{
xfs_agblock_t agbno;
xfs_agnumber_t agno;
- xfs_bmbt_block_t *block = (xfs_bmbt_block_t *)ablock;
int i;
xfs_bmbt_ptr_t *pp;
xfs_bmbt_rec_32_t *rp;
error++;
return;
}
- rp = (xfs_bmbt_rec_32_t *)XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
+ rp = (xfs_bmbt_rec_32_t *)XFS_BMBT_REC_ADDR(mp, block, 1);
*nex += be16_to_cpu(block->bb_numrecs);
process_bmbt_reclist(rp, be16_to_cpu(block->bb_numrecs), type, id, totd,
blkmapp);
error++;
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[0]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_lbtree(be64_to_cpu(pp[i]), level, scanfunc_bmap, type, id,
totd, toti, nex, blkmapp, 0, btype);
static void
scanfunc_bno(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agf_t *agf,
xfs_agblock_t bno,
int isroot)
{
- xfs_alloc_block_t *block = (xfs_alloc_block_t *)ablock;
int i;
xfs_alloc_ptr_t *pp;
xfs_alloc_rec_t *rp;
serious_error++;
return;
}
- rp = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
lastblock = 0;
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
set_dbmap(seqno, be32_to_cpu(rp[i].ar_startblock),
serious_error++;
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1, mp->m_alloc_mxr[1]);
+ pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_bno, TYP_BNOBT);
}
static void
scanfunc_cnt(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agf_t *agf,
xfs_agblock_t bno,
int isroot)
{
- xfs_alloc_block_t *block = (xfs_alloc_block_t *)ablock;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int i;
xfs_alloc_ptr_t *pp;
serious_error++;
return;
}
- rp = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
lastcount = 0;
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
check_set_dbmap(seqno, be32_to_cpu(rp[i].ar_startblock),
serious_error++;
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1, mp->m_alloc_mxr[1]);
+ pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_cnt, TYP_CNTBT);
}
static void
scanfunc_ino(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agf_t *agf,
xfs_agblock_t bno,
int isroot)
{
xfs_agino_t agino;
- xfs_inobt_block_t *block = (xfs_inobt_block_t *)ablock;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int i;
int isfree;
serious_error++;
return;
}
- rp = XFS_BTREE_REC_ADDR(xfs_inobt, block, 1);
+ rp = XFS_INOBT_REC_ADDR(mp, block, 1);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
agino = be32_to_cpu(rp[i].ir_startino);
off = XFS_INO_TO_OFFSET(mp, agino);
serious_error++;
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_inobt, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), level, 0, scanfunc_ino, TYP_INOBT);
}
static int rflag;
static int vflag;
-typedef void (*scan_lbtree_f_t)(xfs_btree_lblock_t *block,
+typedef void (*scan_lbtree_f_t)(struct xfs_btree_block *block,
int level,
extmap_t **extmapp,
typnm_t btype);
-typedef void (*scan_sbtree_f_t)(xfs_btree_sblock_t *block,
+typedef void (*scan_sbtree_f_t)(struct xfs_btree_block *block,
int level,
xfs_agf_t *agf);
static void scan_sbtree(xfs_agf_t *agf, xfs_agblock_t root,
int nlevels, scan_sbtree_f_t func,
typnm_t btype);
-static void scanfunc_bmap(xfs_btree_lblock_t *ablock, int level,
+static void scanfunc_bmap(struct xfs_btree_block *block, int level,
extmap_t **extmapp, typnm_t btype);
-static void scanfunc_ino(xfs_btree_sblock_t *ablock, int level,
+static void scanfunc_ino(struct xfs_btree_block *block, int level,
xfs_agf_t *agf);
static const cmdinfo_t frag_cmd =
dib = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
if (be16_to_cpu(dib->bb_level) == 0) {
- rp = (xfs_bmbt_rec_32_t *)XFS_BTREE_REC_ADDR(xfs_bmdr, dib, 1);
+ rp = (xfs_bmbt_rec_32_t *)XFS_BMDR_REC_ADDR(dib, 1);
process_bmbt_reclist(rp, be16_to_cpu(dib->bb_numrecs), extmapp);
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dib, 1,
- XFS_BTREE_BLOCK_MAXRECS(XFS_DFORK_SIZE(dip, mp, whichfork),
- xfs_bmdr, 0));
+ pp = XFS_BMDR_PTR_ADDR(dib, 1,
+ xfs_bmdr_maxrecs(mp, XFS_DFORK_SIZE(dip, mp, whichfork), 0));
for (i = 0; i < be16_to_cpu(dib->bb_numrecs); i++)
scan_lbtree(be64_to_cpu(pp[i]), be16_to_cpu(dib->bb_level),
scanfunc_bmap, extmapp,
static void
scanfunc_bmap(
- xfs_btree_lblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
extmap_t **extmapp,
typnm_t btype)
{
- xfs_bmbt_block_t *block = (xfs_bmbt_block_t *)ablock;
int i;
xfs_bmbt_ptr_t *pp;
xfs_bmbt_rec_t *rp;
if (level == 0) {
- rp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
+ rp = XFS_BMBT_REC_ADDR(mp, block, 1);
process_bmbt_reclist((xfs_bmbt_rec_32_t *)rp,
be16_to_cpu(block->bb_numrecs), extmapp);
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[0]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_lbtree(be64_to_cpu(pp[i]), level, scanfunc_bmap, extmapp,
btype);
static void
scanfunc_ino(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agf_t *agf)
{
xfs_agino_t agino;
- xfs_inobt_block_t *block = (xfs_inobt_block_t *)ablock;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int i;
int j;
xfs_inobt_rec_t *rp;
if (level == 0) {
- rp = XFS_BTREE_REC_ADDR(xfs_inobt, block, 1);
+ rp = XFS_INOBT_REC_ADDR(mp, block, 1);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++) {
agino = be32_to_cpu(rp[i].ir_startino);
off = XFS_INO_TO_OFFSET(mp, agino);
}
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_inobt, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), level, scanfunc_ino,
TYP_INOBT);
static int init(int argc, char **argv);
static void printhist(void);
static void scan_ag(xfs_agnumber_t agno);
-static void scanfunc_bno(xfs_btree_sblock_t *ablock, typnm_t typ, int level,
+static void scanfunc_bno(struct xfs_btree_block *block, typnm_t typ, int level,
xfs_agf_t *agf);
-static void scanfunc_cnt(xfs_btree_sblock_t *ablock, typnm_t typ, int level,
+static void scanfunc_cnt(struct xfs_btree_block *block, typnm_t typ, int level,
xfs_agf_t *agf);
static void scan_freelist(xfs_agf_t *agf);
static void scan_sbtree(xfs_agf_t *agf, xfs_agblock_t root, typnm_t typ,
int nlevels,
- void (*func)(xfs_btree_sblock_t *block, typnm_t typ,
+ void (*func)(struct xfs_btree_block *block, typnm_t typ,
int level, xfs_agf_t *agf));
static int usage(void);
xfs_agblock_t root,
typnm_t typ,
int nlevels,
- void (*func)(xfs_btree_sblock_t *block,
+ void (*func)(struct xfs_btree_block *block,
typnm_t typ,
int level,
xfs_agf_t *agf))
dbprintf("can't read btree block %u/%u\n", seqno, root);
return;
}
- (*func)((xfs_btree_sblock_t *)iocur_top->data, typ, nlevels - 1, agf);
+ (*func)(iocur_top->data, typ, nlevels - 1, agf);
pop_cur();
}
/*ARGSUSED*/
static void
scanfunc_bno(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
typnm_t typ,
int level,
xfs_agf_t *agf)
{
- xfs_alloc_block_t *block = (xfs_alloc_block_t *)ablock;
int i;
xfs_alloc_ptr_t *pp;
xfs_alloc_rec_t *rp;
if (level == 0) {
- rp = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
addtohist(be32_to_cpu(agf->agf_seqno),
be32_to_cpu(rp[i].ar_startblock),
be32_to_cpu(rp[i].ar_blockcount));
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1, mp->m_alloc_mxr[1]);
+ pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), typ, level, scanfunc_bno);
}
static void
scanfunc_cnt(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
typnm_t typ,
int level,
xfs_agf_t *agf)
{
- xfs_alloc_block_t *block = (xfs_alloc_block_t *)ablock;
int i;
xfs_alloc_ptr_t *pp;
xfs_alloc_rec_t *rp;
if (level == 0) {
- rp = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
addtohist(be32_to_cpu(agf->agf_seqno),
be32_to_cpu(rp[i].ar_startblock),
be32_to_cpu(rp[i].ar_blockcount));
return;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1, mp->m_alloc_mxr[1]);
+ pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++)
scan_sbtree(agf, be32_to_cpu(pp[i]), typ, level, scanfunc_cnt);
}
return 1;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1, mp->m_alloc_mxr[1]);
+ pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
for (i = 0; i < numrecs; i++) {
if (!valid_bno(agno, be32_to_cpu(pp[i]))) {
if (show_warnings)
typtab[btype].name, agno, agbno);
return 1;
}
- return process_bmbt_reclist(XFS_BTREE_REC_ADDR(xfs_bmbt,
- block, 1), nrecs, *(typnm_t*)arg);
+ return process_bmbt_reclist(XFS_BMBT_REC_ADDR(mp, block, 1),
+ nrecs, *(typnm_t*)arg);
}
if (nrecs > mp->m_bmap_dmxr[1]) {
nrecs, typtab[btype].name, agno, agbno);
return 1;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
for (i = 0; i < nrecs; i++) {
xfs_agnumber_t ag;
xfs_agblock_t bno;
return 1;
}
- if (level == 0)
- return process_bmbt_reclist(XFS_BTREE_REC_ADDR(xfs_bmdr,
- dib, 1), nrecs, itype);
+ if (level == 0) {
+ return process_bmbt_reclist(XFS_BMDR_REC_ADDR(dib, 1),
+ nrecs, itype);
+ }
- maxrecs = XFS_BTREE_BLOCK_MAXRECS(XFS_DFORK_SIZE(dip, mp, whichfork),
- xfs_bmdr, 0);
+ maxrecs = xfs_bmdr_maxrecs(mp, XFS_DFORK_SIZE(dip, mp, whichfork), 0);
if (nrecs > maxrecs) {
if (show_warnings)
print_warning("invalid numrecs (%u) in inode %lld %s "
return 1;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dib, 1, maxrecs);
+ pp = XFS_BMDR_PTR_ADDR(dib, 1, maxrecs);
for (i = 0; i < nrecs; i++) {
xfs_agnumber_t ag;
xfs_agblock_t bno;
typtab[btype].name, agno, agbno);
numrecs = mp->m_inobt_mxr[0];
}
- rp = XFS_BTREE_REC_ADDR(xfs_inobt, block, 1);
+ rp = XFS_INOBT_REC_ADDR(mp, block, 1);
for (i = 0; i < numrecs; i++, rp++) {
if (!copy_inode_chunk(agno, rp))
return 0;
numrecs = mp->m_inobt_mxr[1];
}
- pp = XFS_BTREE_PTR_ADDR(xfs_inobt, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
for (i = 0; i < numrecs; i++) {
if (!valid_bno(agno, be32_to_cpu(pp[i]))) {
if (show_warnings)
struct xfs_buf;
struct xfs_btree_cur;
-struct xfs_btree_sblock;
struct xfs_mount;
/*
/* btree pointer type */
typedef __be32 xfs_alloc_ptr_t;
-/* btree block header type */
-typedef struct xfs_btree_sblock xfs_alloc_block_t;
-
-#define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)XFS_BUF_PTR(bp))
-
-/*
- * Real block structures have a size equal to the disk block size.
- */
-#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_alloc_mxr[lev != 0])
-#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_alloc_mnr[lev != 0])
/*
* Minimum and maximum blocksize and sectorsize.
#define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
/*
- * Record, key, and pointer address macros for btree blocks.
+ * Btree block header size depends on a superblock flag.
+ *
+ * (not quite yet, but soon)
*/
-#define XFS_ALLOC_REC_ADDR(bb,i,cur) \
- XFS_BTREE_REC_ADDR(xfs_alloc, bb, i)
-
-#define XFS_ALLOC_KEY_ADDR(bb,i,cur) \
- XFS_BTREE_KEY_ADDR(xfs_alloc, bb, i)
-
-#define XFS_ALLOC_PTR_ADDR(bb,i,cur) \
- XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur))
+#define XFS_ALLOC_BLOCK_LEN(mp) XFS_BTREE_SBLOCK_LEN
+/*
+ * Record, key, and pointer address macros for btree blocks.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+#define XFS_ALLOC_REC_ADDR(mp, block, index) \
+ ((xfs_alloc_rec_t *) \
+ ((char *)(block) + \
+ XFS_ALLOC_BLOCK_LEN(mp) + \
+ (((index) - 1) * sizeof(xfs_alloc_rec_t))))
+
+#define XFS_ALLOC_KEY_ADDR(mp, block, index) \
+ ((xfs_alloc_key_t *) \
+ ((char *)(block) + \
+ XFS_ALLOC_BLOCK_LEN(mp) + \
+ ((index) - 1) * sizeof(xfs_alloc_key_t)))
+
+#define XFS_ALLOC_PTR_ADDR(mp, block, index, maxrecs) \
+ ((xfs_alloc_ptr_t *) \
+ ((char *)(block) + \
+ XFS_ALLOC_BLOCK_LEN(mp) + \
+ (maxrecs) * sizeof(xfs_alloc_key_t) + \
+ ((index) - 1) * sizeof(xfs_alloc_ptr_t)))
extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_buf *,
xfs_agnumber_t, xfs_btnum_t);
+extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
#endif /* __XFS_ALLOC_BTREE_H__ */
#define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */
struct xfs_btree_cur;
-struct xfs_btree_lblock;
+struct xfs_btree_block;
struct xfs_mount;
struct xfs_inode;
struct xfs_trans;
/* btree pointer type */
typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
-/* btree block header type */
-typedef struct xfs_btree_lblock xfs_bmbt_block_t;
-
-#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp))
-
-#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) ((cur)->bc_private.b.forksize)
-#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) \
- ((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \
- (cur)->bc_private.b.whichfork)->if_broot_bytes)
-
-#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \
- xfs_bmdr, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])))
-#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\
- xfs_bmbt, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0])))
-
-#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur),\
- xfs_bmdr, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])))
-#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) \
- (((lev) == (cur)->bc_nlevels - 1 ? \
- XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\
- xfs_bmbt, (lev) == 0) : \
- ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])))
-
-#define XFS_BMAP_REC_DADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_REC_IADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_KEY_DADDR(bb,i,cur) \
- (XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_KEY_IADDR(bb,i,cur) \
- (XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
-
-#define XFS_BMAP_PTR_DADDR(bb,i,cur) \
- (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \
- be16_to_cpu((bb)->bb_level), cur)))
-#define XFS_BMAP_PTR_IADDR(bb,i,cur) \
- (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \
- be16_to_cpu((bb)->bb_level), cur)))
+/*
+ * Btree block header size depends on a superblock flag.
+ *
+ * (not quite yet, but soon)
+ */
+#define XFS_BMBT_BLOCK_LEN(mp) XFS_BTREE_LBLOCK_LEN
+
+#define XFS_BMBT_REC_ADDR(mp, block, index) \
+ ((xfs_bmbt_rec_t *) \
+ ((char *)(block) + \
+ XFS_BMBT_BLOCK_LEN(mp) + \
+ ((index) - 1) * sizeof(xfs_bmbt_rec_t)))
+
+#define XFS_BMBT_KEY_ADDR(mp, block, index) \
+ ((xfs_bmbt_key_t *) \
+ ((char *)(block) + \
+ XFS_BMBT_BLOCK_LEN(mp) + \
+ ((index) - 1) * sizeof(xfs_bmbt_key_t)))
+
+#define XFS_BMBT_PTR_ADDR(mp, block, index, maxrecs) \
+ ((xfs_bmbt_ptr_t *) \
+ ((char *)(block) + \
+ XFS_BMBT_BLOCK_LEN(mp) + \
+ (maxrecs) * sizeof(xfs_bmbt_key_t) + \
+ ((index) - 1) * sizeof(xfs_bmbt_ptr_t)))
+
+#define XFS_BMDR_REC_ADDR(block, index) \
+ ((xfs_bmdr_rec_t *) \
+ ((char *)(block) + \
+ sizeof(struct xfs_bmdr_block) + \
+ ((index) - 1) * sizeof(xfs_bmdr_rec_t)))
+
+#define XFS_BMDR_KEY_ADDR(block, index) \
+ ((xfs_bmdr_key_t *) \
+ ((char *)(block) + \
+ sizeof(struct xfs_bmdr_block) + \
+ ((index) - 1) * sizeof(xfs_bmdr_key_t)))
+
+#define XFS_BMDR_PTR_ADDR(block, index, maxrecs) \
+ ((xfs_bmdr_ptr_t *) \
+ ((char *)(block) + \
+ sizeof(struct xfs_bmdr_block) + \
+ (maxrecs) * sizeof(xfs_bmdr_key_t) + \
+ ((index) - 1) * sizeof(xfs_bmdr_ptr_t)))
/*
* These are to be used when we know the size of the block and
* we don't have a cursor.
*/
-#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \
- (XFS_BTREE_REC_ADDR(xfs_bmbt,bb,i))
-#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \
- (XFS_BTREE_KEY_ADDR(xfs_bmbt,bb,i))
-#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \
- (XFS_BTREE_PTR_ADDR(xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
-
-#define XFS_BMAP_BROOT_NUMRECS(bb) be16_to_cpu((bb)->bb_numrecs)
-#define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0)
+#define XFS_BMAP_BROOT_PTR_ADDR(mp, bb, i, sz) \
+ XFS_BMBT_PTR_ADDR(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, 0))
#define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \
- (int)(sizeof(xfs_bmbt_block_t) + \
+ (int)(XFS_BTREE_LBLOCK_LEN + \
((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t))))
#define XFS_BMAP_BROOT_SPACE(bb) \
*/
#define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[(w)])
-#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \
- (be32_to_cpu((bb)->bb_magic) == XFS_BMAP_MAGIC && \
- be16_to_cpu((bb)->bb_level) == level && \
- be16_to_cpu((bb)->bb_numrecs) > 0 && \
- be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0])
-
-
/*
* Prototypes for xfs_bmap.c to call.
*/
-extern void xfs_bmdr_to_bmbt(xfs_bmdr_block_t *, int, xfs_bmbt_block_t *, int);
+extern void xfs_bmdr_to_bmbt(struct xfs_mount *, xfs_bmdr_block_t *, int,
+ struct xfs_btree_block *, int);
extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r);
extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r);
extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o,
xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
-extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int);
+extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
+ xfs_bmdr_block_t *, int);
+
+extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level);
+extern int xfs_bmdr_maxrecs(struct xfs_mount *, int blocklen, int leaf);
+extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf);
extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_inode *, int);
#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
/*
- * Short form header: space allocation btrees.
- */
-typedef struct xfs_btree_sblock {
- __be32 bb_magic; /* magic number for block type */
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
- __be32 bb_leftsib; /* left sibling block or NULLAGBLOCK */
- __be32 bb_rightsib; /* right sibling block or NULLAGBLOCK */
-} xfs_btree_sblock_t;
-
-/*
- * Long form header: bmap btrees.
- */
-typedef struct xfs_btree_lblock {
- __be32 bb_magic; /* magic number for block type */
- __be16 bb_level; /* 0 is a leaf */
- __be16 bb_numrecs; /* current # of data records */
- __be64 bb_leftsib; /* left sibling block or NULLDFSBNO */
- __be64 bb_rightsib; /* right sibling block or NULLDFSBNO */
-} xfs_btree_lblock_t;
-
-/*
- * Combined header and structure, used by common code.
+ * Generic btree header.
+ *
+ * This is a combination of the actual format used on disk for short and long
+ * format btrees. The first three fields are shared by both formats, but
+ * the pointers are different and should be used with care.
+ *
+ * To get the size of the actual short or long form headers please use
+ * the size macros below. Never use sizeof(xfs_btree_block).
*/
-typedef struct xfs_btree_block {
+struct xfs_btree_block {
__be32 bb_magic; /* magic number for block type */
__be16 bb_level; /* 0 is a leaf */
__be16 bb_numrecs; /* current # of data records */
__be64 bb_rightsib;
} l; /* long form pointers */
} bb_u; /* rest */
-} xfs_btree_block_t;
+};
+
+#define XFS_BTREE_SBLOCK_LEN 16 /* size of a short form block */
+#define XFS_BTREE_LBLOCK_LEN 24 /* size of a long form block */
+
/*
* Generic key, ptr and record wrapper structures.
case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
} \
} while (0)
-/*
- * Maximum and minimum records in a btree block.
- * Given block size, type prefix, and leaf flag (0 or 1).
- * The divisor below is equivalent to lf ? (e1) : (e2) but that produces
- * compiler warnings.
- */
-#define XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) \
- ((int)(((bsz) - (uint)sizeof(t ## _block_t)) / \
- (((lf) * (uint)sizeof(t ## _rec_t)) + \
- ((1 - (lf)) * \
- ((uint)sizeof(t ## _key_t) + (uint)sizeof(t ## _ptr_t))))))
-#define XFS_BTREE_BLOCK_MINRECS(bsz,t,lf) \
- (XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) / 2)
-
-/*
- * Record, key, and pointer address calculation macros.
- * Given block size, type prefix, block pointer, and index of requested entry
- * (first entry numbered 1).
- */
-#define XFS_BTREE_REC_ADDR(t,bb,i) \
- ((t ## _rec_t *)((char *)(bb) + sizeof(t ## _block_t) + \
- ((i) - 1) * sizeof(t ## _rec_t)))
-#define XFS_BTREE_KEY_ADDR(t,bb,i) \
- ((t ## _key_t *)((char *)(bb) + sizeof(t ## _block_t) + \
- ((i) - 1) * sizeof(t ## _key_t)))
-#define XFS_BTREE_PTR_ADDR(t,bb,i,mxr) \
- ((t ## _ptr_t *)((char *)(bb) + sizeof(t ## _block_t) + \
- (mxr) * sizeof(t ## _key_t) + ((i) - 1) * sizeof(t ## _ptr_t)))
#define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */
/*
* Convert from buffer to btree block header.
*/
-#define XFS_BUF_TO_BLOCK(bp) ((xfs_btree_block_t *)XFS_BUF_PTR(bp))
-#define XFS_BUF_TO_LBLOCK(bp) ((xfs_btree_lblock_t *)XFS_BUF_PTR(bp))
-#define XFS_BUF_TO_SBLOCK(bp) ((xfs_btree_sblock_t *)XFS_BUF_PTR(bp))
+#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)XFS_BUF_PTR(bp))
-/*
- * Check that long form block header is ok.
- */
-int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_lblock(
- struct xfs_btree_cur *cur, /* btree cursor */
- struct xfs_btree_lblock *block, /* btree long form block pointer */
- int level, /* level of the btree block */
- struct xfs_buf *bp); /* buffer containing block, if any */
-
/*
* Check that block header is ok.
*/
xfs_dinode_core_t di_core;
/*
* In adding anything between the core and the union, be
- * sure to update the macros like XFS_LITINO below and
- * XFS_BMAP_RBLOCK_DSIZE in xfs_bmap_btree.h.
+ * sure to update the macros like XFS_LITINO below.
*/
__be32 di_next_unlinked;/* agi unlinked list ptr */
union {
*/
#define XFS_LITINO(mp) ((mp)->m_litino)
#define XFS_BROOT_SIZE_ADJ \
- (sizeof(xfs_bmbt_block_t) - sizeof(xfs_bmdr_block_t))
+ (XFS_BTREE_LBLOCK_LEN - sizeof(xfs_bmdr_block_t))
/*
* Inode data & attribute fork sizes, per inode.
struct xfs_buf;
struct xfs_btree_cur;
-struct xfs_btree_sblock;
struct xfs_mount;
/*
/* btree pointer type */
typedef __be32 xfs_inobt_ptr_t;
-/* btree block header type */
-typedef struct xfs_btree_sblock xfs_inobt_block_t;
-
-#define XFS_BUF_TO_INOBT_BLOCK(bp) ((xfs_inobt_block_t *)XFS_BUF_PTR(bp))
-
/*
* Bit manipulations for ir_free.
*/
#define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i))
#define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i))
-/*
- * Real block structures have a size equal to the disk block size.
- */
-#define XFS_INOBT_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_inobt_mxr[lev != 0])
-#define XFS_INOBT_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_inobt_mnr[lev != 0])
-#define XFS_INOBT_IS_LAST_REC(cur) \
- ((cur)->bc_ptrs[0] == be16_to_cpu(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs))
-
/*
* Maximum number of inode btree levels.
*/
#define XFS_PREALLOC_BLOCKS(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
/*
- * Record, key, and pointer address macros for btree blocks.
+ * Btree block header size depends on a superblock flag.
+ *
+ * (not quite yet, but soon)
*/
-#define XFS_INOBT_REC_ADDR(bb,i,cur) \
- (XFS_BTREE_REC_ADDR(xfs_inobt, bb, i))
-
-#define XFS_INOBT_KEY_ADDR(bb,i,cur) \
- (XFS_BTREE_KEY_ADDR(xfs_inobt, bb, i))
+#define XFS_INOBT_BLOCK_LEN(mp) XFS_BTREE_SBLOCK_LEN
-#define XFS_INOBT_PTR_ADDR(bb,i,cur) \
- (XFS_BTREE_PTR_ADDR(xfs_inobt, bb, \
- i, XFS_INOBT_BLOCK_MAXRECS(1, cur)))
+/*
+ * Record, key, and pointer address macros for btree blocks.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+#define XFS_INOBT_REC_ADDR(mp, block, index) \
+ ((xfs_inobt_rec_t *) \
+ ((char *)(block) + \
+ XFS_INOBT_BLOCK_LEN(mp) + \
+ (((index) - 1) * sizeof(xfs_inobt_rec_t))))
+
+#define XFS_INOBT_KEY_ADDR(mp, block, index) \
+ ((xfs_inobt_key_t *) \
+ ((char *)(block) + \
+ XFS_INOBT_BLOCK_LEN(mp) + \
+ ((index) - 1) * sizeof(xfs_inobt_key_t)))
+
+#define XFS_INOBT_PTR_ADDR(mp, block, index, maxrecs) \
+ ((xfs_inobt_ptr_t *) \
+ ((char *)(block) + \
+ XFS_INOBT_BLOCK_LEN(mp) + \
+ (maxrecs) * sizeof(xfs_inobt_key_t) + \
+ ((index) - 1) * sizeof(xfs_inobt_ptr_t)))
extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t);
+extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
#endif /* __XFS_IALLOC_BTREE_H__ */
typedef struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
int if_real_bytes; /* bytes allocated in if_u1 */
- xfs_bmbt_block_t *if_broot; /* file's incore btree root */
+ struct xfs_btree_block *if_broot; /* file's incore btree root */
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
unsigned char if_ext_max; /* max # of extent records */
struct xfs_buf;
struct xfs_bmap_free;
struct xfs_bmbt_irec;
-struct xfs_bmbt_block;
struct xfs_inode_log_item;
struct xfs_mount;
struct xfs_trans;
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
uint m_blockwmask; /* blockwsize-1 */
- uint m_alloc_mxr[2]; /* XFS_ALLOC_BLOCK_MAXRECS */
- uint m_alloc_mnr[2]; /* XFS_ALLOC_BLOCK_MINRECS */
- uint m_bmap_dmxr[2]; /* XFS_BMAP_BLOCK_DMAXRECS */
- uint m_bmap_dmnr[2]; /* XFS_BMAP_BLOCK_DMINRECS */
- uint m_inobt_mxr[2]; /* XFS_INOBT_BLOCK_MAXRECS */
- uint m_inobt_mnr[2]; /* XFS_INOBT_BLOCK_MINRECS */
+ uint m_alloc_mxr[2]; /* max alloc btree records */
+ uint m_alloc_mnr[2]; /* min alloc btree records */
+ uint m_bmap_dmxr[2]; /* max bmap btree records */
+ uint m_bmap_dmnr[2]; /* min bmap btree records */
+ uint m_inobt_mxr[2]; /* max inobt btree records */
+ uint m_inobt_mnr[2]; /* min inobt btree records */
uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */
libxfs_bhash_size = LIBXFS_BHASHSIZE(sbp);
libxfs_bcache = cache_init(libxfs_bhash_size, &libxfs_bcache_operations);
use_xfs_buf_lock = a->usebuflock;
-#ifndef HAVE_PTHREAD_T
- if (use_xfs_buf_lock) {
- fprintf(stderr, _("%s: can't use buffer locks without pthreads\n"),
- progname);
- goto done;
- }
-#endif
manage_zones(0);
rval = 1;
done:
/*
- * Core dir v1 mount code for allowing reading of these dirs.
+ * Core dir v1 mount code for allowing reading of these dirs.
*/
static void
libxfs_dirv1_mount(
* If we are using stripe alignment, check whether
* the stripe unit is a multiple of the inode alignment
*/
- if (mp->m_dalign && mp->m_inoalign_mask &&
+ if (mp->m_dalign && mp->m_inoalign_mask &&
!(mp->m_dalign & mp->m_inoalign_mask))
mp->m_sinoalign = mp->m_dalign;
else
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
+
#ifdef DEBUG
- {
- xfs_alloc_block_t *bnoblock;
- xfs_alloc_block_t *cntblock;
-
- if (bno_cur->bc_nlevels == 1 &&
- cnt_cur->bc_nlevels == 1) {
- bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]);
- cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]);
- XFS_WANT_CORRUPTED_RETURN(
- be16_to_cpu(bnoblock->bb_numrecs) ==
- be16_to_cpu(cntblock->bb_numrecs));
- }
+ if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
+ struct xfs_btree_block *bnoblock;
+ struct xfs_btree_block *cntblock;
+
+ bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
+ cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
+
+ XFS_WANT_CORRUPTED_RETURN(
+ bnoblock->bb_numrecs == cntblock->bb_numrecs);
}
#endif
+
/*
* Deal with all four cases: the allocated record is contained
* within the freespace record, so we can have new freespace
if (numrecs) {
xfs_alloc_rec_t *rrp;
- rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
+ rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
len = rrp->ar_blockcount;
} else {
len = 0;
return cur;
}
+
+/*
+ * Calculate number of records in an alloc btree block.
+ */
+int
+xfs_allocbt_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
+{
+ blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
+
+ if (leaf)
+ return blocklen / sizeof(xfs_alloc_rec_t);
+ return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
+}
int whichfork) /* data or attr fork */
{
/* REFERENCED */
- xfs_bmbt_block_t *cblock;/* child btree block */
+ struct xfs_btree_block *cblock;/* child btree block */
xfs_fsblock_t cbno; /* child block number */
xfs_buf_t *cbp; /* child block's buffer */
int error; /* error return value */
xfs_ifork_t *ifp; /* inode fork data */
xfs_mount_t *mp; /* mount point structure */
__be64 *pp; /* ptr to block address */
- xfs_bmbt_block_t *rblock;/* root btree block */
+ struct xfs_btree_block *rblock;/* root btree block */
+ mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
rblock = ifp->if_broot;
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
- ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
- mp = ip->i_mount;
- pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
+ ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
+ pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
cbno = be64_to_cpu(*pp);
*logflagsp = 0;
#ifdef DEBUG
if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
XFS_BMAP_BTREE_REF)))
return error;
- cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
- if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp)))
+ cblock = XFS_BUF_TO_BLOCK(cbp);
+ if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
return error;
xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
ip->i_d.di_nblocks--;
int *logflagsp, /* inode logging flags */
int whichfork) /* data or attr fork */
{
- xfs_bmbt_block_t *ablock; /* allocated (child) bt block */
+ struct xfs_btree_block *ablock; /* allocated (child) bt block */
xfs_buf_t *abp; /* buffer for ablock */
xfs_alloc_arg_t args; /* allocation arguments */
xfs_bmbt_rec_t *arp; /* child record pointer */
- xfs_bmbt_block_t *block; /* btree root block */
+ struct xfs_btree_block *block; /* btree root block */
xfs_btree_cur_t *cur; /* bmap btree cursor */
xfs_bmbt_rec_host_t *ep; /* extent record pointer */
int error; /* error return value */
*/
xfs_iroot_realloc(ip, 1, whichfork);
ifp->if_flags |= XFS_IFBROOT;
+
/*
* Fill in the root.
*/
block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
block->bb_level = cpu_to_be16(1);
block->bb_numrecs = cpu_to_be16(1);
- block->bb_leftsib = cpu_to_be64(NULLDFSBNO);
- block->bb_rightsib = cpu_to_be64(NULLDFSBNO);
+ block->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
+ block->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
+
/*
* Need a cursor. Can't allocate until bb_level is filled in.
*/
/*
* Fill in the child block.
*/
- ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
+ ablock = XFS_BUF_TO_BLOCK(abp);
ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
ablock->bb_level = 0;
- ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
- ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
- arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
+ ablock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
+ ablock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
+ arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
for (cnt = i = 0; i < nextents; i++) {
ep = xfs_iext_get_ext(ifp, i);
}
}
ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
- ablock->bb_numrecs = cpu_to_be16(cnt);
+ xfs_btree_set_numrecs(ablock, cnt);
+
/*
* Fill in the root key and pointer.
*/
- kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
- arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
+ kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
+ arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
- pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
+ be16_to_cpu(block->bb_level)));
*pp = cpu_to_be64(args.fsbno);
+
/*
* Do all this logging at the end so that
* the root is at the right level.
maxleafents = MAXAEXTNUM;
sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
}
- maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
+ maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
minleafrecs = mp->m_bmap_dmnr[0];
minnoderecs = mp->m_bmap_dmnr[1];
maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
return rval;
}
+STATIC int
+xfs_bmap_sanity_check(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ int level)
+{
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+
+ if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC ||
+ be16_to_cpu(block->bb_level) != level ||
+ be16_to_cpu(block->bb_numrecs) == 0 ||
+ be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
+ return 0;
+ return 1;
+}
+
/*
* Read in the extents to if_extents.
* All inode fields are set up by caller, we just traverse the btree
xfs_inode_t *ip, /* incore inode */
int whichfork) /* data or attr fork */
{
- xfs_bmbt_block_t *block; /* current btree block */
+ struct xfs_btree_block *block; /* current btree block */
xfs_fsblock_t bno; /* block # of "block" */
xfs_buf_t *bp; /* buffer for "block" */
int error; /* error return value */
*/
level = be16_to_cpu(block->bb_level);
ASSERT(level > 0);
- pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
+ pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
bno = be64_to_cpu(*pp);
ASSERT(bno != NULLDFSBNO);
ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
XFS_BMAP_BTREE_REF)))
return error;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
+ block = XFS_BUF_TO_BLOCK(bp);
XFS_WANT_CORRUPTED_GOTO(
- XFS_BMAP_SANITY_CHECK(mp, block, level),
+ xfs_bmap_sanity_check(mp, bp, level),
error0);
if (level == 0)
break;
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
xfs_trans_brelse(tp, bp);
xfs_extnum_t start;
- num_recs = be16_to_cpu(block->bb_numrecs);
+ num_recs = xfs_btree_get_numrecs(block);
if (unlikely(i + num_recs > room)) {
ASSERT(i + num_recs <= room);
xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
goto error0;
}
XFS_WANT_CORRUPTED_GOTO(
- XFS_BMAP_SANITY_CHECK(mp, block, 0),
+ xfs_bmap_sanity_check(mp, bp, 0),
error0);
/*
* Read-ahead the next leaf block, if any.
*/
- nextbno = be64_to_cpu(block->bb_rightsib);
+ nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
if (nextbno != NULLFSBLOCK)
xfs_btree_reada_bufl(mp, nextbno, 1);
/*
* Copy records into the extent records.
*/
- frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
+ frp = XFS_BMBT_REC_ADDR(mp, block, 1);
start = i;
for (j = 0; j < num_recs; j++, i++, frp++) {
xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
XFS_BMAP_BTREE_REF)))
return error;
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
+ block = XFS_BUF_TO_BLOCK(bp);
}
ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
*/
#include <xfs.h>
-
/*
* Determine the extent state.
*/
*/
void
xfs_bmdr_to_bmbt(
+ struct xfs_mount *mp,
xfs_bmdr_block_t *dblock,
int dblocklen,
- xfs_bmbt_block_t *rblock,
+ struct xfs_btree_block *rblock,
int rblocklen)
{
int dmxr;
rblock->bb_level = dblock->bb_level;
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
rblock->bb_numrecs = dblock->bb_numrecs;
- rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
- rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
- dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
- fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
- tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
- fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
- tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
+ rblock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
+ rblock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
+ dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
+ fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
+ tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
+ fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
+ tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
dmxr = be16_to_cpu(dblock->bb_numrecs);
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
*/
void
xfs_bmbt_to_bmdr(
- xfs_bmbt_block_t *rblock,
+ struct xfs_mount *mp,
+ struct xfs_btree_block *rblock,
int rblocklen,
xfs_bmdr_block_t *dblock,
int dblocklen)
__be64 *tpp;
ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
- ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO);
- ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO);
+ ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO);
+ ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO);
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
dblock->bb_level = rblock->bb_level;
dblock->bb_numrecs = rblock->bb_numrecs;
- dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
- fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
- tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
- fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
- tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
+ dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
+ fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
+ tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
+ fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
+ tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
dmxr = be16_to_cpu(dblock->bb_numrecs);
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
struct xfs_btree_cur *cur,
int level)
{
- return XFS_BMAP_BLOCK_IMINRECS(level, cur);
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp;
+
+ ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
+ cur->bc_private.b.whichfork);
+
+ return xfs_bmbt_maxrecs(cur->bc_mp,
+ ifp->if_broot_bytes, level == 0) / 2;
+ }
+
+ return cur->bc_mp->m_bmap_dmnr[level != 0];
}
-STATIC int
+int
xfs_bmbt_get_maxrecs(
struct xfs_btree_cur *cur,
int level)
{
- return XFS_BMAP_BLOCK_IMAXRECS(level, cur);
+ if (level == cur->bc_nlevels - 1) {
+ struct xfs_ifork *ifp;
+
+ ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
+ cur->bc_private.b.whichfork);
+
+ return xfs_bmbt_maxrecs(cur->bc_mp,
+ ifp->if_broot_bytes, level == 0);
+ }
+
+ return cur->bc_mp->m_bmap_dmxr[level != 0];
+
}
/*
struct xfs_btree_cur *cur,
int level)
{
- return XFS_BMAP_BLOCK_DMAXRECS(level, cur);
+ if (level != cur->bc_nlevels - 1)
+ return cur->bc_mp->m_bmap_dmxr[level != 0];
+ return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize,
+ level == 0);
}
STATIC void
return cur;
}
+
+/*
+ * Calculate number of records in a bmap btree block.
+ */
+int
+xfs_bmbt_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
+{
+ blocklen -= XFS_BMBT_BLOCK_LEN(mp);
+
+ if (leaf)
+ return blocklen / sizeof(xfs_bmbt_rec_t);
+ return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
+}
+
+/*
+ * Calculate number of records in a bmap btree inode root.
+ */
+int
+xfs_bmdr_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
+{
+ blocklen -= sizeof(xfs_bmdr_block_t);
+
+ if (leaf)
+ return blocklen / sizeof(xfs_bmdr_rec_t);
+ return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
+}
};
-int /* error (0 or EFSCORRUPTED) */
+STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
struct xfs_btree_cur *cur, /* btree cursor */
- struct xfs_btree_lblock *block, /* btree long form block pointer */
+ struct xfs_btree_block *block, /* btree long form block pointer */
int level, /* level of the btree block */
struct xfs_buf *bp) /* buffer for block, if any */
{
be16_to_cpu(block->bb_level) == level &&
be16_to_cpu(block->bb_numrecs) <=
cur->bc_ops->get_maxrecs(cur, level) &&
- block->bb_leftsib &&
- (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO ||
- XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) &&
- block->bb_rightsib &&
- (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO ||
- XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib)));
+ block->bb_u.l.bb_leftsib &&
+ (be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO ||
+ XFS_FSB_SANITY_CHECK(mp,
+ be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
+ block->bb_u.l.bb_rightsib &&
+ (be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO ||
+ XFS_FSB_SANITY_CHECK(mp,
+ be64_to_cpu(block->bb_u.l.bb_rightsib)));
if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
XFS_ERRTAG_BTREE_CHECK_LBLOCK,
XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_sblock(
struct xfs_btree_cur *cur, /* btree cursor */
- struct xfs_btree_sblock *block, /* btree short form block pointer */
+ struct xfs_btree_block *block, /* btree short form block pointer */
int level, /* level of the btree block */
struct xfs_buf *bp) /* buffer containing block */
{
be16_to_cpu(block->bb_level) == level &&
be16_to_cpu(block->bb_numrecs) <=
cur->bc_ops->get_maxrecs(cur, level) &&
- (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK ||
- be32_to_cpu(block->bb_leftsib) < agflen) &&
- block->bb_leftsib &&
- (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK ||
- be32_to_cpu(block->bb_rightsib) < agflen) &&
- block->bb_rightsib;
+ (be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK ||
+ be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
+ block->bb_u.s.bb_leftsib &&
+ (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK ||
+ be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
+ block->bb_u.s.bb_rightsib;
if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
XFS_ERRTAG_BTREE_CHECK_SBLOCK,
XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
int level, /* level of the btree block */
struct xfs_buf *bp) /* buffer containing block, if any */
{
- if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
- return xfs_btree_check_lblock(cur,
- (struct xfs_btree_lblock *)block, level, bp);
- } else {
- return xfs_btree_check_sblock(cur,
- (struct xfs_btree_sblock *)block, level, bp);
- }
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return xfs_btree_check_lblock(cur, block, level, bp);
+ else
+ return xfs_btree_check_sblock(cur, block, level, bp);
}
/*
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
- sizeof(struct xfs_btree_lblock) :
- sizeof(struct xfs_btree_sblock);
+ XFS_BTREE_LBLOCK_LEN :
+ XFS_BTREE_SBLOCK_LEN;
}
/*
xfs_btree_cur_t *cur, /* btree cursor */
int level) /* level to check */
{
- xfs_btree_block_t *block; /* generic btree block pointer */
+ struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
block = xfs_btree_get_block(cur, level, &bp);
xfs_btree_cur_t *cur, /* btree cursor */
int level) /* level to change */
{
- xfs_btree_block_t *block; /* generic btree block pointer */
+ struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
/*
xfs_btree_cur_t *cur, /* btree cursor */
int level) /* level to change */
{
- xfs_btree_block_t *block; /* generic btree block pointer */
+ struct xfs_btree_block *block; /* generic btree block pointer */
xfs_buf_t *bp; /* buffer containing block */
/*
int lev, /* level in btree */
xfs_buf_t *bp) /* new buffer to set */
{
- xfs_btree_block_t *b; /* btree block */
+ struct xfs_btree_block *b; /* btree block */
xfs_buf_t *obp; /* old buffer pointer */
obp = cur->bc_bufs[lev];
int first; /* first byte offset logged */
int last; /* last byte offset logged */
static const short soffsets[] = { /* table of offsets (short) */
- offsetof(struct xfs_btree_sblock, bb_magic),
- offsetof(struct xfs_btree_sblock, bb_level),
- offsetof(struct xfs_btree_sblock, bb_numrecs),
- offsetof(struct xfs_btree_sblock, bb_leftsib),
- offsetof(struct xfs_btree_sblock, bb_rightsib),
- sizeof(struct xfs_btree_sblock)
+ offsetof(struct xfs_btree_block, bb_magic),
+ offsetof(struct xfs_btree_block, bb_level),
+ offsetof(struct xfs_btree_block, bb_numrecs),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
+ offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
+ XFS_BTREE_SBLOCK_LEN
};
static const short loffsets[] = { /* table of offsets (long) */
- offsetof(struct xfs_btree_lblock, bb_magic),
- offsetof(struct xfs_btree_lblock, bb_level),
- offsetof(struct xfs_btree_lblock, bb_numrecs),
- offsetof(struct xfs_btree_lblock, bb_leftsib),
- offsetof(struct xfs_btree_lblock, bb_rightsib),
- sizeof(struct xfs_btree_lblock)
+ offsetof(struct xfs_btree_block, bb_magic),
+ offsetof(struct xfs_btree_block, bb_level),
+ offsetof(struct xfs_btree_block, bb_numrecs),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
+ offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
+ XFS_BTREE_LBLOCK_LEN
};
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
if (index) {
xfs_iroot_realloc(cur->bc_private.b.ip, index,
cur->bc_private.b.whichfork);
- block = (struct xfs_btree_block *)ifp->if_broot;
+ block = ifp->if_broot;
}
be16_add_cpu(&block->bb_numrecs, index);
return cur;
}
+
+/*
+ * Calculate number of records in an inobt btree block.
+ */
+int
+xfs_inobt_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
+{
+ blocklen -= XFS_INOBT_BLOCK_LEN(mp);
+
+ if (leaf)
+ return blocklen / sizeof(xfs_inobt_rec_t);
+ return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
+}
ifp = XFS_IFORK_PTR(ip, whichfork);
dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
size = XFS_BMAP_BROOT_SPACE(dfp);
- nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);
+ nrecs = be16_to_cpu(dfp->bb_numrecs);
/*
* blow out if -- fork has less extents than can fit in
* Copy and convert from the on-disk structure
* to the in-memory structure.
*/
- xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
- ifp->if_broot, size);
+ xfs_bmdr_to_bmbt(ip->i_mount, dfp,
+ XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
+ ifp->if_broot, size);
ifp->if_flags &= ~XFS_IFEXTENTS;
ifp->if_flags |= XFS_IFBROOT;
int rec_diff,
int whichfork)
{
+ struct xfs_mount *mp = ip->i_mount;
int cur_max;
xfs_ifork_t *ifp;
- xfs_bmbt_block_t *new_broot;
+ struct xfs_btree_block *new_broot;
int new_max;
size_t new_size;
char *np;
*/
if (ifp->if_broot_bytes == 0) {
new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
- ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
- KM_SLEEP);
+ ifp->if_broot = kmem_alloc(new_size, KM_SLEEP);
ifp->if_broot_bytes = (int)new_size;
return;
}
* location. The records don't change location because
* they are kept butted up against the btree block header.
*/
- cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
+ cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
new_max = cur_max + rec_diff;
new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
- ifp->if_broot = (xfs_bmbt_block_t *)
- kmem_realloc(ifp->if_broot,
- new_size,
+ ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
KM_SLEEP);
- op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
- ifp->if_broot_bytes);
- np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
- (int)new_size);
+ op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+ ifp->if_broot_bytes);
+ np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+ (int)new_size);
ifp->if_broot_bytes = (int)new_size;
ASSERT(ifp->if_broot_bytes <=
XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
* records, just get rid of the root and clear the status bit.
*/
ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
- cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
+ cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
new_max = cur_max + rec_diff;
ASSERT(new_max >= 0);
if (new_max > 0)
else
new_size = 0;
if (new_size > 0) {
- new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
+ new_broot = kmem_alloc(new_size, KM_SLEEP);
/*
* First copy over the btree block header.
*/
- memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
+ memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
} else {
new_broot = NULL;
ifp->if_flags &= ~XFS_IFBROOT;
/*
* First copy the records.
*/
- op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
- ifp->if_broot_bytes);
- np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
- (int)new_size);
+ op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
+ np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
/*
* Then copy the pointers.
*/
- op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
+ op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
- np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
+ np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
(int)new_size);
memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
}
ASSERT(ifp->if_broot_bytes <=
(XFS_IFORK_SIZE(ip, whichfork) +
XFS_BROOT_SIZE_ADJ));
- xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
+ xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
(xfs_bmdr_block_t *)cp,
XFS_DFORK_SIZE(dip, mp, whichfork));
}
void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
- int i;
-
mp->m_agfrotor = mp->m_agirotor = 0;
spin_lock_init(&mp->m_agirotor_lock);
mp->m_maxagi = mp->m_sb.sb_agcount;
}
ASSERT(mp->m_attroffset < XFS_LITINO(mp));
- for (i = 0; i < 2; i++) {
- mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
- xfs_alloc, i == 0);
- mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
- xfs_alloc, i == 0);
- }
- for (i = 0; i < 2; i++) {
- mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
- xfs_bmbt, i == 0);
- mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
- xfs_bmbt, i == 0);
- }
- for (i = 0; i < 2; i++) {
- mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
- xfs_inobt, i == 0);
- mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
- xfs_inobt, i == 0);
- }
+ mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
+ mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
+ mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
+
+ mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
+ mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
+ mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
+
+ mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
+ mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
+ mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
__uint64_t agsize;
xfs_alloc_rec_t *arec;
int attrversion;
- xfs_btree_sblock_t *block;
+ struct xfs_btree_block *block;
int blflag;
int blocklog;
unsigned int blocksize;
buf = libxfs_getbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
bsize);
- block = XFS_BUF_TO_SBLOCK(buf);
+ block = XFS_BUF_TO_BLOCK(buf);
memset(block, 0, blocksize);
block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
block->bb_level = 0;
block->bb_numrecs = cpu_to_be16(1);
- block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
- block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
- arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+ block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
if (loginternal && agno == logagno) {
if (lalign) {
buf = libxfs_getbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
bsize);
- block = XFS_BUF_TO_SBLOCK(buf);
+ block = XFS_BUF_TO_BLOCK(buf);
memset(block, 0, blocksize);
block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
block->bb_level = 0;
block->bb_numrecs = cpu_to_be16(1);
- block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
- block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
- arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+ block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
if (loginternal && agno == logagno) {
if (lalign) {
buf = libxfs_getbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
bsize);
- block = XFS_BUF_TO_SBLOCK(buf);
+ block = XFS_BUF_TO_BLOCK(buf);
memset(block, 0, blocksize);
block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
block->bb_level = 0;
block->bb_numrecs = 0;
- block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
- block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+ block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
}
xfs_dfsbno_t fsbno;
xfs_buf_t *bp;
xfs_dfsbno_t final_fsbno = NULLDFSBNO;
- xfs_bmbt_block_t *block;
+ struct xfs_btree_block *block;
xfs_bmdr_block_t *rootblock = (xfs_bmdr_block_t *)
XFS_DFORK_PTR(dip, whichfork);
* a btree should have at least 2 levels otherwise it
* would be an extent list.
*/
- rkey = XFS_BTREE_KEY_ADDR(xfs_bmdr, rootblock, 1);
- rp = XFS_BTREE_PTR_ADDR(xfs_bmdr, rootblock, 1, XFS_BTREE_BLOCK_MAXRECS(
- XFS_DFORK_SIZE(dip, mp, whichfork), xfs_bmdr, 1));
+ rkey = XFS_BMDR_KEY_ADDR(rootblock, 1);
+ rp = XFS_BMDR_PTR_ADDR(rootblock, 1,
+ xfs_bmdr_maxrecs(mp, XFS_DFORK_SIZE(dip, mp, whichfork), 1));
found = -1;
for (i = 0; i < be16_to_cpu(rootblock->bb_numrecs) - 1; i++) {
if (be64_to_cpu(rkey[i].br_startoff) <= bno &&
do_error(_("cannot read bmap block %llu\n"), fsbno);
return(NULLDFSBNO);
}
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
+ block = XFS_BUF_TO_BLOCK(bp);
numrecs = be16_to_cpu(block->bb_numrecs);
/*
"minimum (%u, min - %u), proceeding ...\n"),
ino, numrecs, mp->m_bmap_dmnr[1]);
}
- key = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, 1);
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
+ key = XFS_BMBT_KEY_ADDR(mp, block, 1);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
for (found = -1, i = 0; i < numrecs - 1; i++) {
if (be64_to_cpu(key[i].br_startoff) <= bno && bno <
be64_to_cpu(key[i + 1].br_startoff)) {
do_error(_("cannot read bmap block %llu\n"), fsbno);
return(NULLDFSBNO);
}
- block = XFS_BUF_TO_BMBT_BLOCK(bp);
+ block = XFS_BUF_TO_BLOCK(bp);
numrecs = be16_to_cpu(block->bb_numrecs);
}
"(%u, min - %u), continuing...\n"),
ino, numrecs, mp->m_bmap_dmnr[0]);
- rec = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
+ rec = XFS_BMBT_REC_ADDR(mp, block, 1);
for (i = 0; i < numrecs; i++) {
libxfs_bmbt_disk_get_all(rec + i, &irec);
if (irec.br_startoff <= bno &&
init_bm_cursor(&cursor, level + 1);
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dib, 1, XFS_BTREE_BLOCK_MAXRECS(
- XFS_DFORK_SIZE(dip, mp, whichfork), xfs_bmdr, 0));
- pkey = XFS_BTREE_KEY_ADDR(xfs_bmdr, dib, 1);
+ pp = XFS_BMDR_PTR_ADDR(dib, 1,
+ xfs_bmdr_maxrecs(mp, XFS_DFORK_SIZE(dip, mp, whichfork), 0));
+ pkey = XFS_BMDR_KEY_ADDR(dib, 1);
last_key = NULLDFILOFF;
for (i = 0; i < numrecs; i++) {
}
/*
- * no-cursor versions of the XFS equivalents. The address calculators
- * should be used only for interior btree nodes.
- * these are adapted from xfs_alloc_btree.h and xfs_tree.h
+ * XXX(hch): any reason we don't just look at mp->m_alloc_mxr?
*/
-#define XR_ALLOC_KEY_ADDR(mp, bp, i) \
- (xfs_alloc_key_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
- + ((i)-1) * sizeof(xfs_alloc_key_t))
-
-#define XR_ALLOC_PTR_ADDR(mp, bp, i) \
- (xfs_alloc_ptr_t *) ((char *) (bp) + sizeof(xfs_alloc_block_t) \
- + (mp)->m_alloc_mxr[1] * sizeof(xfs_alloc_key_t) \
- + ((i)-1) * sizeof(xfs_alloc_ptr_t))
-
#define XR_ALLOC_BLOCK_MAXRECS(mp, level) \
- XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
- xfs_alloc, (level) == 0)
+ xfs_allocbt_maxrecs((mp), (mp)->m_sb.sb_blocksize, \
+ (level) == 0)
/*
* this calculates a freespace cursor for an ag.
bt_status_t *btree_curs, xfs_agblock_t startblock,
xfs_extlen_t blockcount, int level, __uint32_t magic)
{
- xfs_alloc_block_t *bt_hdr;
+ struct xfs_btree_block *bt_hdr;
xfs_alloc_key_t *bt_key;
xfs_alloc_ptr_t *bt_ptr;
xfs_agblock_t agbno;
return;
lptr = &btree_curs->level[level];
- bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
if (be16_to_cpu(bt_hdr->bb_numrecs) == 0) {
/*
lptr->prev_buf_p = lptr->buf_p;
agbno = get_next_blockaddr(agno, level, btree_curs);
- bt_hdr->bb_rightsib = cpu_to_be32(agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);
lptr->buf_p = libxfs_getbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, agbno),
/*
* initialize block header
*/
- bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
bt_hdr->bb_magic = cpu_to_be32(magic);
bt_hdr->bb_level = cpu_to_be16(level);
- bt_hdr->bb_leftsib = cpu_to_be32(lptr->prev_agbno);
- bt_hdr->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
bt_hdr->bb_numrecs = 0;
/*
*/
be16_add_cpu(&bt_hdr->bb_numrecs, 1);
- bt_key = XR_ALLOC_KEY_ADDR(mp, bt_hdr, be16_to_cpu(bt_hdr->bb_numrecs));
- bt_ptr = XR_ALLOC_PTR_ADDR(mp, bt_hdr, be16_to_cpu(bt_hdr->bb_numrecs));
+ bt_key = XFS_ALLOC_KEY_ADDR(mp, bt_hdr,
+ be16_to_cpu(bt_hdr->bb_numrecs));
+ bt_ptr = XFS_ALLOC_PTR_ADDR(mp, bt_hdr,
+ be16_to_cpu(bt_hdr->bb_numrecs),
+ mp->m_alloc_mxr[1]);
bt_key->ar_startblock = cpu_to_be32(startblock);
bt_key->ar_blockcount = cpu_to_be32(blockcount);
{
xfs_agnumber_t i;
xfs_agblock_t j;
- xfs_alloc_block_t *bt_hdr;
+ struct xfs_btree_block *bt_hdr;
xfs_alloc_rec_t *bt_rec;
int level;
xfs_agblock_t agbno;
/*
* initialize block header
*/
- bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
bt_hdr->bb_magic = cpu_to_be32(magic);
bt_hdr->bb_level = cpu_to_be16(i);
- bt_hdr->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
- bt_hdr->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
bt_hdr->bb_numrecs = 0;
}
/*
/*
* block initialization, lay in block header
*/
- bt_hdr = XFS_BUF_TO_ALLOC_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
bt_hdr->bb_magic = cpu_to_be32(magic);
bt_hdr->bb_level = 0;
- bt_hdr->bb_leftsib = cpu_to_be32(lptr->prev_agbno);
- bt_hdr->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
(lptr->modulo > 0));
#ifdef XR_BLD_FREE_TRACE
ext_ptr->ex_blockcount,
0, magic);
- bt_rec = (xfs_alloc_rec_t *) ((char *) bt_hdr +
- sizeof(xfs_alloc_block_t));
+ bt_rec = (xfs_alloc_rec_t *)
+ ((char *)bt_hdr + XFS_ALLOC_BLOCK_LEN(mp));
for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
ASSERT(ext_ptr != NULL);
bt_rec[j].ar_startblock = cpu_to_be32(
lptr->prev_buf_p = lptr->buf_p;
lptr->prev_agbno = lptr->agbno;
lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
- bt_hdr->bb_rightsib = cpu_to_be32(lptr->agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);
lptr->buf_p = libxfs_getbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
}
/*
- * no-cursor versions of the XFS equivalents. The address calculators
- * should be used only for interior btree nodes.
- * these are adapted from xfs_ialloc_btree.h and xfs_tree.h
+ * XXX(hch): any reason we don't just look at mp->m_inobt_mxr?
*/
-#define XR_INOBT_KEY_ADDR(mp, bp, i) \
- (xfs_inobt_key_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
- + ((i)-1) * sizeof(xfs_inobt_key_t))
-
-#define XR_INOBT_PTR_ADDR(mp, bp, i) \
- (xfs_inobt_ptr_t *) ((char *) (bp) + sizeof(xfs_inobt_block_t) \
- + (mp)->m_inobt_mxr[1] * sizeof(xfs_inobt_key_t) \
- + ((i)-1) * sizeof(xfs_inobt_ptr_t))
-
#define XR_INOBT_BLOCK_MAXRECS(mp, level) \
- XFS_BTREE_BLOCK_MAXRECS((mp)->m_sb.sb_blocksize, \
- xfs_inobt, (level) == 0)
+ xfs_inobt_maxrecs((mp), (mp)->m_sb.sb_blocksize, \
+ (level) == 0)
/*
* we don't have to worry here about how chewing up free extents
prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
xfs_agino_t startino, int level)
{
- xfs_inobt_block_t *bt_hdr;
+ struct xfs_btree_block *bt_hdr;
xfs_inobt_key_t *bt_key;
xfs_inobt_ptr_t *bt_ptr;
xfs_agblock_t agbno;
return;
lptr = &btree_curs->level[level];
- bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
if (be16_to_cpu(bt_hdr->bb_numrecs) == 0) {
/*
lptr->prev_buf_p = lptr->buf_p;
agbno = get_next_blockaddr(agno, level, btree_curs);
- bt_hdr->bb_rightsib = cpu_to_be32(agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);
lptr->buf_p = libxfs_getbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, agbno),
/*
* initialize block header
*/
- bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
bt_hdr->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
bt_hdr->bb_level = cpu_to_be16(level);
- bt_hdr->bb_leftsib = cpu_to_be32(lptr->prev_agbno);
- bt_hdr->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
bt_hdr->bb_numrecs = 0;
/*
* propagate extent record for first extent in new block up
*/
be16_add_cpu(&bt_hdr->bb_numrecs, 1);
- bt_key = XR_INOBT_KEY_ADDR(mp, bt_hdr, be16_to_cpu(bt_hdr->bb_numrecs));
- bt_ptr = XR_INOBT_PTR_ADDR(mp, bt_hdr, be16_to_cpu(bt_hdr->bb_numrecs));
+ bt_key = XFS_INOBT_KEY_ADDR(mp, bt_hdr,
+ be16_to_cpu(bt_hdr->bb_numrecs));
+ bt_ptr = XFS_INOBT_PTR_ADDR(mp, bt_hdr,
+ be16_to_cpu(bt_hdr->bb_numrecs),
+ mp->m_inobt_mxr[1]);
bt_key->ir_startino = cpu_to_be32(startino);
*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
xfs_agblock_t j;
xfs_agblock_t agbno;
xfs_agino_t first_agino;
- xfs_inobt_block_t *bt_hdr;
+ struct xfs_btree_block *bt_hdr;
xfs_inobt_rec_t *bt_rec;
ino_tree_node_t *ino_rec;
bt_stat_level_t *lptr;
/*
* initialize block header
*/
- bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
bt_hdr->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
bt_hdr->bb_level = cpu_to_be16(i);
- bt_hdr->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
- bt_hdr->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
bt_hdr->bb_numrecs = 0;
}
/*
/*
* block initialization, lay in block header
*/
- bt_hdr = XFS_BUF_TO_INOBT_BLOCK(lptr->buf_p);
+ bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
bt_hdr->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
bt_hdr->bb_level = 0;
- bt_hdr->bb_leftsib = cpu_to_be32(lptr->prev_agbno);
- bt_hdr->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
+ bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
(lptr->modulo > 0));
prop_ino_cursor(mp, agno, btree_curs,
ino_rec->ino_startnum, 0);
- bt_rec = (xfs_inobt_rec_t *) ((char *) bt_hdr +
- sizeof(xfs_inobt_block_t));
+ bt_rec = (xfs_inobt_rec_t *)
+ ((char *)bt_hdr + XFS_INOBT_BLOCK_LEN(mp));
for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
ASSERT(ino_rec != NULL);
bt_rec[j].ir_startino =
lptr->prev_buf_p = lptr->buf_p;
lptr->prev_agbno = lptr->agbno;
lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
- bt_hdr->bb_rightsib = cpu_to_be32(lptr->agbno);
+ bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);
lptr->buf_p = libxfs_getbuf(mp->m_dev,
XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
#ifdef XR_BLD_FREE_TRACE
fprintf(stderr, "inobt level 1, maxrec = %d, minrec = %d\n",
- XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_inobt, 0),
- XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_inobt, 0)
+ xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
+ xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2
);
fprintf(stderr, "inobt level 0 (leaf), maxrec = %d, minrec = %d\n",
- XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_inobt, 1),
- XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_inobt, 1)
- );
+ xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
+ xfs_inobt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
fprintf(stderr, "xr inobt level 0 (leaf), maxrec = %d\n",
XR_INOBT_BLOCK_MAXRECS(mp, 0));
fprintf(stderr, "xr inobt level 1 (int), maxrec = %d\n",
XR_INOBT_BLOCK_MAXRECS(mp, 1));
fprintf(stderr, "bnobt level 1, maxrec = %d, minrec = %d\n",
- XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc, 0),
- XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_alloc, 0));
+ xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0),
+ xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 0) / 2);
fprintf(stderr, "bnobt level 0 (leaf), maxrec = %d, minrec = %d\n",
- XFS_BTREE_BLOCK_MAXRECS(mp->m_sb.sb_blocksize, xfs_alloc, 1),
- XFS_BTREE_BLOCK_MINRECS(mp->m_sb.sb_blocksize, xfs_alloc, 1));
+ xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1),
+ xfs_allocbt_maxrecs(mp, mp->m_sb.sb_blocksize, 1) / 2);
#endif
/*
* make sure the root and realtime inodes show up allocated
#include "progress.h"
#include "radix-tree.h"
-#ifdef HAVE_PTHREAD_H
int do_prefetch = 1;
-#else
-int do_prefetch = 0;
-#endif
/*
* Performs prefetching by priming the libxfs cache by using a dedicate thread
if (bp->b_flags & LIBXFS_B_UPTODATE) {
if (B_IS_INODE(flag))
pf_read_inode_dirs(args, bp);
- XFS_BUF_SET_PRIORITY(bp, XFS_BUF_PRIORITY(bp) + 8);
+ XFS_BUF_SET_PRIORITY(bp, XFS_BUF_PRIORITY(bp) +
+ CACHE_PREFETCH_PRIORITY);
libxfs_putbuf(bp);
return;
}
int level,
int isadir,
prefetch_args_t *args,
- int (*func)(xfs_btree_lblock_t *block,
+ int (*func)(struct xfs_btree_block *block,
int level,
int isadir,
prefetch_args_t *args))
XFS_BUF_SET_PRIORITY(bp, isadir ? B_DIR_BMAP : B_BMAP);
- rc = (*func)((xfs_btree_lblock_t *)XFS_BUF_PTR(bp), level - 1, isadir, args);
+ rc = (*func)(XFS_BUF_TO_BLOCK(bp), level - 1, isadir, args);
libxfs_putbuf(bp);
static int
pf_scanfunc_bmap(
- xfs_btree_lblock_t *block,
+ struct xfs_btree_block *block,
int level,
int isadir,
prefetch_args_t *args)
if (numrecs > mp->m_bmap_dmxr[0] || !isadir)
return 0;
return pf_read_bmbt_reclist(args,
- XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1), numrecs);
+ XFS_BMBT_REC_ADDR(mp, block, 1), numrecs);
}
if (numrecs > mp->m_bmap_dmxr[1])
return 0;
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
for (i = 0; i < numrecs; i++) {
dbno = be64_to_cpu(pp[i]);
return;
dsize = XFS_DFORK_DSIZE(dino, mp);
- pp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dib, 1,
- XFS_BTREE_BLOCK_MAXRECS(dsize, xfs_bmdr, 0));
+ pp = XFS_BMDR_PTR_ADDR(dib, 1, xfs_bmdr_maxrecs(mp, dsize, 0));
for (i = 0; i < numrecs; i++) {
dbno = be64_to_cpu(pp[i]);
int nlevels,
xfs_agnumber_t agno,
int suspect,
- void (*func)(xfs_btree_sblock_t *block,
+ void (*func)(struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,
do_error(_("can't read btree block %d/%d\n"), agno, root);
return;
}
- (*func)((xfs_btree_sblock_t *)XFS_BUF_PTR(bp),
- nlevels - 1, root, agno, suspect, isroot);
+ (*func)(XFS_BUF_TO_BLOCK(bp), nlevels - 1, root, agno, suspect, isroot);
libxfs_putbuf(bp);
}
scan_lbtree(
xfs_dfsbno_t root,
int nlevels,
- int (*func)(xfs_btree_lblock_t *block,
+ int (*func)(struct xfs_btree_block *block,
int level,
int type,
int whichfork,
XFS_FSB_TO_AGBNO(mp, root));
return(1);
}
- err = (*func)((xfs_btree_lblock_t *)XFS_BUF_PTR(bp), nlevels - 1,
+ err = (*func)(XFS_BUF_TO_BLOCK(bp), nlevels - 1,
type, whichfork, root, ino, tot, nex, blkmapp,
bm_cursor, isroot, check_dups, &dirty);
int
scanfunc_bmap(
- xfs_btree_lblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
int type,
int whichfork,
int check_dups,
int *dirty)
{
- xfs_bmbt_block_t *block = (xfs_bmbt_block_t *)ablock;
int i;
int err;
xfs_bmbt_ptr_t *pp;
bm_cursor->level[level].fsbno);
return(1);
}
- if (be64_to_cpu(block->bb_leftsib) !=
+ if (be64_to_cpu(block->bb_u.l.bb_leftsib) !=
bm_cursor->level[level].fsbno) {
do_warn(
_("bad back (left) sibling pointer (saw %llu parent block says %llu)\n"
"\tin inode %llu (%s fork) bmap btree block %llu\n"),
- be64_to_cpu(block->bb_leftsib),
+ be64_to_cpu(block->bb_u.l.bb_leftsib),
bm_cursor->level[level].fsbno,
ino, forkname, bno);
return(1);
* This is the first or only block on this level.
* Check that the left sibling pointer is NULL
*/
- if (be64_to_cpu(block->bb_leftsib) != NULLDFSBNO) {
+ if (be64_to_cpu(block->bb_u.l.bb_leftsib) != NULLDFSBNO) {
do_warn(
_("bad back (left) sibling pointer (saw %llu should be NULL (0))\n"
"\tin inode %llu (%s fork) bmap btree block %llu\n"),
- be64_to_cpu(block->bb_leftsib),
+ be64_to_cpu(block->bb_u.l.bb_leftsib),
ino, forkname, bno);
return(1);
}
*/
bm_cursor->level[level].fsbno = bno;
bm_cursor->level[level].left_fsbno =
- be64_to_cpu(block->bb_leftsib);
+ be64_to_cpu(block->bb_u.l.bb_leftsib);
bm_cursor->level[level].right_fsbno =
- be64_to_cpu(block->bb_rightsib);
+ be64_to_cpu(block->bb_u.l.bb_rightsib);
switch (get_fsbno_state(mp, bno)) {
case XR_E_UNKNOWN:
mp->m_bmap_dmxr[0]);
return(1);
}
- rp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
+ rp = XFS_BMBT_REC_ADDR(mp, block, 1);
*nex += numrecs;
/*
* XXX - if we were going to fix up the btree record,
ino, numrecs, mp->m_bmap_dmnr[1], mp->m_bmap_dmxr[1]);
return(1);
}
- pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
- pkey = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, 1);
+ pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+ pkey = XFS_BMBT_KEY_ADDR(mp, block, 1);
last_key = NULLDFILOFF;
void
scanfunc_bno(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,
)
{
xfs_agblock_t b, e;
- xfs_alloc_block_t *block = (xfs_alloc_block_t *)ablock;
int i;
xfs_alloc_ptr_t *pp;
xfs_alloc_rec_t *rp;
if (hdr_errors)
suspect++;
- rp = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
for (i = 0; i < numrecs; i++) {
if (be32_to_cpu(rp[i].ar_blockcount) == 0 ||
be32_to_cpu(rp[i].ar_startblock) == 0 ||
/*
* interior record
*/
- pp = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1, mp->m_alloc_mxr[1]);
+ pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
if (numrecs > mp->m_alloc_mxr[1]) {
numrecs = mp->m_alloc_mxr[1];
void
scanfunc_cnt(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,
int isroot
)
{
- xfs_alloc_block_t *block;
xfs_alloc_ptr_t *pp;
xfs_alloc_rec_t *rp;
xfs_agblock_t b, e;
int numrecs;
int state;
- block = (xfs_alloc_block_t *)ablock;
hdr_errors = 0;
if (be32_to_cpu(block->bb_magic) != XFS_ABTC_MAGIC) {
if (hdr_errors)
suspect++;
- rp = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
+ rp = XFS_ALLOC_REC_ADDR(mp, block, 1);
for (i = 0; i < numrecs; i++) {
if (be32_to_cpu(rp[i].ar_blockcount) == 0 ||
be32_to_cpu(rp[i].ar_startblock) == 0 ||
/*
* interior record
*/
- pp = XFS_BTREE_PTR_ADDR(xfs_alloc, block, 1, mp->m_alloc_mxr[1]);
+ pp = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
if (numrecs > mp->m_alloc_mxr[1]) {
numrecs = mp->m_alloc_mxr[1];
*/
void
scanfunc_ino(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,
)
{
xfs_ino_t lino;
- xfs_inobt_block_t *block;
int i;
xfs_agino_t ino;
xfs_agblock_t agbno;
ino_tree_node_t *ino_rec, *first_rec, *last_rec;
int hdr_errors;
- block = (xfs_inobt_block_t *)ablock;
hdr_errors = 0;
if (be32_to_cpu(block->bb_magic) != XFS_IBT_MAGIC) {
suspect++;
}
- rp = XFS_BTREE_REC_ADDR(xfs_inobt, block, 1);
+ rp = XFS_INOBT_REC_ADDR(mp, block, 1);
/*
* step through the records, each record points to
hdr_errors++;
}
- pp = XFS_BTREE_PTR_ADDR(xfs_inobt, block, 1, mp->m_inobt_mxr[1]);
+ pp = XFS_INOBT_PTR_ADDR(mp, block, 1, mp->m_inobt_mxr[1]);
/*
* don't pass bogus tree flag down further if this block
int nlevels,
xfs_agnumber_t agno,
int suspect,
- void (*func)(xfs_btree_sblock_t *block,
+ void (*func)(struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,
int scan_lbtree(
xfs_dfsbno_t root,
int nlevels,
- int (*func)(xfs_btree_lblock_t *block,
+ int (*func)(struct xfs_btree_block *block,
int level,
int type,
int whichfork,
int check_dups);
int scanfunc_bmap(
- xfs_btree_lblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
int type,
int whichfork,
int *dirty);
void scanfunc_bno(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,
int isroot);
void scanfunc_cnt(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,
void
scanfunc_ino(
- xfs_btree_sblock_t *ablock,
+ struct xfs_btree_block *block,
int level,
xfs_agblock_t bno,
xfs_agnumber_t agno,