#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
#define xfs_buf_offset(bp, offset) ((bp)->b_addr + (offset))
-#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
+
+/*
+ * Return the disk address (daddr) of a buffer.  Type-safe replacement for
+ * the old XFS_BUF_ADDR() macro: the buffer's base block number lives in
+ * the first entry of its map array rather than in bp->b_bn, which this
+ * series retires.
+ *
+ * NOTE(review): assumes b_maps[0] is valid for single-map buffers as
+ * well — confirm the embedded single-map (__b_map) setup guarantees this.
+ */
+static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
+{
+ return bp->b_maps[0].bm_bn;
+}
+/*
+ * Assign the disk address of a not-yet-mapped buffer.  The buffer must not
+ * already have a daddr: b_maps[0].bm_bn is the canonical location now that
+ * b_bn is being retired by this series, so both the assert and the store
+ * must use it — asserting on the old b_bn field would check a stale value
+ * and would no longer compile once b_bn is deleted from struct xfs_buf.
+ */
static inline void xfs_buf_set_daddr(struct xfs_buf *bp, xfs_daddr_t blkno)
{
- bp->b_bn = blkno;
+ assert(bp->b_maps[0].bm_bn == XFS_BUF_DADDR_NULL);
+ bp->b_maps[0].bm_bn = blkno;
}
void libxfs_buf_set_priority(struct xfs_buf *bp, int priority);
blip = (struct xfs_buf_log_item *)lip;
if (blip->bli_item.li_type == XFS_LI_BUF &&
blip->bli_buf->b_target->bt_bdev == btp->bt_bdev &&
- XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
+ xfs_buf_daddr(blip->bli_buf) == map[0].bm_bn &&
blip->bli_buf->b_length == len) {
ASSERT(blip->bli_buf->b_map_count == nmaps);
return blip->bli_buf;
xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF);
bip->bli_buf = bp;
bip->__bli_format.blf_type = XFS_LI_BUF;
- bip->__bli_format.blf_blkno = (int64_t)XFS_BUF_ADDR(bp);
+ bip->__bli_format.blf_blkno = (int64_t)xfs_buf_daddr(bp);
bip->__bli_format.blf_len = (unsigned short)bp->b_length;
bp->b_log_item = bip;
}
xfs_agblock_t bno;
int error;
- bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
+ bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
if (error)
return error;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
- blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
+ blk->disk_blkno = xfs_buf_daddr(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
- blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
+ blk->disk_blkno = xfs_buf_daddr(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
if (!cur->bc_bufs[i])
break;
- if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
+ if (xfs_buf_daddr(cur->bc_bufs[i]) == bno)
return cur->bc_bufs[i];
}
struct xfs_buf_log_item *bip = (struct xfs_buf_log_item *)lip;
if (bip->bli_item.li_type == XFS_LI_BUF &&
- XFS_BUF_ADDR(bip->bli_buf) == bno)
+ xfs_buf_daddr(bip->bli_buf) == bno)
return bip->bli_buf;
}
struct xfs_mount *mp = cur->bc_mp;
struct xfs_inode *ip = cur->bc_ino.ip;
struct xfs_trans *tp = cur->bc_tp;
- xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
+ xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
struct xfs_owner_info oinfo;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
bp = cur->bc_bufs[i];
if (bp) {
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
- XFS_BUF_ADDR(bp), mp->m_bsize,
+ xfs_buf_daddr(bp), mp->m_bsize,
0, &bp,
cur->bc_ops->buf_ops);
if (error) {
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
- XFS_BUF_ADDR(bp)));
+ xfs_buf_daddr(bp)));
else {
ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
- XFS_BUF_ADDR(bp)));
+ xfs_buf_daddr(bp)));
}
}
error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
if (error)
return error;
- if (bp && XFS_BUF_ADDR(bp) == daddr) {
+ if (bp && xfs_buf_daddr(bp) == daddr) {
*blkp = XFS_BUF_TO_BLOCK(bp);
return 0;
}
return __this_address;
/* sibling pointer verification */
- agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
+ agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
!xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
return __this_address;
{
xfs_inobt_mod_blockcount(cur, -1);
return xfs_free_extent(cur->bc_tp,
- XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
+ XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)), 1,
&XFS_RMAP_OINFO_INOBT, resv);
}
/*
* Validate the magic number and version of every inode in the buffer
*/
- agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
+ agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
for (i = 0; i < ni; i++) {
int di_ok;
struct xfs_mount *mp = cur->bc_mp;
struct xfs_buf *agbp = cur->bc_ag.agbp;
struct xfs_agf *agf = agbp->b_addr;
- xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
+ xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
int error;
trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
xfs_agblock_t bno;
int error;
- bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
+ bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
* secondary superblocks, so allow this usage to continue because
* we never read counters from such superblocks.
*/
- if (XFS_BUF_ADDR(bp) == XFS_SB_DADDR && !sbp->sb_inprogress &&
+ if (xfs_buf_daddr(bp) == XFS_SB_DADDR && !sbp->sb_inprogress &&
(sbp->sb_fdblocks > sbp->sb_dblocks ||
!xfs_verify_icount(mp, sbp->sb_icount) ||
sbp->sb_ifree > sbp->sb_icount)) {
bp->b_length = nbblks;
bp->b_error = 0;
- return libxfs_readbufr(log->l_dev, XFS_BUF_ADDR(bp), bp, nbblks, 0);
+ return libxfs_readbufr(log->l_dev, xfs_buf_daddr(bp), bp, nbblks, 0);
}
int
}
xfs_buf_set_daddr(bp, daddr);
- bp->b_maps[0].bm_bn = daddr;
return bp;
}
}
pftrace("readbuf %p (%llu, %d) in AG %d", bplist[bp_index],
- (long long)XFS_BUF_ADDR(bplist[bp_index]),
+ (long long)xfs_buf_daddr(bplist[bp_index]),
bplist[bp_index]->b_length, agno);
bplist[bp_index]->b_ops = &xfs_inode_buf_ops;
pftrace("put/writebuf %p (%llu) in AG %d",
bplist[bp_index], (long long)
- XFS_BUF_ADDR(bplist[bp_index]), agno);
+ xfs_buf_daddr(bplist[bp_index]), agno);
if (dirty && !no_modify) {
libxfs_buf_mark_dirty(bplist[bp_index]);
pftrace("getbuf %c %p (%llu) in AG %d (fsbno = %lu) added to queue"
"(inode_bufs_queued = %d, last_bno = %lu)", B_IS_INODE(flag) ?
- 'I' : 'M', bp, (long long)XFS_BUF_ADDR(bp), args->agno, fsbno,
+ 'I' : 'M', bp, (long long)xfs_buf_daddr(bp), args->agno, fsbno,
args->inode_bufs_queued, args->last_bno_read);
pf_start_processing(args);
* otherwise, find as many close together blocks and
* read them in one read
*/
- first_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[0]));
- last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[num-1])) +
+ first_off = LIBXFS_BBTOOFF64(xfs_buf_daddr(bplist[0]));
+ last_off = LIBXFS_BBTOOFF64(xfs_buf_daddr(bplist[num-1])) +
BBTOB(bplist[num-1]->b_length);
while (num > 1 && last_off - first_off > pf_max_bytes) {
num--;
- last_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[num-1])) +
+ last_off = LIBXFS_BBTOOFF64(xfs_buf_daddr(bplist[num-1])) +
BBTOB(bplist[num-1]->b_length);
}
if (num < ((last_off - first_off) >> (mp->m_sb.sb_blocklog + 3))) {
*/
last_off = first_off + BBTOB(bplist[0]->b_length);
for (i = 1; i < num; i++) {
- next_off = LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[i])) +
+ next_off = LIBXFS_BBTOOFF64(xfs_buf_daddr(bplist[i])) +
BBTOB(bplist[i]->b_length);
if (next_off - last_off > pf_batch_bytes)
break;
for (i = 0; i < num; i++) {
if (btree_delete(args->io_queue, XFS_DADDR_TO_FSB(mp,
- XFS_BUF_ADDR(bplist[i]))) == NULL)
+ xfs_buf_daddr(bplist[i]))) == NULL)
do_error(_("prefetch corruption\n"));
}
}
#ifdef XR_PF_TRACE
pftrace("reading bbs %llu to %llu (%d bufs) from %s queue in AG %d (last_bno = %lu, inode_bufs = %d)",
- (long long)XFS_BUF_ADDR(bplist[0]),
- (long long)XFS_BUF_ADDR(bplist[num-1]), num,
+ (long long)xfs_buf_daddr(bplist[0]),
+ (long long)xfs_buf_daddr(bplist[num-1]), num,
(which != PF_SECONDARY) ? "pri" : "sec", args->agno,
args->last_bno_read, args->inode_bufs_queued);
#endif
*/
for (i = 0; i < num; i++) {
- pbuf = ((char *)buf) + (LIBXFS_BBTOOFF64(XFS_BUF_ADDR(bplist[i])) - first_off);
+ pbuf = ((char *)buf) + (LIBXFS_BBTOOFF64(xfs_buf_daddr(bplist[i])) - first_off);
size = BBTOB(bplist[i]->b_length);
if (len < size)
break;
pftrace("putbuf %c %p (%llu) in AG %d",
B_IS_INODE(libxfs_buf_priority(bplist[i])) ?
'I' : 'M',
- bplist[i], (long long)XFS_BUF_ADDR(bplist[i]),
+ bplist[i], (long long)xfs_buf_daddr(bplist[i]),
args->agno);
libxfs_buf_relse(bplist[i]);
}