.keys_contiguous = xfs_rmapbt_keys_contiguous,
};
-static struct xfs_btree_cur *
-xfs_rmapbt_init_common(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- struct xfs_perag *pag)
-{
- struct xfs_btree_cur *cur;
-
- cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
- mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
- cur->bc_ag.pag = xfs_perag_hold(pag);
- return cur;
-}
-
-/* Create a new reverse mapping btree cursor. */
+/*
+ * Create a new reverse mapping btree cursor.
+ *
+ * For staging cursors, tp and agbp are NULL.
+ */
struct xfs_btree_cur *
xfs_rmapbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
struct xfs_perag *pag)
{
- struct xfs_agf *agf = agbp->b_addr;
struct xfs_btree_cur *cur;

- cur = xfs_rmapbt_init_common(mp, tp, pag);
- cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
+ cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
+ mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
+ cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.agbp = agbp;
+ if (agbp) {
+ struct xfs_agf *agf = agbp->b_addr;
+
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
+ }
return cur;
}
/* Create a new reverse mapping btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
struct xfs_mount *mp,
struct xbtree_afakeroot *afake,
struct xfs_perag *pag)
{
struct xfs_btree_cur *cur;

- cur = xfs_rmapbt_init_common(mp, NULL, pag);
+ cur = xfs_rmapbt_init_cursor(mp, NULL, NULL, pag);
xfs_btree_stage_afakeroot(cur, afake);
return cur;
}
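
With this change xfs_rmapbt_init_cursor() covers both the regular and the staging case, so the xfs_rmapbt_init_common() helper goes away. As a rough illustration of the two call patterns (not part of the patch; example_open_rmap_cursor() and its argument handling are made up, and the usual xfs headers are assumed):

static struct xfs_btree_cur *
example_open_rmap_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,	/* AGF buffer, NULL when staging */
	struct xbtree_afakeroot	*afake,	/* fake root, non-NULL only when staging */
	struct xfs_perag	*pag)
{
	if (afake)
		/* Staging (e.g. online repair): no transaction or AGF buffer yet. */
		return xfs_rmapbt_stage_cursor(mp, afake, pag);

	/* Regular cursor: bc_nlevels is filled in from the AGF the caller holds. */
	return xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
}

Handling a NULL agbp directly in xfs_rmapbt_init_cursor() is what lets xfs_rmapbt_stage_cursor() reuse it instead of the removed _init_common() helper.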