	struct list_head	*a,
	struct list_head	*b)
{
-	struct xfs_mount	*mp = priv;
	struct xfs_rmap_intent	*ra;
	struct xfs_rmap_intent	*rb;
	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
-	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
-		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
+
+	return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
}
/* Get an RUI. */
	return NULL;
}
+/* Take a passive ref to the AG containing the space we're rmapping. */
+void
+xfs_rmap_update_get_group(
+	struct xfs_mount	*mp,
+	struct xfs_rmap_intent	*ri)
+{
+	xfs_agnumber_t		agno;
+
+	agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
+	ri->ri_pag = xfs_perag_get(mp, agno);
+}
+
+/* Release a passive AG ref after finishing rmapping work. */
+static inline void
+xfs_rmap_update_put_group(
+	struct xfs_rmap_intent	*ri)
+{
+	xfs_perag_put(ri->ri_pag);
+}
+
/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	int			error;
	ri = container_of(item, struct xfs_rmap_intent, ri_list);
+
	error = xfs_rmap_finish_one(tp, ri, state);
+
+	xfs_rmap_update_put_group(ri);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
	return error;
}
	struct xfs_rmap_intent	*ri;
	ri = container_of(item, struct xfs_rmap_intent, ri_list);
+
+	xfs_rmap_update_put_group(ri);
	kmem_cache_free(xfs_rmap_intent_cache, ri);
}
	struct xfs_btree_cur		**pcur)
{
	struct xfs_mount		*mp = tp->t_mountp;
-	struct xfs_perag		*pag;
	struct xfs_btree_cur		*rcur;
	struct xfs_buf			*agbp = NULL;
	int				error = 0;
	xfs_agblock_t			bno;
	bool				unwritten;
-	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock));
	bno = XFS_FSB_TO_AGBNO(mp, ri->ri_bmap.br_startblock);
-	trace_xfs_rmap_deferred(mp, pag->pag_agno, ri->ri_type, bno,
+	trace_xfs_rmap_deferred(mp, ri->ri_pag->pag_agno, ri->ri_type, bno,
			ri->ri_owner, ri->ri_whichfork,
			ri->ri_bmap.br_startoff, ri->ri_bmap.br_blockcount,
			ri->ri_bmap.br_state);
-	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE)) {
-		error = -EIO;
-		goto out_drop;
-	}
-
+	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE))
+		return -EIO;
	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
	rcur = *pcur;
-	if (rcur != NULL && rcur->bc_ag.pag != pag) {
+	if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
		xfs_rmap_finish_one_cleanup(tp, rcur, 0);
		rcur = NULL;
		*pcur = NULL;
		 * rmapbt, because a shape change could cause us to
		 * allocate blocks.
		 */
-		error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
+		error = xfs_free_extent_fix_freelist(tp, ri->ri_pag, &agbp);
		if (error)
-			goto out_drop;
-		if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
-			error = -EFSCORRUPTED;
-			goto out_drop;
-		}
+			return error;
+		if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
+			return -EFSCORRUPTED;
-		rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
+		rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag);
	}
	*pcur = rcur;
		ASSERT(0);
		error = -EFSCORRUPTED;
	}
-out_drop:
-	xfs_perag_put(pag);
+
	return error;
}
	ri->ri_whichfork = whichfork;
	ri->ri_bmap = *bmap;
+	xfs_rmap_update_get_group(tp->t_mountp, ri);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
}
	int				ri_whichfork;
	uint64_t			ri_owner;
	struct xfs_bmbt_irec		ri_bmap;
+	struct xfs_perag		*ri_pag;
};
+void xfs_rmap_update_get_group(struct xfs_mount *mp,
+		struct xfs_rmap_intent *ri);
+
/* functions for updating the rmapbt based on bmbt map/unmap operations */
void xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
		int whichfork, struct xfs_bmbt_irec *imap);