/* rmap buffer target for btree */
struct xfs_buftarg *ar_xmbtp;
- /* rmap observations, p4 */
- struct xfs_slab *ar_rmaps;
+ /* rmaps for rebuilt ag btrees */
+ struct xfs_slab *ar_agbtree_rmaps;
- /* unmerged rmaps */
- struct xfs_slab *ar_raw_rmaps;
+ /* refcount items, p4-5 */
+ struct xfs_slab *ar_refcount_items;
/* agfl entries from leftover agbt allocations */
int ar_flcount;
-
- /* last rmap seen */
- struct xfs_rmap_irec ar_last_rmap;
-
- /* refcount items, p4-5 */
- struct xfs_slab *ar_refcount_items;
};
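
/*
 * Overview (a reading of this diff, not text from the original file):
 * ordinary rmap observations now go straight into the per-AG in-memory
 * rmap btree via rmap_add_mem_rec(), so the old ar_rmaps/ar_raw_rmaps
 * slabs and the pass that merged them are gone.  The one surviving
 * slab, ar_agbtree_rmaps, stages only the mappings for blocks consumed
 * by the AG btrees that phase 5 rebuilds.
 */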
/* Only the parts of struct xfs_rmap_irec that we need to compute refcounts. */
struct xfs_mount *mp,
struct xfs_ag_rmap *ag_rmap)
{
+ free_slab(&ag_rmap->ar_agbtree_rmaps);
free_slab(&ag_rmap->ar_refcount_items);
if (!rmaps_has_observations(ag_rmap))
if (error)
goto nomem;
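+	/* Stage rmaps for the rebuilt AG btrees in their own slab. */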
+ error = init_slab(&ag_rmap->ar_agbtree_rmaps,
+ sizeof(struct xfs_rmap_irec));
+ if (error)
+ goto nomem;
+
return;
nomem:
do_error(
struct xfs_mount *mp)
{
xfs_agnumber_t i;
- int error;
if (!rmap_needs_work(mp))
return;
if (!ag_rmaps)
do_error(_("couldn't allocate per-AG reverse map roots\n"));
- for (i = 0; i < mp->m_sb.sb_agcount; i++) {
+ for (i = 0; i < mp->m_sb.sb_agcount; i++)
rmaps_init_ag(mp, i, &ag_rmaps[i]);
-
- error = init_slab(&ag_rmaps[i].ar_rmaps,
- sizeof(struct xfs_rmap_irec));
- if (error)
- do_error(
-_("Insufficient memory while allocating reverse mapping slabs."));
- error = init_slab(&ag_rmaps[i].ar_raw_rmaps,
- sizeof(struct xfs_rmap_irec));
- if (error)
- do_error(
-_("Insufficient memory while allocating raw metadata reverse mapping slabs."));
- ag_rmaps[i].ar_last_rmap.rm_owner = XFS_RMAP_OWN_UNKNOWN;
- }
}
/*
if (!rmap_needs_work(mp))
return;
- for (i = 0; i < mp->m_sb.sb_agcount; i++) {
- free_slab(&ag_rmaps[i].ar_rmaps);
- free_slab(&ag_rmaps[i].ar_raw_rmaps);
+ for (i = 0; i < mp->m_sb.sb_agcount; i++)
rmaps_destroy(mp, &ag_rmaps[i]);
- }
free(ag_rmaps);
ag_rmaps = NULL;
}
* Add an observation about a block mapping in an inode's data or attribute
* fork for later btree reconstruction.
*/
-int
+void
rmap_add_rec(
struct xfs_mount *mp,
xfs_ino_t ino,
struct xfs_rmap_irec rmap;
xfs_agnumber_t agno;
xfs_agblock_t agbno;
- struct xfs_rmap_irec *last_rmap;
- int error = 0;
if (!rmap_needs_work(mp))
- return 0;
+ return;
agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
rmap.rm_flags |= XFS_RMAP_UNWRITTEN;
rmap_add_mem_rec(mp, agno, &rmap);
-
- last_rmap = &ag_rmaps[agno].ar_last_rmap;
- if (last_rmap->rm_owner == XFS_RMAP_OWN_UNKNOWN)
- *last_rmap = rmap;
- else if (rmaps_are_mergeable(last_rmap, &rmap))
- last_rmap->rm_blockcount += rmap.rm_blockcount;
- else {
- error = slab_add(ag_rmaps[agno].ar_rmaps, last_rmap);
- if (error)
- return error;
- *last_rmap = rmap;
- }
-
- return error;
-}
-
-/* Finish collecting inode data/attr fork rmaps. */
-int
-rmap_finish_collecting_fork_recs(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
-{
- if (!rmap_needs_work(mp) ||
- ag_rmaps[agno].ar_last_rmap.rm_owner == XFS_RMAP_OWN_UNKNOWN)
- return 0;
- return slab_add(ag_rmaps[agno].ar_rmaps, &ag_rmaps[agno].ar_last_rmap);
}
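
/*
 * Hypothetical caller sketch (not part of this diff): with
 * rmap_add_rec() returning void, and failure presumably handled by
 * do_error() inside rmap_add_mem_rec(), a phase 4 call site reduces to
 *
 *	rmap_add_rec(mp, ino, whichfork, &irec);
 *
 * with no return value left to check or propagate.
 */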
-/* add a raw rmap; these will be merged later */
+/* add a raw rmap to the in-memory rmap btree */
-static int
+static void
__rmap_add_raw_rec(
struct xfs_mount *mp,
xfs_agnumber_t agno,
rmap.rm_blockcount = len;
rmap_add_mem_rec(mp, agno, &rmap);
- return slab_add(ag_rmaps[agno].ar_raw_rmaps, &rmap);
}
/*
* Add a reverse mapping for an inode fork's block mapping btree block.
*/
-int
+void
rmap_add_bmbt_rec(
struct xfs_mount *mp,
xfs_ino_t ino,
xfs_agblock_t agbno;
if (!rmap_needs_work(mp))
- return 0;
+ return;
agno = XFS_FSB_TO_AGNO(mp, fsbno);
agbno = XFS_FSB_TO_AGBNO(mp, fsbno);
ASSERT(agno < mp->m_sb.sb_agcount);
ASSERT(agbno + 1 <= mp->m_sb.sb_agblocks);
- return __rmap_add_raw_rec(mp, agno, agbno, 1, ino,
- whichfork == XFS_ATTR_FORK, true);
+ __rmap_add_raw_rec(mp, agno, agbno, 1, ino, whichfork == XFS_ATTR_FORK,
+ true);
}
/*
* Add a reverse mapping for a per-AG fixed metadata extent.
*/
-int
+STATIC void
rmap_add_ag_rec(
struct xfs_mount *mp,
xfs_agnumber_t agno,
uint64_t owner)
{
if (!rmap_needs_work(mp))
- return 0;
+ return;
ASSERT(agno != NULLAGNUMBER);
ASSERT(agno < mp->m_sb.sb_agcount);
ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
- return __rmap_add_raw_rec(mp, agno, agbno, len, owner, false, false);
+ __rmap_add_raw_rec(mp, agno, agbno, len, owner, false, false);
}
/*
assert(libxfs_verify_agbext(pag, agbno, len));
libxfs_perag_put(pag);
- return slab_add(ag_rmaps[agno].ar_raw_rmaps, &rmap);
-}
-
-/*
- * Merge adjacent raw rmaps and add them to the main rmap list.
- */
-int
-rmap_fold_raw_recs(
- struct xfs_mount *mp,
- xfs_agnumber_t agno)
-{
- struct xfs_slab_cursor *cur = NULL;
- struct xfs_rmap_irec *prev, *rec;
- uint64_t old_sz;
- int error = 0;
-
- old_sz = slab_count(ag_rmaps[agno].ar_rmaps);
- if (slab_count(ag_rmaps[agno].ar_raw_rmaps) == 0)
- goto no_raw;
- qsort_slab(ag_rmaps[agno].ar_raw_rmaps, rmap_compare);
- error = init_slab_cursor(ag_rmaps[agno].ar_raw_rmaps, rmap_compare,
- &cur);
- if (error)
- goto err;
-
- prev = pop_slab_cursor(cur);
- rec = pop_slab_cursor(cur);
- while (prev && rec) {
- if (rmaps_are_mergeable(prev, rec)) {
- prev->rm_blockcount += rec->rm_blockcount;
- rec = pop_slab_cursor(cur);
- continue;
- }
- error = slab_add(ag_rmaps[agno].ar_rmaps, prev);
- if (error)
- goto err;
- prev = rec;
- rec = pop_slab_cursor(cur);
- }
- if (prev) {
- error = slab_add(ag_rmaps[agno].ar_rmaps, prev);
- if (error)
- goto err;
- }
- free_slab(&ag_rmaps[agno].ar_raw_rmaps);
- error = init_slab(&ag_rmaps[agno].ar_raw_rmaps,
- sizeof(struct xfs_rmap_irec));
- if (error)
- do_error(
-_("Insufficient memory while allocating raw metadata reverse mapping slabs."));
-no_raw:
- if (old_sz)
- qsort_slab(ag_rmaps[agno].ar_rmaps, rmap_compare);
-err:
- free_slab_cursor(&cur);
- return error;
+ return slab_add(ag_rmaps[agno].ar_agbtree_rmaps, &rmap);
}
static int
* Add an allocation group's fixed metadata to the rmap list. This includes
* sb/agi/agf/agfl headers, inode chunks, and the log.
*/
-int
+void
rmap_add_fixed_ag_rec(
struct xfs_mount *mp,
xfs_agnumber_t agno)
xfs_agblock_t agbno;
ino_tree_node_t *ino_rec;
xfs_agino_t agino;
- int error;
int startidx;
int nr;
if (!rmap_needs_work(mp))
- return 0;
+ return;
/* sb/agi/agf/agfl headers */
- error = rmap_add_ag_rec(mp, agno, 0, XFS_BNO_BLOCK(mp),
- XFS_RMAP_OWN_FS);
- if (error)
- goto out;
+ rmap_add_ag_rec(mp, agno, 0, XFS_BNO_BLOCK(mp), XFS_RMAP_OWN_FS);
/* inodes */
ino_rec = findfirst_inode_rec(agno);
agino = ino_rec->ino_startnum + startidx;
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
if (XFS_AGINO_TO_OFFSET(mp, agino) == 0) {
- error = rmap_add_ag_rec(mp, agno, agbno, nr,
+ rmap_add_ag_rec(mp, agno, agbno, nr,
XFS_RMAP_OWN_INODES);
- if (error)
- goto out;
}
}
fsbno = mp->m_sb.sb_logstart;
if (fsbno && XFS_FSB_TO_AGNO(mp, fsbno) == agno) {
agbno = XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart);
- error = rmap_add_ag_rec(mp, agno, agbno, mp->m_sb.sb_logblocks,
+ rmap_add_ag_rec(mp, agno, agbno, mp->m_sb.sb_logblocks,
XFS_RMAP_OWN_LOG);
- if (error)
- goto out;
}
-out:
- return error;
}
/*
if (!xfs_has_rmapbt(mp))
return 0;
- /* Release the ar_rmaps; they were put into the rmapbt during p5. */
- free_slab(&ag_rmap->ar_rmaps);
- error = init_slab(&ag_rmap->ar_rmaps, sizeof(struct xfs_rmap_irec));
- if (error)
- goto err;
-
/* Add the AGFL blocks to the rmap list */
error = -libxfs_trans_read_buf(
mp, NULL, mp->m_ddev_targp,
* space btree blocks, so we must be careful not to create those
* records again. Create a bitmap of already-recorded OWN_AG rmaps.
*/
- error = init_slab_cursor(ag_rmap->ar_raw_rmaps, rmap_compare, &rm_cur);
+ error = init_slab_cursor(ag_rmap->ar_agbtree_rmaps, rmap_compare,
+ &rm_cur);
if (error)
goto err;
error = -bitmap_alloc(&own_ag_bitmap);
agbno = be32_to_cpu(*b);
if (!bitmap_test(own_ag_bitmap, agbno, 1)) {
- error = rmap_add_ag_rec(mp, agno, agbno, 1,
+ error = rmap_add_agbtree_mapping(mp, agno, agbno, 1,
XFS_RMAP_OWN_AG);
if (error)
goto err;
agflbp = NULL;
bitmap_free(&own_ag_bitmap);
- /* Merge all the raw rmaps into the main list */
- error = rmap_fold_raw_recs(mp, agno);
- if (error)
- goto err;
-
/* Create cursors to rmap structures */
- error = init_slab_cursor(ag_rmap->ar_rmaps, rmap_compare, &rm_cur);
+ error = init_slab_cursor(ag_rmap->ar_agbtree_rmaps, rmap_compare,
+ &rm_cur);
if (error)
goto err;
if (!xfs_has_reflink(mp))
return 0;
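+	/* Nothing to verify for an AG with no rmap observations. */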
+ if (!rmaps_has_observations(&ag_rmaps[agno]))
+ return 0;
error = rmap_init_mem_cursor(mp, NULL, agno, &rmcur);
if (error)
return nr;
}
-/*
- * Return a slab cursor that will return rmap objects in order.
- */
-int
-rmap_init_cursor(
- xfs_agnumber_t agno,
- struct xfs_slab_cursor **cur)
-{
- return init_slab_cursor(ag_rmaps[agno].ar_rmaps, rmap_compare, cur);
-}
-
/*
* Disable the refcount btree check.
*/