validate_extsize(mp, dino, lino, dirty);
- if (dino->di_version >= 3)
+ if (dino->di_version >= 3 &&
+ (!xfs_has_zoned(mp) ||
+ dino->di_metatype != cpu_to_be16(XFS_METAFILE_RTRMAP)))
validate_cowextsize(mp, dino, lino, dirty);
/* nsec fields cannot be larger than 1 billion */
check_rtmetadata(
struct xfs_mount *mp)
{
+ if (xfs_has_zoned(mp)) {
+ /*
+ * Here we could/should verify the zone state a bit when we are
+ * on actual zoned devices:
+ * - compare hw write pointer to last written
+ * - compare zone state to last written
+ *
+ * Not much we can do when running in zoned mode on a
+ * conventional device.
+ */
+ return;
+ }
+
generate_rtinfo(mp);
check_rtbitmap(mp);
check_rtsummary(mp);
return;
while ((rtg = xfs_rtgroup_next(mp, rtg))) {
- ensure_rtgroup_bitmap(rtg);
- ensure_rtgroup_summary(rtg);
+ if (!xfs_has_zoned(mp)) {
+ ensure_rtgroup_bitmap(rtg);
+ ensure_rtgroup_summary(rtg);
+ }
ensure_rtgroup_rmapbt(rtg, est_fdblocks);
ensure_rtgroup_refcountbt(rtg, est_fdblocks);
}
xfs_fileoff_t bno = 0;
int error;
+ ASSERT(!xfs_has_zoned(mp));
+
if (!ip) {
do_warn(_("unable to open %s file\n"), filename);
return;
return error;
}
+/*
+ * Recompute the used-block count for a realtime group in zoned mode.
+ *
+ * Walks this rtgroup's incore block usage map from block 0 to the end of
+ * the group, summing the lengths of all extents whose state is in-use
+ * (XR_E_INUSE or XR_E_INUSE_FS), and stores the total in the group's
+ * rmap inode i_used_blocks field.
+ *
+ * NOTE(review): loop termination assumes get_bmap_ext() always returns a
+ * nonzero *blen for gbno < end -- confirm against its implementation.
+ */
+static void
+rtgroup_update_counters(
+ struct xfs_rtgroup *rtg)
+{
+ struct xfs_inode *rmapip = rtg->rtg_inodes[XFS_RTGI_RMAP];
+ struct xfs_mount *mp = rtg_mount(rtg);
+ uint64_t end =
+ xfs_rtbxlen_to_blen(mp, rtg->rtg_extents);
+ xfs_agblock_t gbno = 0;
+ uint64_t used = 0;
+
+ /* Walk the usage map one extent at a time, counting in-use blocks. */
+ do {
+ int bstate;
+ xfs_extlen_t blen;
+
+ bstate = get_bmap_ext(rtg_rgno(rtg), gbno, end, &blen, true);
+ switch (bstate) {
+ case XR_E_INUSE:
+ case XR_E_INUSE_FS:
+ used += blen;
+ break;
+ default:
+ /* free/unknown states do not count toward usage */
+ break;
+ }
+
+ gbno += blen;
+ } while (gbno < end);
+
+ rmapip->i_used_blocks = used;
+}
+
/* Update the inode counters. */
STATIC int
xrep_rtrmap_reset_counters(
* generated.
*/
sc->ip->i_nblocks = rr->new_fork_info.ifake.if_blocks;
+ if (xfs_has_zoned(sc->mp))
+ rtgroup_update_counters(rr->rtg);
libxfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE);
/* Quotas don't exist so we're done. */
* Done with the block usage maps, toss them. Realtime metadata aren't
* rebuilt until phase 6, so we have to keep them around.
*/
- if (mp->m_sb.sb_rblocks == 0)
+ if (mp->m_sb.sb_rblocks == 0) {
rmaps_free(mp);
- free_bmaps(mp);
+ free_bmaps(mp);
+ }
if (!bad_ino_btree) {
phase6(mp);
phase_end(mp, 6);
- if (mp->m_sb.sb_rblocks != 0)
+ if (mp->m_sb.sb_rblocks != 0) {
rmaps_free(mp);
+ free_bmaps(mp);
+ }
free_rtgroup_inodes();
phase7(mp, phase2_threads);