xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_SPINODES);
}
+/* True for a V5 superblock with the zoned-RT-device incompat feature bit. */
+static inline bool xfs_sb_version_haszoned(struct xfs_sb *sbp)
+{
+ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+ xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_ZONED);
+}
+
#endif /* __LIBXFS_H__ */
* rtgroup, so this mask must be 64-bit.
*/
uint64_t blkmask;
+
+ /*
+ * Start of the first group in the device. This is used to support a
+ * RT device following the data device on the same block device for
+ * SMR hard drives.
+ */
+ xfs_fsblock_t start_fsb;
};
/*
progname);
exit(1);
}
- if (xi->rt.dev &&
+ /*
+  * An internal RT device (same block device as the data device) shares
+  * m_ddev_targp, so the separate-buftarg consistency check below does
+  * not apply to it.  Note: must be "&& !=", not "|| ==" — the intent is
+  * to EXCLUDE the shared-device case, matching the buftarg setup which
+  * aliases m_rtdev_targp to m_ddev_targp when rt.dev == data.dev.
+  */
+ if ((xi->rt.dev && xi->rt.dev != xi->data.dev) &&
 (mp->m_rtdev_targp->bt_bdev != xi->rt.dev ||
 mp->m_rtdev_targp->bt_mount != mp)) {
fprintf(stderr,
else
mp->m_logdev_targp = libxfs_buftarg_alloc(mp, xi, &xi->log,
lfail);
- mp->m_rtdev_targp = libxfs_buftarg_alloc(mp, xi, &xi->rt, rfail);
+ if (!xi->rt.dev || xi->rt.dev == xi->data.dev)
+ mp->m_rtdev_targp = mp->m_ddev_targp;
+ else
+ mp->m_rtdev_targp = libxfs_buftarg_alloc(mp, xi, &xi->rt,
+ rfail);
}
/* Compute maximum possible height for per-AG btree types for this fs. */
error = err2;
}
- if (mp->m_rtdev_targp) {
+ if (mp->m_rtdev_targp && mp->m_rtdev_targp != mp->m_ddev_targp) {
err2 = libxfs_flush_buftarg(mp->m_rtdev_targp,
_("realtime device"));
if (!error)
free(mp->m_fsname);
mp->m_fsname = NULL;
- libxfs_buftarg_free(mp->m_rtdev_targp);
+ if (mp->m_rtdev_targp != mp->m_ddev_targp)
+ libxfs_buftarg_free(mp->m_rtdev_targp);
if (mp->m_logdev_targp != mp->m_ddev_targp)
libxfs_buftarg_free(mp->m_logdev_targp);
libxfs_buftarg_free(mp->m_ddev_targp);
if (!mp->m_rtdev_targp->bt_bdev)
return NULL;
+ ASSERT(!mp->m_sb.sb_rtstart);
+
error = libxfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR,
XFS_FSB_TO_BB(mp, 1), 0, &bp, &xfs_rtsb_buf_ops);
if (error)
xfs_agblock_t gbno)
{
struct xfs_mount *mp = xg->xg_mount;
- uint32_t blocks = mp->m_groups[xg->xg_type].blocks;
+ struct xfs_groups *g = &mp->m_groups[xg->xg_type];
+ xfs_fsblock_t fsbno;
- return XFS_FSB_TO_BB(mp, (xfs_fsblock_t)xg->xg_gno * blocks + gbno);
+ fsbno = (xfs_fsblock_t)xg->xg_gno * g->blocks + gbno;
+ return XFS_FSB_TO_BB(mp, g->start_fsb + fsbno);
}
static inline uint32_t
xfs_rgnumber_t rgno = xfs_rtb_to_rgno(mp, rtbno);
uint64_t start_bno = (xfs_rtblock_t)rgno * g->blocks;
- return XFS_FSB_TO_BB(mp, start_bno + (rtbno & g->blkmask));
+ return XFS_FSB_TO_BB(mp,
+ g->start_fsb + start_bno + (rtbno & g->blkmask));
}
static inline xfs_rtblock_t
struct xfs_mount *mp,
xfs_daddr_t daddr)
{
- xfs_rfsblock_t bno = XFS_BB_TO_FSBT(mp, daddr);
+ struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG];
+ xfs_rfsblock_t bno;
+ bno = XFS_BB_TO_FSBT(mp, daddr) - g->start_fsb;
if (xfs_has_rtgroups(mp)) {
- struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG];
xfs_rgnumber_t rgno;
uint32_t rgbno;
rgs->blocks = sbp->sb_rgextents * sbp->sb_rextsize;
rgs->blklog = mp->m_sb.sb_rgblklog;
rgs->blkmask = xfs_mask32lo(mp->m_sb.sb_rgblklog);
+ rgs->start_fsb = mp->m_sb.sb_rtstart;
} else {
rgs->blocks = 0;
rgs->blklog = 0;
*
* size is the size of data which is valid for this sb.
*/
- if (xfs_sb_version_hasmetadir(sb))
+ if (xfs_sb_version_haszoned(sb))
+ size = offsetofend(struct xfs_dsb, sb_rtreserved);
+ else if (xfs_sb_version_hasmetadir(sb))
size = offsetofend(struct xfs_dsb, sb_pad);
else if (xfs_sb_version_hasmetauuid(sb))
size = offsetofend(struct xfs_dsb, sb_meta_uuid);