#include "xfs_zones.h"
static bool
-xfs_zone_validate_empty(
+xfs_validate_blk_zone_seq(
+ struct xfs_mount *mp,
struct blk_zone *zone,
- struct xfs_rtgroup *rtg,
+ unsigned int zone_no,
xfs_rgblock_t *write_pointer)
{
- struct xfs_mount *mp = rtg_mount(rtg);
-
- if (rtg_rmap(rtg)->i_used_blocks > 0) {
- xfs_warn(mp, "empty zone %u has non-zero used counter (0x%x).",
- rtg_rgno(rtg), rtg_rmap(rtg)->i_used_blocks);
- return false;
- }
-
- *write_pointer = 0;
- return true;
-}
-
-static bool
-xfs_zone_validate_wp(
- struct blk_zone *zone,
- struct xfs_rtgroup *rtg,
- xfs_rgblock_t *write_pointer)
-{
- struct xfs_mount *mp = rtg_mount(rtg);
- xfs_rtblock_t wp_fsb = xfs_daddr_to_rtb(mp, zone->wp);
-
- if (rtg_rmap(rtg)->i_used_blocks > rtg->rtg_extents) {
- xfs_warn(mp, "zone %u has too large used counter (0x%x).",
- rtg_rgno(rtg), rtg_rmap(rtg)->i_used_blocks);
- return false;
- }
-
- if (xfs_rtb_to_rgno(mp, wp_fsb) != rtg_rgno(rtg)) {
- xfs_warn(mp, "zone %u write pointer (0x%llx) outside of zone.",
- rtg_rgno(rtg), wp_fsb);
- return false;
- }
-
- *write_pointer = xfs_rtb_to_rgbno(mp, wp_fsb);
- if (*write_pointer >= rtg->rtg_extents) {
- xfs_warn(mp, "zone %u has invalid write pointer (0x%x).",
- rtg_rgno(rtg), *write_pointer);
- return false;
- }
-
- return true;
-}
-
-static bool
-xfs_zone_validate_full(
- struct blk_zone *zone,
- struct xfs_rtgroup *rtg,
- xfs_rgblock_t *write_pointer)
-{
- struct xfs_mount *mp = rtg_mount(rtg);
-
- if (rtg_rmap(rtg)->i_used_blocks > rtg->rtg_extents) {
- xfs_warn(mp, "zone %u has too large used counter (0x%x).",
- rtg_rgno(rtg), rtg_rmap(rtg)->i_used_blocks);
- return false;
- }
-
- *write_pointer = rtg->rtg_extents;
- return true;
-}
-
-static bool
-xfs_zone_validate_seq(
- struct blk_zone *zone,
- struct xfs_rtgroup *rtg,
- xfs_rgblock_t *write_pointer)
-{
- struct xfs_mount *mp = rtg_mount(rtg);
-
switch (zone->cond) {
case BLK_ZONE_COND_EMPTY:
- return xfs_zone_validate_empty(zone, rtg, write_pointer);
+ *write_pointer = 0;
+ return true;
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
case BLK_ZONE_COND_CLOSED:
case BLK_ZONE_COND_ACTIVE:
- return xfs_zone_validate_wp(zone, rtg, write_pointer);
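+		/*
+		 * The hardware write pointer must fall inside the zone's
+		 * usable capacity.  Convert it from a device sector address
+		 * to a block offset into the zone.
+		 */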
+ if (zone->wp < zone->start ||
+ zone->wp >= zone->start + zone->capacity) {
+ xfs_warn(mp,
+ "zone %u write pointer (%llu) outside of zone.",
+ zone_no, zone->wp);
+ return false;
+ }
+
+ *write_pointer = XFS_BB_TO_FSB(mp, zone->wp - zone->start);
+ return true;
case BLK_ZONE_COND_FULL:
- return xfs_zone_validate_full(zone, rtg, write_pointer);
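+		/*
+		 * Full zones have no valid hardware write pointer, so treat
+		 * the end of the usable capacity as the write pointer.
+		 */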
+ *write_pointer = XFS_BB_TO_FSB(mp, zone->capacity);
+ return true;
case BLK_ZONE_COND_NOT_WP:
case BLK_ZONE_COND_OFFLINE:
case BLK_ZONE_COND_READONLY:
xfs_warn(mp, "zone %u has unsupported zone condition 0x%x.",
- rtg_rgno(rtg), zone->cond);
+ zone_no, zone->cond);
return false;
default:
xfs_warn(mp, "zone %u has unknown zone condition 0x%x.",
- rtg_rgno(rtg), zone->cond);
+ zone_no, zone->cond);
return false;
}
}
static bool
-xfs_zone_validate_conv(
+xfs_validate_blk_zone_conv(
+ struct xfs_mount *mp,
struct blk_zone *zone,
- struct xfs_rtgroup *rtg)
+ unsigned int zone_no)
{
- struct xfs_mount *mp = rtg_mount(rtg);
-
switch (zone->cond) {
case BLK_ZONE_COND_NOT_WP:
return true;
default:
xfs_warn(mp,
"conventional zone %u has unsupported zone condition 0x%x.",
- rtg_rgno(rtg), zone->cond);
+ zone_no, zone->cond);
return false;
}
}
bool
-xfs_zone_validate(
+xfs_validate_blk_zone(
+ struct xfs_mount *mp,
struct blk_zone *zone,
- struct xfs_rtgroup *rtg,
+ unsigned int zone_no,
+ uint32_t expected_size,
+ uint32_t expected_capacity,
xfs_rgblock_t *write_pointer)
{
- struct xfs_mount *mp = rtg_mount(rtg);
- struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG];
- uint32_t expected_size;
-
/*
* Check that the zone capacity matches the rtgroup size stored in the
* superblock. Note that all zones including the last one must have a
* uniform capacity.
*/
- if (XFS_BB_TO_FSB(mp, zone->capacity) != g->blocks) {
+ if (XFS_BB_TO_FSB(mp, zone->capacity) != expected_capacity) {
xfs_warn(mp,
-"zone %u capacity (0x%llx) does not match RT group size (0x%x).",
- rtg_rgno(rtg), XFS_BB_TO_FSB(mp, zone->capacity),
- g->blocks);
+"zone %u capacity (%llu) does not match RT group size (%u).",
+ zone_no, XFS_BB_TO_FSB(mp, zone->capacity),
+ expected_capacity);
return false;
}
- if (g->has_daddr_gaps) {
- expected_size = 1 << g->blklog;
- } else {
- if (zone->len != zone->capacity) {
- xfs_warn(mp,
-"zone %u has capacity != size ((0x%llx vs 0x%llx)",
- rtg_rgno(rtg),
- XFS_BB_TO_FSB(mp, zone->len),
- XFS_BB_TO_FSB(mp, zone->capacity));
- return false;
- }
- expected_size = g->blocks;
- }
-
if (XFS_BB_TO_FSB(mp, zone->len) != expected_size) {
xfs_warn(mp,
-"zone %u length (0x%llx) does match geometry (0x%x).",
- rtg_rgno(rtg), XFS_BB_TO_FSB(mp, zone->len),
+"zone %u length (%llu) does not match geometry (%u).",
+ zone_no, XFS_BB_TO_FSB(mp, zone->len),
expected_size);
+ return false;
}
switch (zone->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
- return xfs_zone_validate_conv(zone, rtg);
+ return xfs_validate_blk_zone_conv(mp, zone, zone_no);
case BLK_ZONE_TYPE_SEQWRITE_REQ:
- return xfs_zone_validate_seq(zone, rtg, write_pointer);
+ return xfs_validate_blk_zone_seq(mp, zone, zone_no,
+ write_pointer);
default:
xfs_warn(mp, "zoned %u has unsupported type 0x%x.",
- rtg_rgno(rtg), zone->type);
+ zone_no, zone->type);
return false;
}
}
struct xfs_init_zones {
struct xfs_mount *mp;
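+	/* expected size and usable capacity of all zones, in file system blocks */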
+ uint32_t zone_size;
+ uint32_t zone_capacity;
uint64_t available;
uint64_t reclaimable;
};
/*
* For sequential write required zones, we restart writing at the hardware write
- * pointer returned by xfs_zone_validate().
+ * pointer returned by xfs_validate_blk_zone().
*
* For conventional zones or conventional devices we have to query the rmap to
* find the highest recorded block and set the write pointer to the block after
uint32_t used = rtg_rmap(rtg)->i_used_blocks;
int error;
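+	/*
+	 * Sanity check the write pointer and used block count against the
+	 * zone geometry before deriving any allocation state from them.
+	 */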
+ if (write_pointer > rtg->rtg_extents) {
+ xfs_warn(mp, "zone %u has invalid write pointer (0x%x).",
+ rtg_rgno(rtg), write_pointer);
+ return -EFSCORRUPTED;
+ }
+
+ if (used > rtg->rtg_extents) {
+ xfs_warn(mp,
+"zone %u has used counter (0x%x) larger than zone capacity (0x%llx).",
+ rtg_rgno(rtg), used, rtg->rtg_extents);
+ return -EFSCORRUPTED;
+ }
+
+ if (write_pointer == 0 && used != 0) {
+ xfs_warn(mp, "empty zone %u has non-zero used counter (0x%x).",
+ rtg_rgno(rtg), used);
+ return -EFSCORRUPTED;
+ }
+
/*
* If there are no used blocks, but the zone is not in empty state yet
* we lost power before the zoned reset. In that case finish the work
xfs_warn(mp, "realtime group not found for zone %u.", rgno);
return -EFSCORRUPTED;
}
- if (!xfs_zone_validate(zone, rtg, &write_pointer)) {
+ if (!xfs_validate_blk_zone(mp, zone, idx, iz->zone_size,
+ iz->zone_capacity, &write_pointer)) {
xfs_rtgroup_rele(rtg);
return -EFSCORRUPTED;
}
{
struct xfs_init_zones iz = {
.mp = mp,
+ .zone_capacity = mp->m_groups[XG_TYPE_RTG].blocks,
+ .zone_size = xfs_rtgroup_raw_size(mp),
};
struct xfs_buftarg *bt = mp->m_rtdev_targp;
xfs_extlen_t zone_blocks = mp->m_groups[XG_TYPE_RTG].blocks;