kfree(zi);
}
+/*
+ * Walk all realtime groups of @mp, query each group's write pointer and
+ * initialize the corresponding zone state in @iz.
+ *
+ * Returns 0 on success, or the first error reported by
+ * xfs_query_write_pointer() / xfs_init_zone().  On error, the reference
+ * the iterator holds on the current rtgroup is released before returning.
+ */
+static int
+xfs_report_zones(
+ struct xfs_mount *mp,
+ struct xfs_init_zones *iz)
+{
+ struct xfs_rtgroup *rtg = NULL;
+
+ while ((rtg = xfs_rtgroup_next(mp, rtg))) {
+ xfs_rgblock_t write_pointer;
+ int error;
+
+ /* Query this group's write pointer; only init the zone on success. */
+ error = xfs_query_write_pointer(iz, rtg, &write_pointer);
+ if (!error)
+ error = xfs_init_zone(iz, rtg, write_pointer);
+ if (error) {
+ /* Drop the iterator's group reference before bailing out. */
+ xfs_rtgroup_rele(rtg);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
int
xfs_mount_zones(
struct xfs_mount *mp)
.zone_capacity = mp->m_groups[XG_TYPE_RTG].blocks,
.zone_size = xfs_rtgroup_raw_size(mp),
};
- struct xfs_rtgroup *rtg = NULL;
int error;
if (!mp->m_rtdev_targp) {
if (!mp->m_zone_info)
return -ENOMEM;
- xfs_info(mp, "%u zones of %u blocks (%u max open zones)",
- mp->m_sb.sb_rgcount, iz.zone_capacity, mp->m_max_open_zones);
- trace_xfs_zones_mount(mp);
+ error = xfs_report_zones(mp, &iz);
+ if (error)
+ goto out_free_zone_info;
+
+ xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
+ xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
+ iz.available + iz.reclaimable);
/*
* The writeback code switches between inodes regularly to provide
XFS_FSB_TO_B(mp, min(iz.zone_capacity, XFS_MAX_BMBT_EXTLEN)) >>
PAGE_SHIFT;
- while ((rtg = xfs_rtgroup_next(mp, rtg))) {
- xfs_rgblock_t write_pointer;
-
- error = xfs_query_write_pointer(&iz, rtg, &write_pointer);
- if (!error)
- error = xfs_init_zone(&iz, rtg, write_pointer);
- if (error) {
- xfs_rtgroup_rele(rtg);
- goto out_free_zone_info;
- }
- }
-
- xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available);
- xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
- iz.available + iz.reclaimable);
-
/*
* The user may configure GC to free up a percentage of unused blocks.
* By default this is 0. GC will always trigger at the minimum level
error = xfs_zone_gc_mount(mp);
if (error)
goto out_free_zone_info;
+
+ xfs_info(mp, "%u zones of %u blocks (%u max open zones)",
+ mp->m_sb.sb_rgcount, iz.zone_capacity, mp->m_max_open_zones);
+ trace_xfs_zones_mount(mp);
return 0;
out_free_zone_info: