From e33839b514a8af27ba03f9f2a414d154aa980320 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 30 Jan 2026 06:19:26 +0100 Subject: [PATCH] xfs: add sysfs stats for zoned GC Add counters of read, write and zone_reset operations as well as GC written bytes to sysfs. This way they can be easily used for monitoring tools and test cases. Signed-off-by: Christoph Hellwig Reviewed-by: Carlos Maiolino Reviewed-by: Darrick J. Wong Reviewed-by: Hans Holmberg Signed-off-by: Carlos Maiolino --- fs/xfs/xfs_stats.c | 6 +++++- fs/xfs/xfs_stats.h | 6 ++++++ fs/xfs/xfs_zone_gc.c | 7 +++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c index 3fe1f5412537..017db0361cd8 100644 --- a/fs/xfs/xfs_stats.c +++ b/fs/xfs/xfs_stats.c @@ -24,6 +24,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf) uint64_t xs_write_bytes = 0; uint64_t xs_read_bytes = 0; uint64_t xs_defer_relog = 0; + uint64_t xs_gc_bytes = 0; static const struct xstats_entry { char *desc; @@ -57,7 +58,8 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf) { "rtrmapbt_mem", xfsstats_offset(xs_rtrefcbt_2) }, { "rtrefcntbt", xfsstats_offset(xs_qm_dqreclaims)}, /* we print both series of quota information together */ - { "qm", xfsstats_offset(xs_xstrat_bytes)}, + { "qm", xfsstats_offset(xs_gc_read_calls)}, + { "zoned", xfsstats_offset(__pad1)}, }; /* Loop over all stats groups */ @@ -77,6 +79,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf) xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes; xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes; xs_defer_relog += per_cpu_ptr(stats, i)->s.xs_defer_relog; + xs_gc_bytes += per_cpu_ptr(stats, i)->s.xs_gc_bytes; } len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n", @@ -89,6 +92,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf) #else 0); #endif + len += scnprintf(buf + len, PATH_MAX-len, "zoned_xpc %llu\n", xs_gc_bytes); 
return len; } diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h index d86c6ce35010..153d2381d0a8 100644 --- a/fs/xfs/xfs_stats.h +++ b/fs/xfs/xfs_stats.h @@ -138,11 +138,17 @@ struct __xfsstats { uint32_t xs_qm_dqwants; uint32_t xs_qm_dquot; uint32_t xs_qm_dquot_unused; +/* Zone GC counters */ + uint32_t xs_gc_read_calls; + uint32_t xs_gc_write_calls; + uint32_t xs_gc_zone_reset_calls; + uint32_t __pad1; /* Extra precision counters */ uint64_t xs_xstrat_bytes; uint64_t xs_write_bytes; uint64_t xs_read_bytes; uint64_t xs_defer_relog; + uint64_t xs_gc_bytes; }; #define xfsstats_offset(f) (offsetof(struct __xfsstats, f)/sizeof(uint32_t)) diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c index 570102184904..1f1f9fc973af 100644 --- a/fs/xfs/xfs_zone_gc.c +++ b/fs/xfs/xfs_zone_gc.c @@ -712,6 +712,8 @@ xfs_zone_gc_start_chunk( data->scratch_head = (data->scratch_head + len) % data->scratch_size; data->scratch_available -= len; + XFS_STATS_INC(mp, xs_gc_read_calls); + WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); list_add_tail(&chunk->entry, &data->reading); xfs_zone_gc_iter_advance(iter, irec.rm_blockcount); @@ -815,6 +817,9 @@ xfs_zone_gc_write_chunk( return; } + XFS_STATS_INC(mp, xs_gc_write_calls); + XFS_STATS_ADD(mp, xs_gc_bytes, chunk->len); + WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); list_move_tail(&chunk->entry, &data->writing); @@ -911,6 +916,8 @@ xfs_submit_zone_reset_bio( return; } + XFS_STATS_INC(mp, xs_gc_zone_reset_calls); + bio->bi_iter.bi_sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0); if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) { /* -- 2.47.3