xfs: add sysfs stats for zoned GC
author     Christoph Hellwig <hch@lst.de>
           Fri, 30 Jan 2026 05:19:26 +0000 (06:19 +0100)
committer  Carlos Maiolino <cem@kernel.org>
           Fri, 30 Jan 2026 09:41:42 +0000 (10:41 +0100)
Add counters for read, write and zone_reset operations, as well as the
number of bytes written by GC, to sysfs.  This way they can easily be
used by monitoring tools and test cases.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
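
Not part of the commit: a minimal userspace sketch of how a monitoring tool or
test might read the new counters.  The sysfs path below is an assumption based
on where the aggregated XFS stats have traditionally been exposed; the "zoned"
and "gc xpc" line prefixes come from the xfs_stats_format() changes in the diff
below.

/*
 * Hedged example: dump the zoned-GC counters added by this patch from the
 * global XFS stats file.  Path is an assumption; adjust if your kernel
 * exposes the stats elsewhere.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/fs/xfs/stats/stats";	/* assumed location */
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* "zoned <reads> <writes> <zone_resets>" and "gc xpc <bytes>" */
		if (!strncmp(line, "zoned ", 6) || !strncmp(line, "gc xpc ", 7))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}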
fs/xfs/xfs_stats.c
fs/xfs/xfs_stats.h
fs/xfs/xfs_zone_gc.c

diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 3fe1f5412537f2bd8eef443320704eb571355f2e..017db0361cd860e7c3b64eca02f4624b482435d7 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -24,6 +24,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
        uint64_t        xs_write_bytes = 0;
        uint64_t        xs_read_bytes = 0;
        uint64_t        xs_defer_relog = 0;
+       uint64_t        xs_gc_bytes = 0;
 
        static const struct xstats_entry {
                char    *desc;
@@ -57,7 +58,8 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
                { "rtrmapbt_mem",       xfsstats_offset(xs_rtrefcbt_2)  },
                { "rtrefcntbt",         xfsstats_offset(xs_qm_dqreclaims)},
                /* we print both series of quota information together */
-               { "qm",                 xfsstats_offset(xs_xstrat_bytes)},
+               { "qm",                 xfsstats_offset(xs_gc_read_calls)},
+               { "zoned",              xfsstats_offset(__pad1)},
        };
 
        /* Loop over all stats groups */
@@ -77,6 +79,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
                xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
                xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
                xs_defer_relog += per_cpu_ptr(stats, i)->s.xs_defer_relog;
+               xs_gc_bytes += per_cpu_ptr(stats, i)->s.xs_gc_bytes;
        }
 
        len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
@@ -89,6 +92,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
 #else
                0);
 #endif
+       len += scnprintf(buf + len, PATH_MAX-len, "gc xpc %llu\n", xs_gc_bytes);
 
        return len;
 }
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index d86c6ce3501070fda20fcf40d59a9a1de40af863..153d2381d0a8575b9a9a195e451de477a522c941 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -138,11 +138,17 @@ struct __xfsstats {
        uint32_t                xs_qm_dqwants;
        uint32_t                xs_qm_dquot;
        uint32_t                xs_qm_dquot_unused;
+/* Zone GC counters */
+       uint32_t                xs_gc_read_calls;
+       uint32_t                xs_gc_write_calls;
+       uint32_t                xs_gc_zone_reset_calls;
+       uint32_t                __pad1;
 /* Extra precision counters */
        uint64_t                xs_xstrat_bytes;
        uint64_t                xs_write_bytes;
        uint64_t                xs_read_bytes;
        uint64_t                xs_defer_relog;
+       uint64_t                xs_gc_bytes;
 };
 
 #define        xfsstats_offset(f)      (offsetof(struct __xfsstats, f)/sizeof(uint32_t))
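
For context, xfsstats_offset() measures field positions in uint32_t-sized
slots, and the table in xfs_stats.c uses those slot indices as group end
points, so the "zoned" group above ends at __pad1 and covers the three new
call counters.  The explicit pad word also keeps the uint64_t extra-precision
counters that follow 8-byte aligned, without the compiler inserting a hidden
hole that the slot arithmetic would not see.  A standalone sketch of the same
idea, using hypothetical demo_stats/demo_offset names (not kernel code):

/*
 * Userspace sketch of the xfsstats_offset() idea: offsets are measured in
 * uint32_t slots, and an explicit pad keeps the 64-bit counters that follow
 * naturally aligned with no implicit padding.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint32_t xs_gc_read_calls;
	uint32_t xs_gc_write_calls;
	uint32_t xs_gc_zone_reset_calls;
	uint32_t pad;			/* keeps the u64 fields 8-byte aligned */
	uint64_t xs_xstrat_bytes;
	uint64_t xs_gc_bytes;
};

#define demo_offset(f)	(offsetof(struct demo_stats, f) / sizeof(uint32_t))

int main(void)
{
	/* The 64-bit counters start naturally aligned ... */
	static_assert(offsetof(struct demo_stats, xs_xstrat_bytes) % 8 == 0,
		      "u64 counters must be naturally aligned");
	/* ... and the explicit pad means the compiler adds no hidden hole
	 * that the u32-slot arithmetic would fail to account for. */
	static_assert(offsetof(struct demo_stats, xs_xstrat_bytes) ==
		      offsetof(struct demo_stats, pad) + sizeof(uint32_t),
		      "no implicit padding before the u64 counters");

	/* In this demo the "zoned" group spans slots [0, demo_offset(pad)). */
	printf("zoned demo group ends at u32 slot %zu\n", demo_offset(pad));
	return 0;
}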
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 57010218490459d762c2a2d1f2c03f36be122bbc..1f1f9fc973af6bd28e491b36177bbd5cd271134c 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -712,6 +712,8 @@ xfs_zone_gc_start_chunk(
        data->scratch_head = (data->scratch_head + len) % data->scratch_size;
        data->scratch_available -= len;
 
+       XFS_STATS_INC(mp, xs_gc_read_calls);
+
        WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
        list_add_tail(&chunk->entry, &data->reading);
        xfs_zone_gc_iter_advance(iter, irec.rm_blockcount);
@@ -815,6 +817,9 @@ xfs_zone_gc_write_chunk(
                return;
        }
 
+       XFS_STATS_INC(mp, xs_gc_write_calls);
+       XFS_STATS_ADD(mp, xs_gc_bytes, chunk->len);
+
        WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
        list_move_tail(&chunk->entry, &data->writing);
 
@@ -911,6 +916,8 @@ xfs_submit_zone_reset_bio(
                return;
        }
 
+       XFS_STATS_INC(mp, xs_gc_zone_reset_calls);
+
        bio->bi_iter.bi_sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
        if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {
                /*