uint64_t xs_write_bytes = 0;
uint64_t xs_read_bytes = 0;
uint64_t xs_defer_relog = 0;
+ uint64_t xs_gc_bytes = 0;
static const struct xstats_entry {
char *desc;
{ "rtrmapbt_mem", xfsstats_offset(xs_rtrefcbt_2) },
{ "rtrefcntbt", xfsstats_offset(xs_qm_dqreclaims)},
/* we print both series of quota information together */
- { "qm", xfsstats_offset(xs_xstrat_bytes)},
+ { "qm", xfsstats_offset(xs_gc_read_calls)},
+ { "zoned", xfsstats_offset(__pad1)},
};
/* Loop over all stats groups */
xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
xs_defer_relog += per_cpu_ptr(stats, i)->s.xs_defer_relog;
+ xs_gc_bytes += per_cpu_ptr(stats, i)->s.xs_gc_bytes;
}
len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
#else
0);
#endif
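+ /* bytes moved by zone GC, reported as a 64-bit extra precision counter */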
+ len += scnprintf(buf + len, PATH_MAX-len, "gc_xpc %llu\n", xs_gc_bytes);
return len;
}
uint32_t xs_qm_dqwants;
uint32_t xs_qm_dquot;
uint32_t xs_qm_dquot_unused;
+/* Zone GC counters */
+ uint32_t xs_gc_read_calls;
+ uint32_t xs_gc_write_calls;
+ uint32_t xs_gc_zone_reset_calls;
+ uint32_t __pad1; /* keep the 64-bit counters below aligned */
/* Extra precision counters */
uint64_t xs_xstrat_bytes;
uint64_t xs_write_bytes;
uint64_t xs_read_bytes;
uint64_t xs_defer_relog;
+ uint64_t xs_gc_bytes;
};
#define xfsstats_offset(f) (offsetof(struct __xfsstats, f)/sizeof(uint32_t))
data->scratch_head = (data->scratch_head + len) % data->scratch_size;
data->scratch_available -= len;
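+ /* account the GC read that pulls this extent into the scratch buffer */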
+ XFS_STATS_INC(mp, xs_gc_read_calls);
+
WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
list_add_tail(&chunk->entry, &data->reading);
xfs_zone_gc_iter_advance(iter, irec.rm_blockcount);
return;
}
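+ /* account the GC write and the number of bytes it moves */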
+ XFS_STATS_INC(mp, xs_gc_write_calls);
+ XFS_STATS_ADD(mp, xs_gc_bytes, chunk->len);
+
WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
list_move_tail(&chunk->entry, &data->writing);
return;
}
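+ /* account each zone reset issued by the GC worker */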
+ XFS_STATS_INC(mp, xs_gc_zone_reset_calls);
+
bio->bi_iter.bi_sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {
/*