}
#endif
+/*
+ * Implement the ->show_stats super operation: emit btrfs-specific
+ * statistics for /proc/self/mountstats.  Only zoned filesystems
+ * currently report extra data; regular filesystems print nothing.
+ *
+ * Always returns 0, as expected by the seq_file show callback.
+ */
+static int btrfs_show_stats(struct seq_file *seq, struct dentry *root)
+{
+	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
+
+	if (btrfs_is_zoned(fs_info))
+		btrfs_show_zoned_stats(fs_info, seq);
+
+	return 0;
+}
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
.unfreeze_fs = btrfs_unfreeze,
.nr_cached_objects = btrfs_nr_cached_objects,
.free_cached_objects = btrfs_free_cached_objects,
+ .show_stats = btrfs_show_stats,
#ifdef CONFIG_BTRFS_EXPERIMENTAL
.remove_bdev = btrfs_remove_bdev,
.shutdown = btrfs_shutdown,
return 0;
}
+
+/*
+ * btrfs_show_zoned_stats - dump zoned allocation statistics into @seq
+ * @fs_info: filesystem to report on
+ * @seq:     seq_file receiving the output (->show_stats path)
+ *
+ * Prints the number of active, reclaimable and unused block groups,
+ * the current data-relocation and tree-log block groups (if set), and
+ * a per-block-group line with write pointer and usage numbers for
+ * every active zone.
+ */
+void btrfs_show_zoned_stats(struct btrfs_fs_info *fs_info, struct seq_file *seq)
+{
+	struct btrfs_block_group *bg;
+	u64 data_reloc_bg;
+	u64 treelog_bg;
+
+	seq_puts(seq, "\n zoned statistics:\n");
+
+	/* Count under the lock so the list cannot change while walking it. */
+	spin_lock(&fs_info->zone_active_bgs_lock);
+	seq_printf(seq, "\tactive block-groups: %zu\n",
+		   list_count_nodes(&fs_info->zone_active_bgs));
+	spin_unlock(&fs_info->zone_active_bgs_lock);
+
+	spin_lock(&fs_info->unused_bgs_lock);
+	seq_printf(seq, "\t reclaimable: %zu\n",
+		   list_count_nodes(&fs_info->reclaim_bgs));
+	seq_printf(seq, "\t unused: %zu\n", list_count_nodes(&fs_info->unused_bgs));
+	spin_unlock(&fs_info->unused_bgs_lock);
+
+	seq_printf(seq, "\t need reclaim: %s\n",
+		   str_true_false(btrfs_zoned_should_reclaim(fs_info)));
+
+	/*
+	 * Lockless snapshots; these values are advisory debug output, so a
+	 * torn read is acceptable and annotated via data_race().
+	 */
+	data_reloc_bg = data_race(fs_info->data_reloc_bg);
+	if (data_reloc_bg)
+		seq_printf(seq, "\tdata relocation block-group: %llu\n",
+			   data_reloc_bg);
+	treelog_bg = data_race(fs_info->treelog_bg);
+	if (treelog_bg)
+		seq_printf(seq, "\ttree-log block-group: %llu\n", treelog_bg);
+
+	spin_lock(&fs_info->zone_active_bgs_lock);
+	seq_puts(seq, "\tactive zones:\n");
+	list_for_each_entry(bg, &fs_info->zone_active_bgs, active_bg_list) {
+		u64 start;
+		u64 alloc_offset;
+		u64 used;
+		u64 reserved;
+		u64 zone_unusable;
+
+		/* Snapshot all fields under bg->lock for a consistent line. */
+		spin_lock(&bg->lock);
+		start = bg->start;
+		alloc_offset = bg->alloc_offset;
+		used = bg->used;
+		reserved = bg->reserved;
+		zone_unusable = bg->zone_unusable;
+		spin_unlock(&bg->lock);
+
+		seq_printf(seq,
+			   "\t start: %llu, wp: %llu, used: %llu, reserved: %llu, unusable: %llu\n",
+			   start, alloc_offset, used, reserved, zone_unusable);
+	}
+	spin_unlock(&fs_info->zone_active_bgs_lock);
+}
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/seq_file.h>
#include "messages.h"
#include "volumes.h"
#include "disk-io.h"
int btrfs_zoned_activate_one_bg(struct btrfs_space_info *space_info, bool do_finish);
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
int btrfs_reset_unused_block_groups(struct btrfs_space_info *space_info, u64 num_bytes);
+void btrfs_show_zoned_stats(struct btrfs_fs_info *fs_info, struct seq_file *seq);
+
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
return 0;
}
+/*
+ * Stub for !CONFIG_BLK_DEV_ZONED builds.  Must return void to match
+ * the real implementation and the declaration in the zoned branch
+ * above; the original stub wrongly returned int.
+ */
+static inline void btrfs_show_zoned_stats(struct btrfs_fs_info *fs_info, struct seq_file *seq)
+{
+}
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)