bcachefs: enum bch_persistent_counters_stable
author		Kent Overstreet <kent.overstreet@linux.dev>
		Thu, 30 Jan 2025 08:28:27 +0000 (03:28 -0500)
committer	Kent Overstreet <kent.overstreet@linux.dev>
		Sat, 15 Mar 2025 01:02:11 +0000 (21:02 -0400)
Persistent counters, like recovery passes, include a stable enum in
their definition - but this was never correctly plumbed.

This allows us to add new counters and properly organize them with a
non-stable "presentation order", which can also be used in userspace by
the new 'bcachefs fs top' tool.

Fortunately, since we haven't yet added any new counters where the
presentation order ID doesn't match the stable ID, this won't cause any
reordering issues.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
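
For illustration, a minimal sketch of the x-macro scheme this commit plumbs
(hypothetical counter names and an EXAMPLE_COUNTERS() macro standing in for
BCH_PERSISTENT_COUNTERS() from sb-counters_format.h): list position gives the
presentation-order enum, the explicit id gives the stable enum, and a small
table maps one onto the other the way counters_to_stable_map does in the diff
below.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical counter list: x(name, stable_id).  List position gives the
 * presentation order; the explicit id gives the on-disk (stable) slot. */
#define EXAMPLE_COUNTERS()	\
	x(io_read,	0)	\
	x(io_write,	1)	\
	x(io_move,	2)

/* presentation order: follows position in the list above */
enum example_counters {
#define x(t, ...) COUNTER_##t,
	EXAMPLE_COUNTERS()
#undef x
	COUNTER_NR
};

/* stable order: pinned by the explicit id, safe to store on disk */
enum example_counters_stable {
#define x(t, n, ...) COUNTER_STABLE_##t = n,
	EXAMPLE_COUNTERS()
#undef x
	COUNTER_STABLE_NR
};

/* presentation index -> stable (on-disk) index */
static const uint8_t to_stable[] = {
#define x(n, id, ...)	[COUNTER_##n] = COUNTER_STABLE_##n,
	EXAMPLE_COUNTERS()
#undef x
};

int main(void)
{
	for (unsigned i = 0; i < COUNTER_NR; i++)
		printf("presentation %u -> stable %u\n", i, (unsigned) to_stable[i]);
	return 0;
}

Note that, as with BCH_COUNTER_STABLE_NR in the header hunk below, the
COUNTER_STABLE_NR terminator only equals the true count as long as the entry
with the highest stable id is last in the list.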
fs/bcachefs/sb-counters.c
fs/bcachefs/sb-counters_format.h

diff --git a/fs/bcachefs/sb-counters.c b/fs/bcachefs/sb-counters.c
index 6992e7469112cf143b6a8fd5c7303e6f1d502235..5153a47ec7d46f352914cafab4bde3b266bb8f65 100644
--- a/fs/bcachefs/sb-counters.c
+++ b/fs/bcachefs/sb-counters.c
@@ -5,11 +5,10 @@
 
 /* BCH_SB_FIELD_counters */
 
-static const char * const bch2_counter_names[] = {
-#define x(t, n, ...) (#t),
+static const u8 counters_to_stable_map[] = {
+#define x(n, id, ...)  [BCH_COUNTER_##n] = BCH_COUNTER_STABLE_##n,
        BCH_PERSISTENT_COUNTERS()
 #undef x
-       NULL
 };
 
 static size_t bch2_sb_counter_nr_entries(struct bch_sb_field_counters *ctrs)
@@ -18,13 +17,13 @@ static size_t bch2_sb_counter_nr_entries(struct bch_sb_field_counters *ctrs)
                return 0;
 
        return (__le64 *) vstruct_end(&ctrs->field) - &ctrs->d[0];
-};
+}
 
 static int bch2_sb_counters_validate(struct bch_sb *sb, struct bch_sb_field *f,
                                enum bch_validate_flags flags, struct printbuf *err)
 {
        return 0;
-};
+}
 
 static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
                              struct bch_sb_field *f)
@@ -32,50 +31,56 @@ static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
        struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
        unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
 
-       for (unsigned i = 0; i < nr; i++)
-               prt_printf(out, "%s \t%llu\n",
-                          i < BCH_COUNTER_NR ? bch2_counter_names[i] : "(unknown)",
-                          le64_to_cpu(ctrs->d[i]));
-};
+       for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
+               unsigned stable = counters_to_stable_map[i];
+               if (stable < nr)
+                       prt_printf(out, "%s \t%llu\n",
+                                  bch2_counter_names[i],
+                                  le64_to_cpu(ctrs->d[stable]));
+       }
+}
 
 int bch2_sb_counters_to_cpu(struct bch_fs *c)
 {
        struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
-       unsigned int i;
        unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
-       u64 val = 0;
 
-       for (i = 0; i < BCH_COUNTER_NR; i++)
+       for (unsigned i = 0; i < BCH_COUNTER_NR; i++)
                c->counters_on_mount[i] = 0;
 
-       for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++) {
-               val = le64_to_cpu(ctrs->d[i]);
-               percpu_u64_set(&c->counters[i], val);
-               c->counters_on_mount[i] = val;
+       for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
+               unsigned stable = counters_to_stable_map[i];
+               if (stable < nr) {
+                       u64 v = le64_to_cpu(ctrs->d[stable]);
+                       percpu_u64_set(&c->counters[i], v);
+                       c->counters_on_mount[i] = v;
+               }
        }
+
        return 0;
-};
+}
 
 int bch2_sb_counters_from_cpu(struct bch_fs *c)
 {
        struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
        struct bch_sb_field_counters *ret;
-       unsigned int i;
        unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
 
        if (nr < BCH_COUNTER_NR) {
                ret = bch2_sb_field_resize(&c->disk_sb, counters,
-                                              sizeof(*ctrs) / sizeof(u64) + BCH_COUNTER_NR);
-
+                                          sizeof(*ctrs) / sizeof(u64) + BCH_COUNTER_NR);
                if (ret) {
                        ctrs = ret;
                        nr = bch2_sb_counter_nr_entries(ctrs);
                }
        }
 
+       for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
+               unsigned stable = counters_to_stable_map[i];
+               if (stable < nr)
+                       ctrs->d[stable] = cpu_to_le64(percpu_u64_get(&c->counters[i]));
+       }
 
-       for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++)
-               ctrs->d[i] = cpu_to_le64(percpu_u64_get(&c->counters[i]));
        return 0;
 }
 
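To make the new indexing concrete, here is a hedged, userspace-style sketch
(plain arrays instead of the superblock field; the helper names are made up)
of the round-trip bch2_sb_counters_to_cpu() and bch2_sb_counters_from_cpu()
now perform: in-memory counters stay in presentation order, the on-disk array
is indexed by stable id, and the "stable < nr" check skips slots that a
smaller, older counters field doesn't have.  The real code additionally
converts through le64_to_cpu()/cpu_to_le64().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COUNTER_NR 3
/* presentation index -> stable (on-disk) index; hypothetical values */
static const uint8_t to_stable[COUNTER_NR] = { 0, 1, 2 };

/* disk -> memory: zero everything, then copy only the slots present */
static void counters_to_cpu(uint64_t *mem, const uint64_t *disk, unsigned disk_nr)
{
	memset(mem, 0, COUNTER_NR * sizeof(*mem));

	for (unsigned i = 0; i < COUNTER_NR; i++) {
		unsigned stable = to_stable[i];
		if (stable < disk_nr)
			mem[i] = disk[stable];
	}
}

/* memory -> disk: each counter lands in its stable slot */
static void counters_from_cpu(uint64_t *disk, unsigned disk_nr, const uint64_t *mem)
{
	for (unsigned i = 0; i < COUNTER_NR; i++) {
		unsigned stable = to_stable[i];
		if (stable < disk_nr)
			disk[stable] = mem[i];
	}
}

int main(void)
{
	uint64_t disk[COUNTER_NR] = { 10, 20, 30 }, mem[COUNTER_NR];

	counters_to_cpu(mem, disk, COUNTER_NR);
	mem[1] += 5;			/* bump a counter in memory */
	counters_from_cpu(disk, COUNTER_NR, mem);

	for (unsigned i = 0; i < COUNTER_NR; i++)
		printf("slot %u = %llu\n", i, (unsigned long long) disk[i]);
	return 0;
}
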
diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h
index fdcf598f08b120eafe33f7880c23be1c80f3e934..cb44d9ee1ac5747a90389ac22ffd15a057c522f4 100644
--- a/fs/bcachefs/sb-counters_format.h
+++ b/fs/bcachefs/sb-counters_format.h
@@ -95,6 +95,13 @@ enum bch_persistent_counters {
        BCH_COUNTER_NR
 };
 
+enum bch_persistent_counters_stable {
+#define x(t, n, ...) BCH_COUNTER_STABLE_##t = n,
+       BCH_PERSISTENT_COUNTERS()
+#undef x
+       BCH_COUNTER_STABLE_NR
+};
+
 struct bch_sb_field_counters {
        struct bch_sb_field     field;
        __le64                  d[];
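
Finally, a hypothetical illustration (not kernel code) of what the stable enum
buys: if a counter is later moved or inserted near the top of the presentation
list, say for nicer 'bcachefs fs top' grouping, the presentation enum shifts
but the *_STABLE_* values, and hence the on-disk slots, stay put.

/* Hypothetical reordering: io_move listed first purely for display. */
#define EXAMPLE_COUNTERS()	\
	x(io_move,	2)	\
	x(io_read,	0)	\
	x(io_write,	1)

/* presentation order now: io_move, io_read, io_write */
enum example_counters {
#define x(t, ...) COUNTER_##t,
	EXAMPLE_COUNTERS()
#undef x
	COUNTER_NR
};

/* on-disk slots unchanged: io_read = 0, io_write = 1, io_move = 2 */
enum example_counters_stable {
#define x(t, n, ...) COUNTER_STABLE_##t = n,
	EXAMPLE_COUNTERS()
#undef x
};

_Static_assert(COUNTER_io_move == 0 && COUNTER_STABLE_io_move == 2,
	       "stable ids are independent of presentation order");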