git.ipfire.org Git - thirdparty/linux.git/commitdiff
bcachefs: Debug params are now static_keys
author    Kent Overstreet <kent.overstreet@linux.dev>
          Sat, 10 May 2025 18:14:06 +0000 (14:14 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
          Thu, 22 May 2025 00:14:54 +0000 (20:14 -0400)
We'd like users to be able to debug without building custom kernels, so
this will help us get rid of CONFIG_BCACHEFS_DEBUG, at least for most
things.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
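
Aside from the declaration and module-parameter plumbing in bcachefs.h and super.c, every hunk below makes the same substitution: a debug knob that used to be read as a plain bool is now tested with static_branch_unlikely(), so the check is a patched-out no-op until the key is enabled at runtime. A minimal sketch of the before/after shape, using a hypothetical knob name rather than one of the real BCH_DEBUG_PARAMS() entries:

/* Sketch only: "example_debug_checks" is a hypothetical knob, not a
 * real bcachefs parameter. */
#include <linux/jump_label.h>
#include <linux/printk.h>

/* Before: a plain bool, loaded and tested on every call. */
static bool example_debug_checks;

static void check_old(void)
{
        if (example_debug_checks)
                pr_info("running expensive checks\n");
}

/* After: a static key, false by default; static_branch_unlikely()
 * compiles to a jump that is live-patched when the key is enabled,
 * so the disabled case costs essentially nothing. */
DEFINE_STATIC_KEY_FALSE(example_debug_checks_enabled);

static void check_new(void)
{
        if (static_branch_unlikely(&example_debug_checks_enabled))
                pr_info("running expensive checks\n");
}
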
16 files changed:
fs/bcachefs/backpointers.c
fs/bcachefs/backpointers.h
fs/bcachefs/bcachefs.h
fs/bcachefs/bkey.h
fs/bcachefs/bkey_methods.c
fs/bcachefs/bset.c
fs/bcachefs/bset.h
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_update_interior.h
fs/bcachefs/debug.h
fs/bcachefs/extents.c
fs/bcachefs/super.c

diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 631d4d24d78f292dfc2435c3e6f9773800cf4ba2..bdf524b465fa891df5a77ceee652a72b66759711 100644
@@ -182,7 +182,7 @@ err:
 
 static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos)
 {
-       return (likely(!bch2_backpointers_no_use_write_buffer)
+       return (!static_branch_unlikely(&bch2_backpointers_no_use_write_buffer)
                ? bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, pos)
                : bch2_btree_delete(trans, BTREE_ID_backpointers, pos, 0)) ?:
                 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
@@ -192,7 +192,7 @@ static inline int bch2_backpointers_maybe_flush(struct btree_trans *trans,
                                         struct bkey_s_c visiting_k,
                                         struct bkey_buf *last_flushed)
 {
-       return likely(!bch2_backpointers_no_use_write_buffer)
+       return !static_branch_unlikely(&bch2_backpointers_no_use_write_buffer)
                ? bch2_btree_write_buffer_maybe_flush(trans, visiting_k, last_flushed)
                : 0;
 }
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index c72707ee9d42e80e83c6d54825d3bb377cd76962..f57098c3214361f9da4b7667de72c70a318d5048 100644
@@ -102,7 +102,7 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
                                struct bkey_i_backpointer *bp,
                                bool insert)
 {
-       if (unlikely(bch2_backpointers_no_use_write_buffer))
+       if (static_branch_unlikely(&bch2_backpointers_no_use_write_buffer))
                return bch2_bucket_backpointer_mod_nowritebuffer(trans, orig_k, bp, insert);
 
        if (!insert) {
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 5392a0ec6439cbdc69809a72bb5efbe9f76825bd..46976409f733dd91475c92a67edc7dfd6cb74eeb 100644
@@ -439,16 +439,10 @@ do {                                                                      \
 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
 #endif
 
-#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
-BCH_DEBUG_PARAMS()
+#define BCH_DEBUG_PARAM(name, description) extern struct static_key_false bch2_##name;
+BCH_DEBUG_PARAMS_ALL()
 #undef BCH_DEBUG_PARAM
 
-#ifndef CONFIG_BCACHEFS_DEBUG
-#define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
-BCH_DEBUG_PARAMS_DEBUG()
-#undef BCH_DEBUG_PARAM
-#endif
-
 #define BCH_TIME_STATS()                       \
        x(btree_node_mem_alloc)                 \
        x(btree_node_split)                     \
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index 082632905649e638d4f7623c51b1af1b21708a08..b33356982460ce20eecddf828b4f988892bc8b23 100644
@@ -399,7 +399,7 @@ __bkey_unpack_key_format_checked(const struct btree *b,
                unpack_fn(dst, src);
 
                if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
-                   bch2_expensive_debug_checks) {
+                   static_branch_unlikely(&bch2_expensive_debug_checks)) {
                        struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);
 
                        BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 00d05ccfaf73bb7032b8b197b109ce6349e8a2b1..fcd8c82cba4f6ff00a30a9ded12c32b1fe594887 100644
@@ -356,7 +356,7 @@ bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
        return ops->key_merge &&
                bch2_bkey_maybe_mergable(l.k, r.k) &&
                (u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
-               !bch2_key_merging_disabled &&
+               !static_branch_unlikely(&bch2_key_merging_disabled) &&
                ops->key_merge(c, l, r);
 }
 
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 9a4a83d6fd2ddd9ca3510b1e0c37bb6445f3ef1a..7d2004a47fe6776fa249698518761b356c8038be 100644
@@ -501,7 +501,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
        struct bkey_packed *k = btree_bkey_first(b, t);
        unsigned j = 0;
 
-       if (!bch2_expensive_debug_checks)
+       if (!static_branch_unlikely(&bch2_expensive_debug_checks))
                return;
 
        BUG_ON(bset_has_ro_aux_tree(t));
@@ -869,7 +869,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
                k = p;
        }
 
-       if (bch2_expensive_debug_checks) {
+       if (static_branch_unlikely(&bch2_expensive_debug_checks)) {
                BUG_ON(ret >= orig_k);
 
                for (i = ret
@@ -1195,7 +1195,7 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
                       bkey_iter_pos_cmp(b, m, search) < 0)
                        m = bkey_p_next(m);
 
-       if (bch2_expensive_debug_checks) {
+       if (static_branch_unlikely(&bch2_expensive_debug_checks)) {
                struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
 
                BUG_ON(prev &&
@@ -1435,7 +1435,7 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
                                  struct btree *b)
 {
-       if (bch2_expensive_debug_checks) {
+       if (static_branch_unlikely(&bch2_expensive_debug_checks)) {
                bch2_btree_node_iter_verify(iter, b);
                bch2_btree_node_iter_next_check(iter, b);
        }
@@ -1453,7 +1453,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
        struct btree_node_iter_set *set;
        unsigned end = 0;
 
-       if (bch2_expensive_debug_checks)
+       if (static_branch_unlikely(&bch2_expensive_debug_checks))
                bch2_btree_node_iter_verify(iter, b);
 
        for_each_bset(b, t) {
@@ -1489,7 +1489,7 @@ found:
        iter->data[0].k = __btree_node_key_to_offset(b, prev);
        iter->data[0].end = end;
 
-       if (bch2_expensive_debug_checks)
+       if (static_branch_unlikely(&bch2_expensive_debug_checks))
                bch2_btree_node_iter_verify(iter, b);
        return prev;
 }
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index 6953d55b72ccac7aee94fa8c91b51b936739b4e7..f5b7fda537ea5751b818071ca7032b4c5d28dca9 100644
@@ -537,7 +537,7 @@ static inline void bch2_verify_insert_pos(struct btree *b,
 
 static inline void bch2_verify_btree_nr_keys(struct btree *b)
 {
-       if (bch2_debug_check_btree_accounting)
+       if (static_branch_unlikely(&bch2_debug_check_btree_accounting))
                __bch2_verify_btree_nr_keys(b);
 }
 
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index e48089252bb9ab983b0b0e48d4b6fc5c41a702d0..2fd58b08a54dd84482269d8afd59b66cadf635d7 100644
@@ -377,7 +377,7 @@ static int __btree_node_reclaim_checks(struct bch_fs *c, struct btree *b,
                         * - unless btree verify mode is enabled, since it runs out of
                         * the post write cleanup:
                         */
-                       if (bch2_verify_btree_ondisk)
+                       if (static_branch_unlikely(&bch2_verify_btree_ondisk))
                                bch2_btree_node_write(c, b, SIX_LOCK_intent,
                                                      BTREE_WRITE_cache_reclaim);
                        else
@@ -473,7 +473,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
        unsigned long ret = SHRINK_STOP;
        bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4;
 
-       if (bch2_btree_shrinker_disabled)
+       if (static_branch_unlikely(&bch2_btree_shrinker_disabled))
                return SHRINK_STOP;
 
        mutex_lock(&bc->lock);
@@ -569,7 +569,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
 {
        struct btree_cache_list *list = shrink->private_data;
 
-       if (bch2_btree_shrinker_disabled)
+       if (static_branch_unlikely(&bch2_btree_shrinker_disabled))
                return 0;
 
        return btree_cache_can_free(list);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index dd08ec080313f203c562b66113887bdd3679da9e..91b6395421df6cea30a6853602327ef0b7c200ef 100644
@@ -619,7 +619,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
        deleted.p = k.k->p;
 
        if (initial) {
-               BUG_ON(bch2_journal_seq_verify &&
+               BUG_ON(static_branch_unlikely(&bch2_journal_seq_verify) &&
                       k.k->bversion.lo > atomic64_read(&c->journal.seq));
 
                if (fsck_err_on(btree_id != BTREE_ID_accounting &&
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index e079e12adf860ef838587fb59adfccc3835cf70a..d9adc4f5ba3d33cfc5a162227fb603328030c70c 100644
@@ -1296,7 +1296,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                ret = btree_node_bkey_val_validate(c, b, u.s_c, READ);
                if (ret == -BCH_ERR_fsck_delete_bkey ||
-                   (bch2_inject_invalid_keys &&
+                   (static_branch_unlikely(&bch2_inject_invalid_keys) &&
                     !bversion_cmp(u.k->bversion, MAX_VERSION))) {
                        btree_keys_account_key_drop(&b->nr, 0, k);
 
@@ -1758,7 +1758,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
 
        trace_and_count(c, btree_node_read, trans, b);
 
-       if (bch2_verify_all_btree_replicas &&
+       if (static_branch_unlikely(&bch2_verify_all_btree_replicas) &&
            !btree_node_read_all_replicas(c, b, sync))
                return;
 
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index bd3a0bc07511930341c96d90818dd6c640d0515e..be62958cdb9ab6edc892f9d1bf2e4ae969b2ce9d 100644
@@ -147,7 +147,7 @@ static void bch2_btree_path_verify_level(struct btree_trans *trans,
        struct printbuf buf3 = PRINTBUF;
        const char *msg;
 
-       if (!bch2_debug_check_iterators)
+       if (!static_branch_unlikely(&bch2_debug_check_iterators))
                return;
 
        l       = &path->l[level];
@@ -281,7 +281,7 @@ static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
        struct bkey_s_c prev;
        int ret = 0;
 
-       if (!bch2_debug_check_iterators)
+       if (!static_branch_unlikely(&bch2_debug_check_iterators))
                return 0;
 
        if (!(iter->flags & BTREE_ITER_filter_snapshots))
@@ -523,7 +523,7 @@ void bch2_btree_node_iter_fix(struct btree_trans *trans,
                __bch2_btree_node_iter_fix(path, b, node_iter, t,
                                           where, clobber_u64s, new_u64s);
 
-               if (bch2_debug_check_iterators)
+               if (static_branch_unlikely(&bch2_debug_check_iterators))
                        bch2_btree_node_iter_verify(node_iter, b);
        }
 
@@ -2929,7 +2929,7 @@ static void btree_trans_verify_sorted(struct btree_trans *trans)
        struct btree_path *path, *prev = NULL;
        struct trans_for_each_path_inorder_iter iter;
 
-       if (!bch2_debug_check_iterators)
+       if (!static_branch_unlikely(&bch2_debug_check_iterators))
                return;
 
        trans_for_each_path_inorder(trans, path, iter) {
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index cdde769e7da32ff58aa353a709ffcc90d7d2c49b..7e17df1df7f15923cd869fcea2422b4c78d9478e 100644
@@ -647,10 +647,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
 
        if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
            !(flags & BCH_TRANS_COMMIT_no_journal_res)) {
-               if (bch2_journal_seq_verify)
+               if (static_branch_unlikely(&bch2_journal_seq_verify))
                        trans_for_each_update(trans, i)
                                i->k->k.bversion.lo = trans->journal_res.seq;
-               else if (bch2_inject_invalid_keys)
+               else if (static_branch_unlikely(&bch2_inject_invalid_keys))
                        trans_for_each_update(trans, i)
                                i->k->k.bversion = MAX_VERSION;
        }
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index ff9b95aac5540688582a17fda09f6acb1330cf0c..7fe793788a79deb9203ea116123d0e4b7bd7c7cc 100644
@@ -144,7 +144,7 @@ static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
 
        EBUG_ON(!btree_node_locked(path, level));
 
-       if (bch2_btree_node_merging_disabled)
+       if (static_branch_unlikely(&bch2_btree_node_merging_disabled))
                return 0;
 
        b = path->l[level].b;
diff --git a/fs/bcachefs/debug.h b/fs/bcachefs/debug.h
index 52dbea7367092b82d77ef4b8d18f0f605b5089c2..d88b1194b8acc02b0cf66d596dcac83b542564e1 100644
@@ -14,7 +14,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *, struct bch_fs *,
 
 static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b)
 {
-       if (bch2_verify_btree_ondisk)
+       if (static_branch_unlikely(&bch2_verify_btree_ondisk))
                __bch2_btree_verify(c, b);
 }
 
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index d3af841e48ef034e922f46dbf6daf0def9640916..1ac9897f189db2999cbf6caf44aa6b9d683895ed 100644
@@ -164,7 +164,7 @@ static inline bool ptr_better(struct bch_fs *c,
        if (unlikely(failed_delta))
                return failed_delta < 0;
 
-       if (unlikely(bch2_force_reconstruct_read))
+       if (static_branch_unlikely(&bch2_force_reconstruct_read))
                return p1.do_ec_reconstruct > p2.do_ec_reconstruct;
 
        if (unlikely(p1.do_ec_reconstruct || p2.do_ec_reconstruct))
@@ -259,7 +259,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
                        p.do_ec_reconstruct = true;
                }
 
-               if (bch2_force_reconstruct_read && p.has_ec)
+               if (static_branch_unlikely(&bch2_force_reconstruct_read) && p.has_ec)
                        p.do_ec_reconstruct = true;
 
                u64 p_latency = dev_latency(ca);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 95ed5ab541d36d654a3c663423444569f1be07aa..8125c6804bd543c3d1a1ae6877fafba4b2d149c2 100644
@@ -2430,9 +2430,45 @@ err:
        return -ENOMEM;
 }
 
-#define BCH_DEBUG_PARAM(name, description)                     \
-       bool bch2_##name;                                       \
-       module_param_named(name, bch2_##name, bool, 0644);      \
+#define BCH_DEBUG_PARAM(name, description) DEFINE_STATIC_KEY_FALSE(bch2_##name);
+BCH_DEBUG_PARAMS_ALL()
+#undef BCH_DEBUG_PARAM
+
+static int bch2_param_set_static_key_t(const char *val, const struct kernel_param *kp)
+{
+       /* Match bool exactly, by re-using it. */
+       struct static_key *key = kp->arg;
+       struct kernel_param boolkp = *kp;
+       bool v;
+       int ret;
+
+       boolkp.arg = &v;
+
+       ret = param_set_bool(val, &boolkp);
+       if (ret)
+               return ret;
+       if (v)
+               static_key_enable(key);
+       else
+               static_key_disable(key);
+       return 0;
+}
+
+static int bch2_param_get_static_key_t(char *buffer, const struct kernel_param *kp)
+{
+       struct static_key *key = kp->arg;
+       return sprintf(buffer, "%c\n", static_key_enabled(key) ? 'Y' : 'N');
+}
+
+static const struct kernel_param_ops bch2_param_ops_static_key_t = {
+       .flags = KERNEL_PARAM_OPS_FL_NOARG,
+       .set = bch2_param_set_static_key_t,
+       .get = bch2_param_get_static_key_t,
+};
+
+#define BCH_DEBUG_PARAM(name, description)                             \
+       module_param_cb(name, &bch2_param_ops_static_key_t, &bch2_##name.key, 0644);\
+       __MODULE_PARM_TYPE(name, "static_key_t");                       \
        MODULE_PARM_DESC(name, description);
 BCH_DEBUG_PARAMS()
 #undef BCH_DEBUG_PARAM
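
The super.c hunk above is what makes these keys reachable without a rebuild: each BCH_DEBUG_PARAM() now emits a DEFINE_STATIC_KEY_FALSE() plus a module_param_cb() whose callbacks parse the value with param_set_bool() and then enable or disable the key. Written out for a single knob, outside the macro, the wiring looks roughly like this (a sketch with a hypothetical parameter name, not the actual bcachefs code):

/* Sketch only: "example_checks" is a hypothetical knob. */
#include <linux/jump_label.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

DEFINE_STATIC_KEY_FALSE(example_checks);

static int example_checks_set(const char *val, const struct kernel_param *kp)
{
        struct static_key *key = kp->arg;
        struct kernel_param boolkp = *kp;
        bool v;
        int ret;

        /* Parse Y/N/1/0 exactly like a bool parameter would. */
        boolkp.arg = &v;
        ret = param_set_bool(val, &boolkp);
        if (ret)
                return ret;

        /* Then patch the branch sites on or off. */
        if (v)
                static_key_enable(key);
        else
                static_key_disable(key);
        return 0;
}

static int example_checks_get(char *buffer, const struct kernel_param *kp)
{
        struct static_key *key = kp->arg;

        return sprintf(buffer, "%c\n", static_key_enabled(key) ? 'Y' : 'N');
}

static const struct kernel_param_ops example_checks_ops = {
        .flags  = KERNEL_PARAM_OPS_FL_NOARG,    /* bare "module.example_checks" means Y */
        .set    = example_checks_set,
        .get    = example_checks_get,
};

module_param_cb(example_checks, &example_checks_ops, &example_checks.key, 0644);
__MODULE_PARM_TYPE(example_checks, "static_key_t");
MODULE_PARM_DESC(example_checks, "example debug toggle backed by a static key");

With 0644 permissions the parameter shows up under /sys/module/<module>/parameters/ even when the code is built in, so a debug knob can be flipped at runtime by writing Y or N to it instead of booting a kernel built with CONFIG_BCACHEFS_DEBUG.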