git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bcachefs: x-macroize alloc_reserve enum
author: Kent Overstreet <kent.overstreet@gmail.com>
Sun, 13 Mar 2022 23:27:55 +0000 (19:27 -0400)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:29 +0000 (17:09 -0400)
This makes an array of strings available, like our other enums.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
13 files changed:
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_foreground.c
fs/bcachefs/alloc_foreground.h
fs/bcachefs/alloc_types.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/buckets.c
fs/bcachefs/ec.c
fs/bcachefs/io.h
fs/bcachefs/journal.c
fs/bcachefs/move.c
fs/bcachefs/movinggc.c
fs/bcachefs/sysfs.c
fs/bcachefs/trace.h

index fac040aa0d5a2096b4a7a3fe401ed8be03a45df7..a53aeb4ee6484c9f75e925a625fe0cc77c6dd086 100644 (file)
@@ -780,7 +780,7 @@ static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
                 * Don't strand buckets on the copygc freelist until
                 * after recovery is finished:
                 */
-               if (i == RESERVE_MOVINGGC &&
+               if (i == RESERVE_movinggc &&
                    !test_bit(BCH_FS_STARTED, &c->flags))
                        continue;
 
@@ -941,7 +941,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
                 * allocations for foreground writes must wait -
                 * not -ENOSPC calculations.
                 */
-               for (j = 0; j < RESERVE_NONE; j++)
+               for (j = 0; j < RESERVE_none; j++)
                        dev_reserve += ca->free[j].size;
 
                dev_reserve += 1;       /* btree write point */
index dc2f153f60c6fec4054aed32173962afd118274f..76a4b8029bdfeae00c0e536bf6892f00d117dfb2 100644 (file)
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
+const char * const bch2_alloc_reserves[] = {
+#define x(t) #t,
+       BCH_ALLOC_RESERVES()
+#undef x
+       NULL
+};
+
 /*
  * Open buckets represent a bucket that's currently being allocated from.  They
  * serve two purposes:
@@ -168,10 +175,10 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 {
        switch (reserve) {
-       case RESERVE_BTREE:
-       case RESERVE_BTREE_MOVINGGC:
+       case RESERVE_btree:
+       case RESERVE_btree_movinggc:
                return 0;
-       case RESERVE_MOVINGGC:
+       case RESERVE_movinggc:
                return OPEN_BUCKETS_COUNT / 4;
        default:
                return OPEN_BUCKETS_COUNT / 2;
@@ -219,17 +226,17 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
                        c->blocked_allocate_open_bucket = local_clock();
 
                spin_unlock(&c->freelist_lock);
-               trace_open_bucket_alloc_fail(ca, reserve);
+               trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
                return ERR_PTR(-OPEN_BUCKETS_EMPTY);
        }
 
-       if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
+       if (likely(fifo_pop(&ca->free[RESERVE_none], b)))
                goto out;
 
        switch (reserve) {
-       case RESERVE_BTREE_MOVINGGC:
-       case RESERVE_MOVINGGC:
-               if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
+       case RESERVE_btree_movinggc:
+       case RESERVE_movinggc:
+               if (fifo_pop(&ca->free[RESERVE_movinggc], b))
                        goto out;
                break;
        default:
@@ -244,7 +251,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
 
        spin_unlock(&c->freelist_lock);
 
-       trace_bucket_alloc_fail(ca, reserve);
+       trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
        return ERR_PTR(-FREELIST_EMPTY);
 out:
        verify_not_on_freelist(c, ca, b);
@@ -282,7 +289,7 @@ out:
 
        bch2_wake_allocator(ca);
 
-       trace_bucket_alloc(ca, reserve);
+       trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
        return ob;
 }
 
index d466bda9afc8fdddb49f7b353c8c571b12f1fcf6..3598c70b93b4f43faf9748bfa8769a85bcd8a066 100644 (file)
@@ -12,6 +12,8 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_List;
 
+extern const char * const bch2_alloc_reserves[];
+
 struct dev_alloc_list {
        unsigned        nr;
        u8              devs[BCH_SB_MEMBERS_MAX];
index 409232e3d99800ef652ce6fcd8b2cf0a2e6476b9..e3a3eb2711588bca88062c9cc7d3d3dd4be8062d 100644 (file)
@@ -22,12 +22,17 @@ enum allocator_states {
 #undef x
 };
 
+#define BCH_ALLOC_RESERVES()           \
+       x(btree_movinggc)               \
+       x(btree)                        \
+       x(movinggc)                     \
+       x(none)
+
 enum alloc_reserve {
-       RESERVE_BTREE_MOVINGGC  = -2,
-       RESERVE_BTREE           = -1,
-       RESERVE_MOVINGGC        = 0,
-       RESERVE_NONE            = 1,
-       RESERVE_NR              = 2,
+#define x(name)        RESERVE_##name,
+       BCH_ALLOC_RESERVES()
+#undef x
+       RESERVE_NR
 };
 
 typedef FIFO(long)     alloc_fifo;
index e0af39ee4b47e7ce0f0c09ca8b74e3b0935afdc1..1c53f965539de2437ba0ee3ca4afe69a4dd45c54 100644 (file)
@@ -194,10 +194,10 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
 
        if (flags & BTREE_INSERT_USE_RESERVE) {
                nr_reserve      = 0;
-               alloc_reserve   = RESERVE_BTREE_MOVINGGC;
+               alloc_reserve   = RESERVE_btree_movinggc;
        } else {
                nr_reserve      = BTREE_NODE_RESERVE;
-               alloc_reserve   = RESERVE_BTREE;
+               alloc_reserve   = RESERVE_btree;
        }
 
        mutex_lock(&c->btree_reserve_cache_lock);
index 0f2dd4b8b47dfc78c2bf0368bf1efee54ee11ddd..8eeabb5a66bd32f5485eed49715654ddf8e00136 100644 (file)
@@ -2091,9 +2091,9 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
             !(buckets_nouse    = kvpmalloc(BITS_TO_LONGS(nbuckets) *
                                            sizeof(unsigned long),
                                            GFP_KERNEL|__GFP_ZERO))) ||
-           !init_fifo(&free[RESERVE_MOVINGGC],
+           !init_fifo(&free[RESERVE_movinggc],
                       copygc_reserve, GFP_KERNEL) ||
-           !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
+           !init_fifo(&free[RESERVE_none], reserve_none, GFP_KERNEL) ||
            !init_fifo(&free_inc,       free_inc_nr, GFP_KERNEL) ||
            !init_heap(&alloc_heap,     ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
                goto err;
index b220b523d856fdc8c3440f8295f7b90c21928758..9dc2f9f822c880d08980827d8d9762bfd6b3d6f7 100644 (file)
@@ -1307,8 +1307,8 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
                                            &nr_have_parity,
                                            &have_cache,
                                            h->copygc
-                                           ? RESERVE_MOVINGGC
-                                           : RESERVE_NONE,
+                                           ? RESERVE_movinggc
+                                           : RESERVE_none,
                                            0,
                                            cl);
 
@@ -1336,8 +1336,8 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
                                            &nr_have_data,
                                            &have_cache,
                                            h->copygc
-                                           ? RESERVE_MOVINGGC
-                                           : RESERVE_NONE,
+                                           ? RESERVE_movinggc
+                                           : RESERVE_none,
                                            0,
                                            cl);
 
index 8be77561badbb2c08bb399436983e186c4e4b8fd..f8ce9543c9e3e471ba2a4e86b016ab97980ec763 100644 (file)
@@ -70,7 +70,7 @@ static inline u64 *op_journal_seq(struct bch_write_op *op)
 
 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 {
-       return op->alloc_reserve == RESERVE_MOVINGGC
+       return op->alloc_reserve == RESERVE_movinggc
                ? op->c->copygc_wq
                : op->c->btree_update_wq;
 }
@@ -97,7 +97,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
        op->compression_type    = bch2_compression_opt_to_type[opts.compression];
        op->nr_replicas         = 0;
        op->nr_replicas_required = c->opts.data_replicas_required;
-       op->alloc_reserve       = RESERVE_NONE;
+       op->alloc_reserve       = RESERVE_none;
        op->incompressible      = 0;
        op->open_buckets.nr     = 0;
        op->devs_have.nr        = 0;
index 11b44467aeabbf2ad082ec1cae168c924676a09a..750509661d797e89843fb3ca40409449d116df34 100644 (file)
@@ -817,7 +817,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                        }
                } else {
                        rcu_read_lock();
-                       ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
+                       ob = bch2_bucket_alloc(c, ca, RESERVE_none,
                                               false, cl);
                        rcu_read_unlock();
                        if (IS_ERR(ob)) {
index b916ee35ee37f1275275aa681b86f9ce7a3eb04a..3a5c81f3697bad7ce922905be1908cec6a055263 100644 (file)
@@ -351,7 +351,7 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
                }
 
        if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
-               m->op.alloc_reserve = RESERVE_MOVINGGC;
+               m->op.alloc_reserve = RESERVE_movinggc;
                m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
        } else {
                /* XXX: this should probably be passed in */
index dd71c0ce0a84edfeb1e643f64975df07b96f927f..b43e54133b15d8acafc14b09cc9a285ad38f38dd 100644 (file)
 #include <linux/sort.h>
 #include <linux/wait.h>
 
-/*
- * We can't use the entire copygc reserve in one iteration of copygc: we may
- * need the buckets we're freeing up to go back into the copygc reserve to make
- * forward progress, but if the copygc reserve is full they'll be available for
- * any allocation - and it's possible that in a given iteration, we free up most
- * of the buckets we're going to free before we allocate most of the buckets
- * we're going to allocate.
- *
- * If we only use half of the reserve per iteration, then in steady state we'll
- * always have room in the reserve for the buckets we're going to need in the
- * next iteration:
- */
-#define COPYGC_BUCKETS_PER_ITER(ca)                                    \
-       ((ca)->free[RESERVE_MOVINGGC].size / 2)
-
 static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
 {
        const struct copygc_heap_entry *l = _l;
@@ -124,7 +109,7 @@ static bool have_copygc_reserve(struct bch_dev *ca)
        bool ret;
 
        spin_lock(&ca->fs->freelist_lock);
-       ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
+       ret = fifo_full(&ca->free[RESERVE_movinggc]) ||
                ca->allocator_state != ALLOCATOR_running;
        spin_unlock(&ca->fs->freelist_lock);
 
@@ -265,7 +250,7 @@ static int bch2_copygc(struct bch_fs *c)
                closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
 
                spin_lock(&ca->fs->freelist_lock);
-               sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
+               sectors_reserved += fifo_used(&ca->free[RESERVE_movinggc]) * ca->mi.bucket_size;
                spin_unlock(&ca->fs->freelist_lock);
        }
 
@@ -281,7 +266,7 @@ static int bch2_copygc(struct bch_fs *c)
        }
 
        /*
-        * Our btree node allocations also come out of RESERVE_MOVINGGC:
+        * Our btree node allocations also come out of RESERVE_movinggc:
         */
        sectors_reserved = (sectors_reserved * 3) / 4;
        if (!sectors_reserved) {
index dc67506e08d788644a328f4fe4ba15decda45002..7e10adba5c75ce6bc843dd0b09660ccf1bb9e1ba 100644 (file)
@@ -758,8 +758,8 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
               stats.buckets_ec,
               __dev_buckets_available(ca, stats),
               fifo_used(&ca->free_inc),                ca->free_inc.size,
-              fifo_used(&ca->free[RESERVE_MOVINGGC]),  ca->free[RESERVE_MOVINGGC].size,
-              fifo_used(&ca->free[RESERVE_NONE]),      ca->free[RESERVE_NONE].size,
+              fifo_used(&ca->free[RESERVE_movinggc]),  ca->free[RESERVE_movinggc].size,
+              fifo_used(&ca->free[RESERVE_none]),      ca->free[RESERVE_none].size,
               c->freelist_wait.list.first              ? "waiting" : "empty",
               OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
               ca->nr_open_buckets,
index 6a2626a058159b7463f77ad9dfe0c29e33d82d42..54260349c07ef29d9ff6a7bb63855694b28fffa4 100644 (file)
@@ -468,37 +468,37 @@ TRACE_EVENT(invalidate,
 );
 
 DECLARE_EVENT_CLASS(bucket_alloc,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve),
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve),
 
        TP_STRUCT__entry(
                __field(dev_t,                  dev     )
-               __field(enum alloc_reserve,     reserve )
+               __array(char,   reserve,        16      )
        ),
 
        TP_fast_assign(
                __entry->dev            = ca->dev;
-               __entry->reserve        = reserve;
+               strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
        ),
 
-       TP_printk("%d,%d reserve %d",
+       TP_printk("%d,%d reserve %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, bucket_alloc,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,
-       TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-       TP_ARGS(ca, reserve)
+       TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+       TP_ARGS(ca, alloc_reserve)
 );
 
 /* Moving IO */