bcachefs: bch2_dev_usage_update() no longer depends on bucket_mark
author     Kent Overstreet <kent.overstreet@gmail.com>
           Fri, 11 Feb 2022 00:09:40 +0000 (19:09 -0500)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Sun, 22 Oct 2023 21:09:29 +0000 (17:09 -0400)
This is one of the last steps in getting rid of the main in-memory
bucket array.

This changes bch2_dev_usage_update() to take struct bch_alloc_v4 instead of
bucket_mark; for the places where we are in fact still working with
bucket_mark and don't have a bch_alloc_v4, we add a wrapper that takes
bucket_mark and converts it to bch_alloc_v4.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
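
After this change there are two entry points for device usage accounting: call
sites that still operate on struct bucket_mark (bch2_mark_metadata_bucket(),
mark_stripe_bucket(), bch2_mark_pointer()) go through the new
bch2_dev_usage_update_m() wrapper, while bch2_mark_alloc(), which already has
struct bch_alloc_v4 values, calls bch2_dev_usage_update() directly. A condensed
sketch of the two paths, with arguments as in the hunks below and locking and
error handling elided:

        /* callers that still only have a bucket_mark, e.g. bch2_mark_metadata_bucket(): */
        bch2_dev_usage_update_m(c, ca, old, new, 0, true);

        /* bch2_mark_alloc(), which already works with bch_alloc_v4 values: */
        bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);

The wrapper itself (added in the buckets.c hunk below) just repacks the gen,
data_type, dirty_sectors, cached_sectors and stripe fields of each bucket_mark
into a struct bch_alloc_v4 and forwards the call.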
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h

diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index bfab5d88550bbe147227bc53bb14cff641af510a..60ad873da54ff08faa9d448fd52c5424b7a74450 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -283,24 +283,24 @@ bch2_fs_usage_read_short(struct bch_fs *c)
        return ret;
 }
 
-static inline int is_unavailable_bucket(struct bucket_mark m)
+static inline int is_unavailable_bucket(struct bch_alloc_v4 a)
 {
-       return !is_available_bucket(m);
+       return a.dirty_sectors || a.stripe;
 }
 
 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
-                                           struct bucket_mark m)
+                                           struct bch_alloc_v4 a)
 {
-       return m.dirty_sectors
-               ? max(0, (int) ca->mi.bucket_size - (int) m.dirty_sectors)
+       return a.dirty_sectors
+               ? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
                : 0;
 }
 
-static inline enum bch_data_type bucket_type(struct bucket_mark m)
+static inline enum bch_data_type bucket_type(struct bch_alloc_v4 a)
 {
-       return m.cached_sectors && !m.dirty_sectors
+       return a.cached_sectors && !a.dirty_sectors
                ? BCH_DATA_cached
-               : m.data_type;
+               : a.data_type;
 }
 
 static inline void account_bucket(struct bch_fs_usage *fs_usage,
@@ -315,7 +315,8 @@ static inline void account_bucket(struct bch_fs_usage *fs_usage,
 }
 
 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
-                                 struct bucket_mark old, struct bucket_mark new,
+                                 struct bch_alloc_v4 old,
+                                 struct bch_alloc_v4 new,
                                  u64 journal_seq, bool gc)
 {
        struct bch_fs_usage *fs_usage;
@@ -347,6 +348,28 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
        preempt_enable();
 }
 
+static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
+                                   struct bucket_mark old, struct bucket_mark new,
+                                   u64 journal_seq, bool gc)
+{
+       struct bch_alloc_v4 old_a = {
+               .gen            = old.gen,
+               .data_type      = old.data_type,
+               .dirty_sectors  = old.dirty_sectors,
+               .cached_sectors = old.cached_sectors,
+               .stripe         = old.stripe,
+       };
+       struct bch_alloc_v4 new_a = {
+               .gen            = new.gen,
+               .data_type      = new.data_type,
+               .dirty_sectors  = new.dirty_sectors,
+               .cached_sectors = new.cached_sectors,
+               .stripe         = new.stripe,
+       };
+
+       bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+}
+
 static inline int __update_replicas(struct bch_fs *c,
                                    struct bch_fs_usage *fs_usage,
                                    struct bch_replicas_entry *r,
@@ -562,6 +585,8 @@ int bch2_mark_alloc(struct btree_trans *trans,
        if (!gc && new_a.gen != old_a.gen)
                *bucket_gen(ca, new.k->p.offset) = new_a.gen;
 
+       bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+
        g = __bucket(ca, new.k->p.offset, gc);
 
        old_m = bucket_cmpxchg(g, m, ({
@@ -572,8 +597,6 @@ int bch2_mark_alloc(struct btree_trans *trans,
                m.stripe                = new_a.stripe != 0;
        }));
 
-       bch2_dev_usage_update(c, ca, old_m, m, journal_seq, gc);
-
        g->io_time[READ]        = new_a.io_time[READ];
        g->io_time[WRITE]       = new_a.io_time[WRITE];
        g->gen_valid            = 1;
@@ -651,7 +674,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
                bch2_data_types[old.data_type ?: data_type],
                old.dirty_sectors, sectors);
 
-       bch2_dev_usage_update(c, ca, old, new, 0, true);
+       bch2_dev_usage_update_m(c, ca, old, new, 0, true);
        percpu_up_read(&c->mark_lock);
 }
 
@@ -810,7 +833,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
        g->stripe               = k.k->p.offset;
        g->stripe_redundancy    = s->nr_redundant;
 
-       bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
+       bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
 err:
        percpu_up_read(&c->mark_lock);
        printbuf_exit(&buf);
@@ -883,7 +906,7 @@ static int bch2_mark_pointer(struct btree_trans *trans,
                              old.v.counter,
                              new.v.counter)) != old.v.counter);
 
-       bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
+       bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
 err:
        percpu_up_read(&c->mark_lock);
 
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index bcb40f15f82e91fbf4a0ff411378c663ef8d4f70..9cc6c16bcc64ca08a658551574e8ff45e52f6a12 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -134,13 +134,6 @@ static inline u8 ptr_stale(struct bch_dev *ca,
        return ret;
 }
 
-/* bucket gc marks */
-
-static inline bool is_available_bucket(struct bucket_mark mark)
-{
-       return !mark.dirty_sectors && !mark.stripe;
-}
-
 /* Device usage: */
 
 struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);