bcachefs: Don't erasure code cached ptrs
author     Kent Overstreet <kent.overstreet@gmail.com>
           Thu, 9 Dec 2021 19:19:18 +0000 (14:19 -0500)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Sun, 22 Oct 2023 21:09:18 +0000 (17:09 -0400)
It doesn't make much sense to be erasure coding cached pointers; we
should be erasure coding one of the dirty pointers in an extent. This
patch makes sure we're passing BCH_WRITE_CACHED when we expect the new
pointer to be a cached pointer, and tweaks the write path to not
allocate from a stripe when BCH_WRITE_CACHED is set.

It also fixes an assertion we were hitting in the ec path when adding
the stripe pointer to an extent and deleting the other pointers: the
pointer to the stripe could end up not existing, because dropping all
dirty pointers from an extent turns it into a KEY_TYPE_error key.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
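
For illustration only, here is a minimal, self-contained C sketch of the
central rule the patch enforces: erasure coding is only attempted when a
write is not a cached write. This is not the kernel code; the flag value
and struct are hypothetical stand-ins for the bcachefs ones.

	#include <stdbool.h>
	#include <stdio.h>

	#define BCH_WRITE_CACHED (1 << 0)	/* hypothetical flag value */

	struct write_op {
		unsigned flags;		/* BCH_WRITE_* flags */
		bool	 erasure_code;	/* from the io options */
	};

	/* Mirrors the io.c change: only allocate from a stripe for dirty writes */
	static bool want_erasure_coding(const struct write_op *op)
	{
		return op->erasure_code && !(op->flags & BCH_WRITE_CACHED);
	}

	int main(void)
	{
		struct write_op dirty  = { .flags = 0,                .erasure_code = true };
		struct write_op cached = { .flags = BCH_WRITE_CACHED, .erasure_code = true };

		printf("dirty write  -> ec=%d\n", want_erasure_coding(&dirty));  /* ec=1 */
		printf("cached write -> ec=%d\n", want_erasure_coding(&cached)); /* ec=0 */
		return 0;
	}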
fs/bcachefs/ec.c
fs/bcachefs/io.c
fs/bcachefs/move.c

diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 2b6a68b4c4d680abb09d8ba7a8f65832eea5b31f..4424cb3ac822e0de5fbfedb27aad1821da841b69 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -143,8 +143,8 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
 }
 
 /* returns blocknr in stripe that we matched: */
-static int bkey_matches_stripe(struct bch_stripe *s,
-                              struct bkey_s_c k)
+static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
+                                               struct bkey_s_c k, unsigned *block)
 {
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const struct bch_extent_ptr *ptr;
@@ -153,10 +153,12 @@ static int bkey_matches_stripe(struct bch_stripe *s,
        bkey_for_each_ptr(ptrs, ptr)
                for (i = 0; i < nr_data; i++)
                        if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
-                                                     le16_to_cpu(s->sectors)))
-                               return i;
+                                                     le16_to_cpu(s->sectors))) {
+                               *block = i;
+                               return ptr;
+                       }
 
-       return -1;
+       return NULL;
 }
 
 static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
@@ -834,6 +836,7 @@ retry:
               (k = bch2_btree_iter_peek(&iter)).k &&
               !(ret = bkey_err(k)) &&
               bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
+               const struct bch_extent_ptr *ptr_c;
                struct bch_extent_ptr *ptr, *ec_ptr = NULL;
 
                if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
@@ -841,8 +844,12 @@ retry:
                        continue;
                }
 
-               block = bkey_matches_stripe(&s->key.v, k);
-               if (block < 0) {
+               ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
+               /*
+                * It doesn't generally make sense to erasure code cached ptrs:
+                * XXX: should we be incrementing a counter?
+                */
+               if (!ptr_c || ptr_c->cached) {
                        bch2_btree_iter_advance(&iter);
                        continue;
                }
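
For context, a standalone sketch of the revised lookup convention
introduced above: instead of returning a block index (with -1 for no
match), the function returns the matching pointer (or NULL) and reports
the block via an out-parameter. The types below are simplified,
hypothetical stand-ins, not the bcachefs structs.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct extent_ptr { unsigned dev; bool cached; };

	/* Return the matching ptr (or NULL); report the block via *block: */
	static const struct extent_ptr *
	match_stripe(const struct extent_ptr *ptrs, unsigned nr,
		     unsigned stripe_dev, unsigned *block)
	{
		for (unsigned i = 0; i < nr; i++)
			if (ptrs[i].dev == stripe_dev) {
				*block = i;
				return &ptrs[i];
			}
		return NULL;
	}

	int main(void)
	{
		struct extent_ptr ptrs[] = {
			{ .dev = 0, .cached = true  },
			{ .dev = 1, .cached = false },
		};
		unsigned block = 0;
		const struct extent_ptr *p = match_stripe(ptrs, 2, 0, &block);

		/* Mirror the patch: don't erasure code cached ptrs */
		if (!p || p->cached)
			printf("skip: no match, or matched a cached ptr\n");
		else
			printf("matched block %u\n", block);
		return 0;
	}

Returning the pointer rather than just the index is what lets the caller
check ->cached without doing a second lookup.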
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 03bea2ddfb39dd23040dd21ed0f57f6a3c44b0ce..814984ec608c11f74bb65d96d02b6a84e71edffc 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1179,7 +1179,7 @@ again:
                 */
                wp = bch2_alloc_sectors_start(c,
                        op->target,
-                       op->opts.erasure_code,
+                       op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
                        op->write_point,
                        &op->devs_have,
                        op->nr_replicas,
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 482dfc29385eb2ae4d993d030ffde83f39f92c57..8756df0414a8d0f9efadae7b241fd73f15c1d2ee 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -394,10 +394,14 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
                unsigned compressed_sectors = 0;
 
                bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-                       if (p.ptr.dev == data_opts.rewrite_dev &&
-                           !p.ptr.cached &&
-                           crc_is_compressed(p.crc))
-                               compressed_sectors += p.crc.compressed_size;
+                       if (p.ptr.dev == data_opts.rewrite_dev) {
+                               if (p.ptr.cached)
+                                       m->op.flags |= BCH_WRITE_CACHED;
+
+                               if (!p.ptr.cached &&
+                                   crc_is_compressed(p.crc))
+                                       compressed_sectors += p.crc.compressed_size;
+                       }
 
                if (compressed_sectors) {
                        ret = bch2_disk_reservation_add(c, &m->op.res,
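
Finally, a standalone sketch of the move-path logic above, again with
hypothetical simplified types: rewriting a cached pointer marks the
whole write cached (so the write path above won't allocate from a
stripe), and only compressed data behind dirty pointers counts toward
the disk reservation.

	#include <stdbool.h>
	#include <stdio.h>

	#define BCH_WRITE_CACHED (1 << 0)	/* hypothetical flag value */

	struct ptr {
		unsigned dev;
		bool	 cached;
		bool	 compressed;		/* stand-in for crc_is_compressed() */
		unsigned compressed_size;
	};

	int main(void)
	{
		struct ptr ptrs[] = {
			{ .dev = 2, .cached = true },
			{ .dev = 3, .cached = false, .compressed = true, .compressed_size = 8 },
		};
		unsigned rewrite_dev = 2, op_flags = 0, compressed_sectors = 0;

		for (unsigned i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++)
			if (ptrs[i].dev == rewrite_dev) {
				if (ptrs[i].cached)
					op_flags |= BCH_WRITE_CACHED;	/* new ptr will be cached too */

				if (!ptrs[i].cached && ptrs[i].compressed)
					compressed_sectors += ptrs[i].compressed_size;
			}

		printf("flags=%u compressed_sectors=%u\n", op_flags, compressed_sectors);
		return 0;
	}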