In fs/bcachefs/alloc_foreground.c, bch2_alloc_sectors_append_ptrs() gains a bool cached argument, letting callers request cached pointers directly:

 /*
  * Append pointers to the space we just allocated to @k, and mark @sectors space
  * as allocated out of @ob
  */
 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
-				    struct bkey_i *k, unsigned sectors)
+				    struct bkey_i *k, unsigned sectors,
+				    bool cached)
 {
 	struct open_bucket *ob;
 	unsigned i;
 
 	open_bucket_for_each(c, &wp->ptrs, ob, i) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-		struct bch_extent_ptr tmp = ob->ptr;
+		struct bch_extent_ptr ptr = ob->ptr;
 
-		tmp.cached = !ca->mi.durability &&
-			wp->type == BCH_DATA_user;
+		ptr.cached = cached ||
+			(!ca->mi.durability &&
+			 wp->type == BCH_DATA_user);
 
-		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
-		bch2_bkey_append_ptr(k, tmp);
+		ptr.offset += ca->mi.bucket_size - ob->sectors_free;
+		bch2_bkey_append_ptr(k, ptr);
 
 		BUG_ON(sectors > ob->sectors_free);
 		ob->sectors_free -= sectors;
 	}
 }
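To make the new semantics concrete, here is a minimal standalone model of the ->cached decision; ptr_cached(), DATA_btree, and DATA_user are hypothetical stand-ins, not bcachefs identifiers. A pointer is created cached when the caller requests it, or when it lands on a zero-durability device during a user-data write.

/* Standalone model of the ->cached decision; stand-in types, not kernel code. */
#include <assert.h>
#include <stdbool.h>

enum wp_data_type { DATA_btree, DATA_user };	/* stand-in for BCH_DATA_* */

static bool ptr_cached(bool cached, unsigned durability, enum wp_data_type type)
{
	/* mirrors: cached || (!ca->mi.durability && wp->type == BCH_DATA_user) */
	return cached || (!durability && type == DATA_user);
}

int main(void)
{
	assert(ptr_cached(true, 1, DATA_user));		/* caller asked for cached */
	assert(ptr_cached(false, 0, DATA_user));	/* zero-durability device */
	assert(!ptr_cached(false, 0, DATA_btree));	/* btree writes stay uncached */
	return 0;
}

In fs/bcachefs/alloc_foreground.h, the declaration picks up the new parameter: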
 					     struct closure *);
 void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
-				    struct bkey_i *, unsigned);
+				    struct bkey_i *, unsigned, bool);
 void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
 void bch2_open_buckets_stop_dev(struct bch_fs *, struct bch_dev *,
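In fs/bcachefs/btree_update_interior.c, btree node allocation passes false for the new argument, since btree node pointers are never marked cached: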
 	}
 
 	bkey_btree_ptr_v2_init(&tmp.k);
-	bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c));
+	bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);
 
 	bch2_open_bucket_get(c, wp, &ob);
 	bch2_alloc_sectors_done(c, wp);
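Finally, in fs/bcachefs/io.c, init_append_extent() now passes BCH_WRITE_CACHED down to the allocator instead of fixing up the pointers after they have been appended: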
 static void init_append_extent(struct bch_write_op *op,
 			       struct write_point *wp,
 			       struct bversion version,
 			       struct bch_extent_crc_unpacked crc)
 {
 	struct bkey_i_extent *e;
-	struct bch_extent_ptr *ptr;
 
 	op->pos.offset += crc.uncompressed_size;
 
 	e = bkey_extent_init(op->insert_keys.top);
 	e->k.p		= op->pos;
 	e->k.size	= crc.uncompressed_size;
 	e->k.version	= version;
 
 	if (crc.csum_type ||
 	    crc.compression_type ||
 	    crc.nonce)
 		bch2_extent_crc_append(&e->k_i, crc);
 
-	bch2_alloc_sectors_append_ptrs(op->c, wp, &e->k_i, crc.compressed_size);
-
-	if (op->flags & BCH_WRITE_CACHED)
-		extent_for_each_ptr(extent_i_to_s(e), ptr)
-			ptr->cached = true;
+	bch2_alloc_sectors_append_ptrs(op->c, wp, &e->k_i, crc.compressed_size,
+				       op->flags & BCH_WRITE_CACHED);
 
 	bch2_keylist_push(&op->insert_keys);
 }
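Since the pointers now come back from bch2_alloc_sectors_append_ptrs() already marked cached, the separate extent_for_each_ptr() fixup pass is gone, and with it the now-unused ptr local removed at the top of the function.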