*/
bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
- struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);
+ struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ struct bucket *g2 = PTR_BUCKET(ca, &p.ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);
if (fsck_err_on(!g->gen_valid, c,
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr, true);
+ struct bucket *g = PTR_GC_BUCKET(ca, ptr);
ptr->gen = g->mark.gen;
}
} else {
bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr, true);
+ struct bucket *g = PTR_GC_BUCKET(ca, ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
(ptr->cached &&
ptrs = bch2_bkey_ptrs_c(*k);
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr, true);
+ struct bucket *g = PTR_GC_BUCKET(ca, ptr);
if (gen_after(g->oldest_gen, ptr->gen))
g->oldest_gen = ptr->gen;
percpu_down_read(&c->mark_lock);
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr, false);
+ struct bucket *g = PTR_BUCKET(ca, ptr);
if (gen_after(g->mark.gen, ptr->gen) > 16) {
percpu_up_read(&c->mark_lock);
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr, false);
+ struct bucket *g = PTR_BUCKET(ca, ptr);
if (gen_after(g->gc_gen, ptr->gen))
g->gc_gen = ptr->gen;
: m.data_type;
}
-static bool bucket_became_unavailable(struct bucket_mark old,
- struct bucket_mark new)
-{
- return is_available_bucket(old) &&
- !is_available_bucket(new);
-}
-
static inline void account_bucket(struct bch_fs_usage *fs_usage,
struct bch_dev_usage *dev_usage,
enum bch_data_type type,
return;
percpu_down_read(&c->mark_lock);
- g = __bucket(ca, b, true);
+ g = gc_bucket(ca, b);
old = bucket_cmpxchg(g, new, ({
new.data_type = data_type;
overflow = checked_add(new.dirty_sectors, sectors);
enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
- bool gc = flags & BTREE_TRIGGER_GC;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g;
struct bucket_mark new, old;
char buf[200];
int ret = 0;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
/* XXX doesn't handle deletion */
percpu_down_read(&c->mark_lock);
- g = PTR_BUCKET(ca, ptr, gc);
+ g = PTR_GC_BUCKET(ca, ptr);
if (g->mark.dirty_sectors ||
(g->stripe && g->stripe != k.k->p.offset)) {
g->stripe = k.k->p.offset;
g->stripe_redundancy = s->nr_redundant;
- bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
+ bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
err:
percpu_up_read(&c->mark_lock);
s64 sectors, enum bch_data_type data_type,
unsigned flags)
{
- bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
struct bucket_mark old, new;
u64 v;
int ret = 0;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
percpu_down_read(&c->mark_lock);
- g = PTR_BUCKET(ca, &p.ptr, gc);
+ g = PTR_GC_BUCKET(ca, &p.ptr);
v = atomic64_read(&g->_mark.v);
do {
old.v.counter,
new.v.counter)) != old.v.counter);
- bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
-
- BUG_ON(!gc && bucket_became_unavailable(old, new));
+ bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
err:
percpu_up_read(&c->mark_lock);
s64 sectors,
unsigned flags)
{
- bool gc = flags & BTREE_TRIGGER_GC;
struct bch_fs *c = trans->c;
struct bch_replicas_padded r;
+ struct gc_stripe *m;
- if (!gc) {
- BUG();
- } else {
- struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
-
- if (!m)
- return -ENOMEM;
- spin_lock(&c->ec_stripes_heap_lock);
- if (!m || !m->alive) {
- spin_unlock(&c->ec_stripes_heap_lock);
- bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
- (u64) p.idx);
- bch2_inconsistent_error(c);
- return -EIO;
- }
- m->block_sectors[p.block] += sectors;
- r = m->r;
- spin_unlock(&c->ec_stripes_heap_lock);
-
- r.e.data_type = data_type;
- update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, gc);
- }
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+ m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+ spin_lock(&c->ec_stripes_heap_lock);
+ if (!m || !m->alive) {
+ spin_unlock(&c->ec_stripes_heap_lock);
+ bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
+ (u64) p.idx);
+ bch2_inconsistent_error(c);
+ return -EIO;
+ }
+ m->block_sectors[p.block] += sectors;
+
+ r = m->r;
+ spin_unlock(&c->ec_stripes_heap_lock);
+
+ r.e.data_type = data_type;
+ update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
+
return 0;
}
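
Side note on the pattern above: the rewritten function keys the GC-only stripe table off the kernel's generic radix tree, where genradix_ptr_alloc() allocates zeroed backing pages on demand and returns NULL only on allocation failure, hence the immediate -ENOMEM check. A minimal sketch of that lookup-or-allocate pattern; the gc_stripe_sketch type and function names below are illustrative stand-ins, not code from this patch:

#include <linux/errno.h>
#include <linux/generic-radix-tree.h>
#include <linux/types.h>

/* Illustrative stand-in for struct gc_stripe; field layout assumed. */
struct gc_stripe_sketch {
	u16	block_sectors[4];
	bool	alive;
};

static GENRADIX(struct gc_stripe_sketch) gc_stripes_sketch;

/*
 * Look up slot @idx, allocating (zeroed) backing pages as needed;
 * genradix_ptr_alloc() returns NULL only on allocation failure,
 * mirroring the -ENOMEM handling in bch2_mark_stripe_ptr() above.
 */
static int mark_stripe_block_sketch(size_t idx, unsigned block, u16 sectors)
{
	struct gc_stripe_sketch *m =
		genradix_ptr_alloc(&gc_stripes_sketch, idx, GFP_KERNEL);

	if (!m)
		return -ENOMEM;

	m->block_sectors[block] += sectors;
	return 0;
}
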
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
- bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
bool stale;
int ret;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
r.e.data_type = data_type;
r.e.nr_devs = 0;
r.e.nr_required = 1;
if (p.ptr.cached) {
if (!stale) {
ret = update_cached_sectors(c, k, p.ptr.dev,
- disk_sectors, journal_seq, gc);
+ disk_sectors, journal_seq, true);
if (ret) {
bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
return ret;
}
if (r.e.nr_devs) {
- ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, gc);
+ ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
if (ret) {
char buf[200];
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
if (flags & BTREE_TRIGGER_OVERWRITE)
sectors = -sectors;
sectors *= replicas;
u64 end = le64_to_cpu(p.v->idx) + p.k->size;
int ret = 0;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
idx -= le32_to_cpu(p.v->front_pad);
end += le32_to_cpu(p.v->back_pad);
return buckets->b + b;
}
+static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
+{
+ return __bucket(ca, b, true);
+}
+
static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
return __bucket(ca, b, false);
}
static inline struct bucket *PTR_BUCKET(struct bch_dev *ca,
- const struct bch_extent_ptr *ptr,
- bool gc)
+ const struct bch_extent_ptr *ptr)
+{
+ return bucket(ca, PTR_BUCKET_NR(ca, ptr));
+}
+
+static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
+ const struct bch_extent_ptr *ptr)
{
- return __bucket(ca, PTR_BUCKET_NR(ca, ptr), gc);
+ return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}
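
With the gc flag gone from the accessors, each call site now states which bucket array it means. A sketch of the intended usage split, assuming the same BTREE_TRIGGER_GC convention the mark functions above enforce; the function name here is illustrative:

/* Illustrative only: shows which accessor each path is meant to use. */
static void update_ptr_bucket_sketch(struct bch_fs *c, struct bch_dev *ca,
				     const struct bch_extent_ptr *ptr,
				     unsigned flags)
{
	struct bucket *g;

	if (flags & BTREE_TRIGGER_GC)
		g = PTR_GC_BUCKET(ca, ptr);	/* gc copy of the bucket array */
	else
		g = PTR_BUCKET(ca, ptr);	/* primary bucket array */

	/* ... read or update g->mark here ... */
	(void) g;
}
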
static inline enum bch_data_type ptr_data_type(const struct bkey *k,
u8 ret;
rcu_read_lock();
- ret = gen_after(PTR_BUCKET(ca, ptr, 0)->mark.gen, ptr->gen);
+ ret = gen_after(PTR_BUCKET(ca, ptr)->mark.gen, ptr->gen);
rcu_read_unlock();
return ret;
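
For context, gen_after() compares 8-bit bucket generations that wrap around: it evaluates to how far the first gen is ahead of the second, or 0 when it is not ahead, which is why an earlier hunk can write gen_after(g->mark.gen, ptr->gen) > 16. A standalone sketch of that wrapping comparison, with the helper body assumed from how it is used here:

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed semantics: distance by which gen @a is ahead of gen @b,
 * treating the 8-bit space as circular; 0 if @a is not ahead.
 */
static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;

	return r > 128U ? 0 : r;
}

int main(void)
{
	/* bucket gen 5 vs pointer gen 2: pointer is 3 gens stale */
	printf("%u\n", gen_after(5, 2));	/* 3 */
	/* wraparound: gen 1 is 3 ahead of gen 254 */
	printf("%u\n", gen_after(1, 254));	/* 3 */
	/* not ahead: result is 0 */
	printf("%u\n", gen_after(2, 5));	/* 0 */
	return 0;
}
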