#define bch2_bkey_ops_alloc (struct bkey_ops) { \
.key_invalid = bch2_alloc_v1_invalid, \
.val_to_text = bch2_alloc_to_text, \
+ .atomic_trigger = bch2_mark_alloc, \
}
#define bch2_bkey_ops_alloc_v2 (struct bkey_ops) { \
.key_invalid = bch2_alloc_v2_invalid, \
.val_to_text = bch2_alloc_to_text, \
+ .atomic_trigger = bch2_mark_alloc, \
}
#define bch2_bkey_ops_alloc_v3 (struct bkey_ops) { \
.key_invalid = bch2_alloc_v3_invalid, \
.val_to_text = bch2_alloc_to_text, \
+ .atomic_trigger = bch2_mark_alloc, \
}
static inline bool bkey_is_alloc(const struct bkey *k)
struct bch_fs;
struct btree;
+struct btree_trans;
struct bkey;
enum btree_node_type;
void (*swab)(struct bkey_s);
bool (*key_normalize)(struct bch_fs *, struct bkey_s);
bool (*key_merge)(struct bch_fs *, struct bkey_s, struct bkey_s_c);
+ int (*trans_trigger)(struct btree_trans *, struct bkey_s_c,
+ struct bkey_i *, unsigned);
+ int (*atomic_trigger)(struct btree_trans *, struct bkey_s_c,
+ struct bkey_s_c, unsigned);
void (*compat)(enum btree_id id, unsigned version,
unsigned big_endian, int write,
struct bkey_s);
bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
+/*
+ * Dispatch the atomic (non-transactional) trigger for a key update.
+ *
+ * The ops table is indexed by old's type, falling back to new's type
+ * via the GNU ?: extension — presumably because a deleted key has
+ * type 0 (KEY_TYPE_deleted); TODO confirm against bcachefs_format.h.
+ * Key types without an atomic_trigger registered are a no-op (return 0),
+ * matching the old switch statement's default case.
+ */
+static inline int bch2_mark_key(struct btree_trans *trans,
+				struct bkey_s_c old,
+				struct bkey_s_c new,
+				unsigned flags)
+{
+	const struct bkey_ops *ops = &bch2_bkey_ops[old.k->type ?: new.k->type];
+
+	return ops->atomic_trigger
+		? ops->atomic_trigger(trans, old, new, flags)
+		: 0;
+}
+
+/*
+ * Dispatch the transactional trigger for a key update.
+ *
+ * Like bch2_mark_key() above, selects the ops entry from old's type and
+ * falls back to new's type when old's is 0 (?: GNU extension); note @new
+ * is a mutable bkey_i * here, unlike the atomic variant.
+ * Returns 0 for key types with no trans_trigger registered.
+ */
+static inline int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
+				      struct bkey_i *new, unsigned flags)
+{
+	const struct bkey_ops *ops = &bch2_bkey_ops[old.k->type ?: new->k.type];
+
+	return ops->trans_trigger
+		? ops->trans_trigger(trans, old, new, flags)
+		: 0;
+}
+
void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
void __bch2_bkey_compat(unsigned, enum btree_id, unsigned, unsigned,
BUG_ON(owned_by_allocator == old.owned_by_allocator);
}
-static int bch2_mark_alloc(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+int bch2_mark_alloc(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
return 0;
}
-static int bch2_mark_extent(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+int bch2_mark_extent(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
return 0;
}
-static int bch2_mark_stripe(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+int bch2_mark_stripe(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
return 0;
}
-static int bch2_mark_inode(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+int bch2_mark_inode(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
struct bch_fs *c = trans->c;
struct bch_fs_usage __percpu *fs_usage;
return 0;
}
-static int bch2_mark_reservation(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+int bch2_mark_reservation(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
struct bch_fs *c = trans->c;
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
return ret;
}
-static int bch2_mark_reflink_p(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+int bch2_mark_reflink_p(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
struct bch_fs *c = trans->c;
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
return ret;
}
-int bch2_mark_key(struct btree_trans *trans,
- struct bkey_s_c old,
- struct bkey_s_c new,
- unsigned flags)
-{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
-
- switch (k.k->type) {
- case KEY_TYPE_alloc:
- case KEY_TYPE_alloc_v2:
- case KEY_TYPE_alloc_v3:
- return bch2_mark_alloc(trans, old, new, flags);
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- return bch2_mark_extent(trans, old, new, flags);
- case KEY_TYPE_stripe:
- return bch2_mark_stripe(trans, old, new, flags);
- case KEY_TYPE_inode:
- case KEY_TYPE_inode_v2:
- return bch2_mark_inode(trans, old, new, flags);
- case KEY_TYPE_reservation:
- return bch2_mark_reservation(trans, old, new, flags);
- case KEY_TYPE_reflink_p:
- return bch2_mark_reflink_p(trans, old, new, flags);
- case KEY_TYPE_snapshot:
- return bch2_mark_snapshot(trans, old, new, flags);
- default:
- return 0;
- }
-}
-
static noinline __cold
void fs_usage_apply_warn(struct btree_trans *trans,
unsigned disk_res_sectors,
return ret;
}
-static int bch2_trans_mark_extent(struct btree_trans *trans,
- struct bkey_s_c k, unsigned flags)
+int bch2_trans_mark_extent(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
{
struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
+ ? old
+ : bkey_i_to_s_c(new);
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
return ret;
}
-static int bch2_trans_mark_stripe(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_i *new,
- unsigned flags)
+int bch2_trans_mark_stripe(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
{
const struct bch_stripe *old_s = NULL;
struct bch_stripe *new_s = NULL;
return ret;
}
-static int bch2_trans_mark_inode(struct btree_trans *trans,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
+int bch2_trans_mark_inode(struct btree_trans *trans,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
{
int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
return 0;
}
-static int bch2_trans_mark_reservation(struct btree_trans *trans,
- struct bkey_s_c k, unsigned flags)
+int bch2_trans_mark_reservation(struct btree_trans *trans,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
{
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
+ ? old
+ : bkey_i_to_s_c(new);
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
struct replicas_delta_list *d;
return ret;
}
-static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
- struct bkey_s_c k, unsigned flags)
+int bch2_trans_mark_reflink_p(struct btree_trans *trans,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
{
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
+ ? old
+ : bkey_i_to_s_c(new);
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
u64 idx, end_idx;
int ret = 0;
return ret;
}
-int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
- struct bkey_i *new, unsigned flags)
-{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
-
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- return bch2_trans_mark_extent(trans, k, flags);
- case KEY_TYPE_stripe:
- return bch2_trans_mark_stripe(trans, old, new, flags);
- case KEY_TYPE_inode:
- case KEY_TYPE_inode_v2:
- return bch2_trans_mark_inode(trans, old, new, flags);
- case KEY_TYPE_reservation:
- return bch2_trans_mark_reservation(trans, k, flags);
- case KEY_TYPE_reflink_p:
- return bch2_trans_mark_reflink_p(trans, k, flags);
- default:
- return 0;
- }
-}
-
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
enum bch_data_type type,
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);
+int bch2_mark_alloc(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_extent(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_stripe(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_inode(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_reservation(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_reflink_p(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
+
+int bch2_trans_mark_extent(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_stripe(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_inode(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_reservation(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_reflink_p(struct btree_trans *, struct bkey_s_c, struct bkey_i *, unsigned);
+
int bch2_mark_key(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
.key_invalid = bch2_stripe_invalid, \
.val_to_text = bch2_stripe_to_text, \
.swab = bch2_ptr_swab, \
+ .trans_trigger = bch2_trans_mark_stripe, \
+ .atomic_trigger = bch2_mark_stripe, \
}
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
.key_invalid = bch2_btree_ptr_invalid, \
.val_to_text = bch2_btree_ptr_to_text, \
.swab = bch2_ptr_swab, \
+ .trans_trigger = bch2_trans_mark_extent, \
+ .atomic_trigger = bch2_mark_extent, \
}
#define bch2_bkey_ops_btree_ptr_v2 (struct bkey_ops) { \
.val_to_text = bch2_btree_ptr_v2_to_text, \
.swab = bch2_ptr_swab, \
.compat = bch2_btree_ptr_v2_compat, \
+ .trans_trigger = bch2_trans_mark_extent, \
+ .atomic_trigger = bch2_mark_extent, \
}
/* KEY_TYPE_extent: */
.swab = bch2_ptr_swab, \
.key_normalize = bch2_extent_normalize, \
.key_merge = bch2_extent_merge, \
+ .trans_trigger = bch2_trans_mark_extent, \
+ .atomic_trigger = bch2_mark_extent, \
}
/* KEY_TYPE_reservation: */
.key_invalid = bch2_reservation_invalid, \
.val_to_text = bch2_reservation_to_text, \
.key_merge = bch2_reservation_merge, \
+ .trans_trigger = bch2_trans_mark_reservation, \
+ .atomic_trigger = bch2_mark_reservation, \
}
/* Extent checksum entries: */
#define bch2_bkey_ops_inode (struct bkey_ops) { \
.key_invalid = bch2_inode_invalid, \
.val_to_text = bch2_inode_to_text, \
+ .trans_trigger = bch2_trans_mark_inode, \
+ .atomic_trigger = bch2_mark_inode, \
}
#define bch2_bkey_ops_inode_v2 (struct bkey_ops) { \
.key_invalid = bch2_inode_v2_invalid, \
.val_to_text = bch2_inode_to_text, \
+ .trans_trigger = bch2_trans_mark_inode, \
+ .atomic_trigger = bch2_mark_inode, \
}
static inline bool bkey_is_inode(const struct bkey *k)
#define bch2_bkey_ops_reflink_p (struct bkey_ops) { \
.key_invalid = bch2_reflink_p_invalid, \
.val_to_text = bch2_reflink_p_to_text, \
- .key_merge = bch2_reflink_p_merge, \
+ .key_merge = bch2_reflink_p_merge, \
+ .trans_trigger = bch2_trans_mark_reflink_p, \
+ .atomic_trigger = bch2_mark_reflink_p, \
}
const char *bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c);
.key_invalid = bch2_reflink_v_invalid, \
.val_to_text = bch2_reflink_v_to_text, \
.swab = bch2_ptr_swab, \
+ .trans_trigger = bch2_trans_mark_extent, \
+ .atomic_trigger = bch2_mark_extent, \
}
const char *bch2_indirect_inline_data_invalid(const struct bch_fs *,