Reaching the btree_trans via iter->trans is disfavoured, and should go away: pass the transaction around explicitly instead.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
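
The change is mechanical: every helper that used to reach the transaction
through the iterator's backpointer now takes it as an explicit argument. A
minimal sketch of the before/after shape - toy types and function names, not
the real bcachefs ones:

	struct btree_trans;
	struct btree_iter { struct btree_trans *trans; /* backpointer being phased out */ };

	/* before: the callee digs the transaction out of the iterator */
	static void example_op_old(struct btree_iter *iter)
	{
		struct btree_trans *trans = iter->trans;	/* disfavoured */
		(void) trans;
	}

	/* after: the caller passes the transaction explicitly */
	static void example_op_new(struct btree_trans *trans, struct btree_iter *iter)
	{
		(void) trans;
		(void) iter;
	}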
bch2_btree_build_aux_trees(b);
if (iter && reinit_iter)
- bch2_btree_iter_reinit_node(iter, b);
+ bch2_btree_iter_reinit_node(trans, iter, b);
}
static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
static inline void btree_trans_sort_iters(struct btree_trans *);
-static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long);
+static struct btree_iter *btree_iter_child_alloc(struct btree_trans *,
+ struct btree_iter *, unsigned long);
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *,
struct btree_iter *);
-static void btree_iter_copy(struct btree_iter *, struct btree_iter *);
+static void btree_iter_copy(struct btree_trans *, struct btree_iter *, struct btree_iter *);
static inline int btree_iter_cmp(const struct btree_iter *l,
const struct btree_iter *r)
/* Btree node locking: */
-void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
+void bch2_btree_node_unlock_write(struct btree_trans *trans,
+ struct btree_iter *iter, struct btree *b)
{
- bch2_btree_node_unlock_write_inlined(b, iter);
+ bch2_btree_node_unlock_write_inlined(trans, iter, b);
}
-void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+void __bch2_btree_node_lock_write(struct btree_trans *trans,
+ struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
unsigned readers = 0;
EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
- trans_for_each_iter(iter->trans, linked)
+ trans_for_each_iter(trans, linked)
if (linked->l[b->c.level].b == b &&
btree_node_read_locked(linked, b->c.level))
readers++;
	/* drop our own read locks so the write lock can be taken: */
	if (!b->c.lock.readers)
		atomic64_sub(__SIX_VAL(read_lock, readers),
			     &b->c.lock.state.counter);
	else
		this_cpu_sub(*b->c.lock.readers, readers);
- btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
+ btree_node_lock_type(trans->c, b, SIX_LOCK_write);
	if (!b->c.lock.readers)
		atomic64_add(__SIX_VAL(read_lock, readers),
			     &b->c.lock.state.counter);
	else
		this_cpu_add(*b->c.lock.readers, readers);
return true;
}
-static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
- unsigned long trace_ip)
+static inline bool btree_iter_get_locks(struct btree_trans *trans,
+ struct btree_iter *iter,
+ bool upgrade, unsigned long trace_ip)
{
unsigned l = iter->level;
int fail_idx = -1;
: bch2_btree_node_relock(iter, l))) {
(upgrade
? trace_node_upgrade_fail
- : trace_node_relock_fail)(iter->trans->ip, trace_ip,
+ : trace_node_relock_fail)(trans->ip, trace_ip,
btree_iter_type(iter) == BTREE_ITER_CACHED,
iter->btree_id, &iter->real_pos,
l, iter->l[l].lock_seq,
if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
iter->uptodate = BTREE_ITER_NEED_PEEK;
- bch2_btree_trans_verify_locks(iter->trans);
+ bch2_btree_trans_verify_locks(trans);
return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}
/* Btree iterator locking: */
#ifdef CONFIG_BCACHEFS_DEBUG
-static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
+static void bch2_btree_iter_verify_locks(struct btree_trans *trans,
+ struct btree_iter *iter)
{
unsigned l;
- if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
+ if (!(trans->iters_linked & (1ULL << iter->idx))) {
BUG_ON(iter->nodes_locked);
return;
}
struct btree_iter *iter;
trans_for_each_iter(trans, iter)
- bch2_btree_iter_verify_locks(iter);
+ bch2_btree_iter_verify_locks(trans, iter);
}
#else
-static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify_locks(struct btree_trans *trans,
+ struct btree_iter *iter) {}
#endif
/*
*/
bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
unsigned l;
for (l = iter->level;
l < iter->locks_want && btree_iter_node(iter, l);
l++) {
if (!bch2_btree_node_relock(iter, l)) {
- trace_node_relock_fail(iter->trans->ip, _RET_IP_,
+ trace_node_relock_fail(trans->ip, _RET_IP_,
btree_iter_type(iter) == BTREE_ITER_CACHED,
iter->btree_id, &iter->real_pos,
				l, iter->l[l].lock_seq,
				is_btree_node(iter, l)
				? iter->l[l].b->c.lock.state.seq
				: 0);
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- btree_trans_restart(iter->trans);
+ btree_trans_restart(trans);
return false;
}
}
}
__flatten
-bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
+static bool bch2_btree_iter_relock(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned long trace_ip)
{
- bool ret = btree_iter_get_locks(iter, false, trace_ip);
+ bool ret = btree_iter_get_locks(trans, iter, false, trace_ip);
if (!ret)
- btree_trans_restart(iter->trans);
+ btree_trans_restart(trans);
return ret;
}
bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
unsigned new_locks_want)
{
+ struct btree_trans *trans = iter->trans;
struct btree_iter *linked;
EBUG_ON(iter->locks_want >= new_locks_want);
iter->locks_want = new_locks_want;
- if (btree_iter_get_locks(iter, true, _THIS_IP_))
+ if (btree_iter_get_locks(trans, iter, true, _THIS_IP_))
return true;
/*
* before interior nodes - now that's handled by
* bch2_btree_iter_traverse_all().
*/
- trans_for_each_iter(iter->trans, linked)
+ trans_for_each_iter(trans, linked)
if (linked != iter &&
btree_iter_type(linked) == btree_iter_type(iter) &&
linked->btree_id == iter->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
- btree_iter_get_locks(linked, true, _THIS_IP_);
+ btree_iter_get_locks(trans, linked, true, _THIS_IP_);
}
if (iter->should_be_locked)
- btree_trans_restart(iter->trans);
+ btree_trans_restart(trans);
return false;
}
trans_for_each_iter(trans, iter)
if (btree_iter_should_be_locked(iter) &&
- !bch2_btree_iter_relock(iter, _RET_IP_)) {
+ !bch2_btree_iter_relock(trans, iter, _RET_IP_)) {
trace_trans_restart_relock(trans->ip, _RET_IP_,
iter->btree_id, &iter->real_pos);
BUG_ON(!trans->restarted);
bch2_btree_iter_verify_level(iter, i);
}
- bch2_btree_iter_verify_locks(iter);
+ bch2_btree_iter_verify_locks(trans, iter);
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
-void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
+void bch2_btree_iter_fix_key_modified(struct btree_trans *trans,
+ struct btree_iter *iter,
struct btree *b,
struct bkey_packed *where)
{
struct btree_iter *linked;
- trans_for_each_iter_with_node(iter->trans, b, linked) {
+ trans_for_each_iter_with_node(trans, b, linked) {
__bch2_btree_iter_fix_key_modified(linked, b, where);
bch2_btree_iter_verify_level(linked, b->c.level);
}
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
-void bch2_btree_node_iter_fix(struct btree_iter *iter,
+void bch2_btree_node_iter_fix(struct btree_trans *trans,
+ struct btree_iter *iter,
struct btree *b,
struct btree_node_iter *node_iter,
struct bkey_packed *where,
bch2_btree_node_iter_verify(node_iter, b);
}
- trans_for_each_iter_with_node(iter->trans, b, linked) {
+ trans_for_each_iter_with_node(trans, b, linked) {
__bch2_btree_node_iter_fix(linked, b,
&linked->l[b->c.level].iter, t,
where, clobber_u64s, new_u64s);
* A btree node is being replaced - update the iterator to point to the new
* node:
*/
-void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_replace(struct btree_trans *trans,
+ struct btree_iter *iter, struct btree *b)
{
enum btree_node_locked_type t;
struct btree_iter *linked;
- trans_for_each_iter(iter->trans, linked)
+ trans_for_each_iter(trans, linked)
if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
btree_iter_pos_in_node(linked, b)) {
/*
}
}
-void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_drop(struct btree_trans *trans,
+ struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
unsigned level = b->c.level;
- trans_for_each_iter(iter->trans, linked)
+ trans_for_each_iter(trans, linked)
if (linked->l[level].b == b) {
btree_node_unlock(linked, level);
linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
* A btree node has been modified in such a way as to invalidate iterators - fix
* them:
*/
-void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_reinit_node(struct btree_trans *trans,
+ struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- trans_for_each_iter_with_node(iter->trans, b, linked)
+ trans_for_each_iter_with_node(trans, b, linked)
__btree_iter_init(linked, b->c.level);
}
}
noinline
-static int btree_iter_prefetch(struct btree_iter *iter)
+static int btree_iter_prefetch(struct btree_trans *trans, struct btree_iter *iter)
{
- struct bch_fs *c = iter->trans->c;
+ struct bch_fs *c = trans->c;
struct btree_iter_level *l = &iter->l[iter->level];
struct btree_node_iter node_iter = l->iter;
struct bkey_packed *k;
btree_node_mem_ptr_set(iter, level + 1, b);
if (iter->flags & BTREE_ITER_PREFETCH)
- ret = btree_iter_prefetch(iter);
+ ret = btree_iter_prefetch(trans, iter);
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
iter->level = level;
- bch2_btree_iter_verify_locks(iter);
+ bch2_btree_iter_verify_locks(trans, iter);
err:
bch2_bkey_buf_exit(&tmp, c);
return ret;
}
-static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
+static int btree_iter_traverse_one(struct btree_trans *,
+ struct btree_iter *, unsigned long);
static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
unsigned long trace_ip)
EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
- ret = btree_iter_traverse_one(iter, _THIS_IP_);
+ ret = btree_iter_traverse_one(trans, iter, _THIS_IP_);
if (ret)
goto retry_all;
* On error, caller (peek_node()/peek_key()) must return NULL; the error is
* stashed in the iterator and returned from bch2_trans_exit().
*/
-static int btree_iter_traverse_one(struct btree_iter *iter,
+static int btree_iter_traverse_one(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned long trace_ip)
{
- struct btree_trans *trans = iter->trans;
unsigned l, depth_want = iter->level;
int ret = 0;
* and re-traverse the iterator without a transaction restart:
*/
if (iter->should_be_locked) {
- ret = bch2_btree_iter_relock(iter, trace_ip) ? 0 : -EINTR;
+ ret = bch2_btree_iter_relock(trans, iter, trace_ip) ? 0 : -EINTR;
goto out;
}
int ret;
ret = bch2_trans_cond_resched(trans) ?:
- btree_iter_traverse_one(iter, _RET_IP_);
+ btree_iter_traverse_one(trans, iter, _RET_IP_);
if (unlikely(ret) && hweight64(trans->iters_linked) == 1) {
ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
BUG_ON(ret == -EINTR);
static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
{
+ struct btree_trans *trans = iter->trans;
#ifdef CONFIG_BCACHEFS_DEBUG
struct bpos old_pos = iter->real_pos;
#endif
int cmp = bpos_cmp(new_pos, iter->real_pos);
unsigned l = iter->level;
- EBUG_ON(iter->trans->restarted);
+ EBUG_ON(trans->restarted);
if (!cmp)
goto out;
iter->real_pos = new_pos;
iter->should_be_locked = false;
- iter->trans->iters_sorted = false;
+ trans->iters_sorted = false;
if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
btree_node_unlock(iter, 0);
bch2_btree_iter_verify(iter);
#ifdef CONFIG_BCACHEFS_DEBUG
- trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
+ trace_iter_set_search_pos(trans->ip, _RET_IP_,
iter->btree_id,
&old_pos, &new_pos, l);
#endif
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
struct bpos search_key;
struct bkey_s_c k;
int ret;
if (iter->flags & BTREE_ITER_INTENT) {
struct btree_iter *child =
- btree_iter_child_alloc(iter, _THIS_IP_);
+ btree_iter_child_alloc(trans, iter, _THIS_IP_);
- btree_iter_copy(child, iter);
+ btree_iter_copy(trans, child, iter);
k = bch2_btree_iter_peek(child);
if (k.k && !bkey_err(k))
btree_trans_verify_sorted_refs(trans);
}
-static void btree_iter_child_free(struct btree_iter *iter)
+static void btree_iter_child_free(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_iter *child = btree_iter_child(iter);
+ struct btree_iter *child = btree_iter_child(trans, iter);
if (child) {
- bch2_trans_iter_free(iter->trans, child);
+ bch2_trans_iter_free(trans, child);
iter->child_idx = U8_MAX;
}
}
-static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
+static struct btree_iter *btree_iter_child_alloc(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned long ip)
{
- struct btree_trans *trans = iter->trans;
- struct btree_iter *child = btree_iter_child(iter);
+ struct btree_iter *child = btree_iter_child(trans, iter);
if (!child) {
child = btree_trans_iter_alloc(trans, iter);
static inline void __bch2_trans_iter_free(struct btree_trans *trans,
unsigned idx)
{
- btree_iter_child_free(&trans->iters[idx]);
+ btree_iter_child_free(trans, &trans->iters[idx]);
btree_iter_list_remove(trans, &trans->iters[idx]);
return iter;
}
-static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
+static void btree_iter_copy(struct btree_trans *trans, struct btree_iter *dst,
+ struct btree_iter *src)
{
unsigned i, offset = offsetof(struct btree_iter, flags);
__bch2_btree_iter_unlock(dst);
- btree_iter_child_free(dst);
+ btree_iter_child_free(trans, dst);
memcpy((void *) dst + offset,
(void *) src + offset,
dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
- dst->trans->iters_sorted = false;
+ trans->iters_sorted = false;
}
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
bch2_btree_iter_init(trans, iter, btree_id);
} else if (btree_iter_keep(trans, best)) {
iter = btree_trans_iter_alloc(trans, best);
- btree_iter_copy(iter, best);
+ btree_iter_copy(trans, iter, best);
} else {
iter = best;
}
locks_want = min(locks_want, BTREE_MAX_DEPTH);
if (locks_want > iter->locks_want) {
iter->locks_want = locks_want;
- btree_iter_get_locks(iter, true, _THIS_IP_);
+ btree_iter_get_locks(trans, iter, true, _THIS_IP_);
}
while (iter->level != depth) {
}
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
- struct btree_iter *src)
+ struct btree_iter *src)
{
struct btree_iter *iter;
iter = btree_trans_iter_alloc(trans, src);
- btree_iter_copy(iter, src);
+ btree_iter_copy(trans, iter, src);
trans->iters_live |= 1ULL << iter->idx;
/*
struct btree_iter *iter;
trans_for_each_iter(trans, iter)
- btree_iter_child_free(iter);
+ btree_iter_child_free(trans, iter);
}
if (trans->iters_live) {
static inline void bch2_btree_trans_verify_locks(struct btree_trans *trans) {}
#endif
-void bch2_btree_iter_fix_key_modified(struct btree_iter *, struct btree *,
- struct bkey_packed *);
-void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
- struct btree_node_iter *, struct bkey_packed *,
- unsigned, unsigned);
+void bch2_btree_iter_fix_key_modified(struct btree_trans *trans, struct btree_iter *,
+ struct btree *, struct bkey_packed *);
+void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_iter *,
+ struct btree *, struct btree_node_iter *,
+ struct bkey_packed *, unsigned, unsigned);
bool bch2_btree_iter_relock_intent(struct btree_iter *);
-bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_downgrade(struct btree_trans *);
-void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
-void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_replace(struct btree_trans *trans,
+ struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_drop(struct btree_trans *,
+ struct btree_iter *, struct btree *);
-void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
+void bch2_btree_iter_reinit_node(struct btree_trans *,
+ struct btree_iter *, struct btree *);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);
return idx != U8_MAX ? trans->iters + idx : NULL;
}
-static inline struct btree_iter *btree_iter_child(struct btree_iter *iter)
+static inline struct btree_iter *btree_iter_child(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- return idx_to_btree_iter(iter->trans, iter->child_idx);
+ return idx_to_btree_iter(trans, iter->child_idx);
}
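
Note the child linkage is an index into trans->iters with U8_MAX as the
"no child" sentinel, rather than a pointer - a standalone sketch of that
convention (toy types, not the real structs):

	#include <stddef.h>
	#include <stdint.h>

	#define NO_CHILD UINT8_MAX	/* mirrors the U8_MAX sentinel above */

	struct toy_iter  { uint8_t child_idx; };
	struct toy_trans { struct toy_iter iters[64]; };

	static struct toy_iter *toy_child(struct toy_trans *trans, struct toy_iter *iter)
	{
		/* an index resolves against the trans, so the iter needs no backpointer */
		return iter->child_idx != NO_CHILD
			? &trans->iters[iter->child_idx]
			: NULL;
	}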
/*
}
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
- struct btree_iter *);
+ struct btree_iter *);
static inline struct btree_iter *
bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
{
* XXX: not allowed to be holding read locks when we take a write lock,
* currently
*/
- bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
+ bch2_btree_node_lock_write(trans, ck_iter, ck_iter->l[0].b);
if (new_k) {
kfree(ck->k);
ck->u64s = new_u64s;
bkey_reassemble(ck->k, k);
ck->valid = true;
- bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);
+ bch2_btree_node_unlock_write(trans, ck_iter, ck_iter->l[0].b);
/* We're not likely to need this iterator again: */
set_btree_iter_dontneed(trans, iter);
* succeed:
*/
static inline void
-bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
+bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_iter *iter,
+ struct btree *b)
{
struct btree_iter *linked;
EBUG_ON(iter->l[b->c.level].b != b);
EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
- trans_for_each_iter_with_node(iter->trans, b, linked)
+ trans_for_each_iter_with_node(trans, b, linked)
linked->l[b->c.level].lock_seq += 2;
six_unlock_write(&b->c.lock);
}
-void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
+void bch2_btree_node_unlock_write(struct btree_trans *,
+ struct btree_iter *, struct btree *);
-void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
+void __bch2_btree_node_lock_write(struct btree_trans *,
+ struct btree_iter *, struct btree *);
-static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b)
{
EBUG_ON(iter->l[b->c.level].b != b);
EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
if (unlikely(!six_trylock_write(&b->c.lock)))
- __bch2_btree_node_lock_write(b, iter);
+ __bch2_btree_node_lock_write(trans, iter, b);
}
#endif /* _BCACHEFS_BTREE_LOCKING_H */
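
For illustration, a sketch of a write-locked critical section under the new
three-argument convention - example_modify is hypothetical; it assumes the
declarations above, plus an intent lock already held on @b (which the
EBUG_ON()s verify):

	static void example_modify(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct btree *b)
	{
		/*
		 * fast path is six_trylock_write(); the slow path in
		 * __bch2_btree_node_lock_write() drops this transaction's
		 * read counts first
		 */
		bch2_btree_node_lock_write(trans, iter, b);

		/* ... modify @b ... */

		bch2_btree_node_unlock_write(trans, iter, b);
	}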
void bch2_btree_node_lock_for_insert(struct btree_trans *, struct btree_iter *,
struct btree *);
-bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
- struct btree_node_iter *, struct bkey_i *);
+bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_iter *,
+ struct btree *, struct btree_node_iter *,
+ struct bkey_i *);
void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
enum btree_insert_flags {
static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
struct btree_iter *, struct btree *,
struct keylist *, unsigned);
+static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
/* Debug code: */
mutex_unlock(&c->btree_cache.lock);
}
-void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
-{
- struct open_buckets ob = b->ob;
-
- b->ob.nr = 0;
-
- clear_btree_node_dirty(c, b);
-
- btree_node_lock_type(c, b, SIX_LOCK_write);
- __btree_node_free(c, b);
- six_unlock_write(&b->c.lock);
-
- bch2_open_buckets_put(c, &ob);
-}
-
-void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
- struct btree_iter *iter)
+static void bch2_btree_node_free_inmem(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b)
{
+ struct bch_fs *c = trans->c;
struct btree_iter *linked;
- trans_for_each_iter(iter->trans, linked)
+ trans_for_each_iter(trans, linked)
BUG_ON(linked->l[b->c.level].b == b);
six_lock_write(&b->c.lock, NULL, NULL);
* And it adds @b to the list of @as's new nodes, so that we can update sector
* counts in bch2_btree_update_nodes_written:
*/
-void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
+static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
{
struct bch_fs *c = as->c;
closure_put(&as->cl);
}
-void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
+static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
{
while (b->ob.nr)
as->open_buckets[as->nr_open_buckets++] =
* nodes and thus outstanding btree_updates - redirect @b's
* btree_updates to point to this btree_update:
*/
-void bch2_btree_interior_update_will_free_node(struct btree_update *as,
+static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
struct btree *b)
{
struct bch_fs *c = as->c;
as->nr_old_nodes++;
}
-void bch2_btree_update_done(struct btree_update *as)
+static void bch2_btree_update_done(struct btree_update *as)
{
BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
as->c->btree_interior_update_worker);
}
-struct btree_update *
-bch2_btree_update_start(struct btree_iter *iter, unsigned level,
- unsigned nr_nodes, unsigned flags)
+static struct btree_update *
+bch2_btree_update_start(struct btree_trans *trans, struct btree_iter *iter,
+ unsigned level, unsigned nr_nodes, unsigned flags)
{
- struct btree_trans *trans = iter->trans;
struct bch_fs *c = trans->c;
struct btree_update *as;
struct closure cl;
* is nothing new to be done. This just guarantees that there is a
* journal write.
*/
-static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
- struct btree_iter *iter)
+static void bch2_btree_set_root(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b)
{
struct bch_fs *c = as->c;
struct btree *old;
* Ensure no one is using the old root while we switch to the
* new root:
*/
- bch2_btree_node_lock_write(old, iter);
+ bch2_btree_node_lock_write(trans, iter, old);
bch2_btree_set_root_inmem(c, b);
* an intent lock on the new root, and any updates that would
* depend on the new root would have to update the new root.
*/
- bch2_btree_node_unlock_write(old, iter);
+ bch2_btree_node_unlock_write(trans, iter, old);
}
/* Interior node updates: */
-static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
+static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
+ struct btree_trans *trans,
struct btree_iter *iter,
- struct bkey_i *insert,
- struct btree_node_iter *node_iter)
+ struct btree *b,
+ struct btree_node_iter *node_iter,
+ struct bkey_i *insert)
{
struct bch_fs *c = as->c;
struct bkey_packed *k;
bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
bch2_btree_node_iter_advance(node_iter, b);
- bch2_btree_bset_insert_key(iter, b, node_iter, insert);
+ bch2_btree_bset_insert_key(trans, iter, b, node_iter, insert);
set_btree_node_dirty(c, b);
set_btree_node_need_write(b);
}
static void
-__bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
- struct btree_iter *iter, struct keylist *keys,
- struct btree_node_iter node_iter)
+__bch2_btree_insert_keys_interior(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b,
+ struct btree_node_iter node_iter,
+ struct keylist *keys)
{
struct bkey_i *insert = bch2_keylist_front(keys);
struct bkey_packed *k;
;
while (!bch2_keylist_empty(keys)) {
- bch2_insert_fixup_btree_ptr(as, b, iter,
- bch2_keylist_front(keys), &node_iter);
+ bch2_insert_fixup_btree_ptr(as, trans, iter, b,
+ &node_iter, bch2_keylist_front(keys));
bch2_keylist_pop_front(keys);
}
}
* nodes that were coalesced, and thus in the middle of a child node post
* coalescing:
*/
-static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
+static void btree_split_insert_keys(struct btree_update *as,
+ struct btree_trans *trans,
struct btree_iter *iter,
+ struct btree *b,
struct keylist *keys)
{
struct btree_node_iter node_iter;
bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
- __bch2_btree_insert_keys_interior(as, b, iter, keys, node_iter);
+ __bch2_btree_insert_keys_interior(as, trans, iter, b, node_iter, keys);
/*
* We can't tolerate whiteouts here - with whiteouts there can be
bch2_btree_update_add_new_node(as, n1);
if (keys)
- btree_split_insert_keys(as, n1, iter, keys);
+ btree_split_insert_keys(as, trans, iter, n1, keys);
if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
trace_btree_split(c, b);
n3->sib_u64s[0] = U16_MAX;
n3->sib_u64s[1] = U16_MAX;
- btree_split_insert_keys(as, n3, iter, &as->parent_keys);
+ btree_split_insert_keys(as, trans, iter, n3, &as->parent_keys);
bch2_btree_node_write(c, n3, SIX_LOCK_intent);
}
/* Split a non root node */
bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags);
} else if (n3) {
- bch2_btree_set_root(as, n3, iter);
+ bch2_btree_set_root(as, trans, iter, n3);
} else {
/* Root filled up but didn't need to be split */
- bch2_btree_set_root(as, n1, iter);
+ bch2_btree_set_root(as, trans, iter, n1);
}
bch2_btree_update_get_open_buckets(as, n1);
/* Successful split, update the iterator to point to the new nodes: */
six_lock_increment(&b->c.lock, SIX_LOCK_intent);
- bch2_btree_iter_node_drop(iter, b);
+ bch2_btree_iter_node_drop(trans, iter, b);
if (n3)
- bch2_btree_iter_node_replace(iter, n3);
+ bch2_btree_iter_node_replace(trans, iter, n3);
if (n2)
- bch2_btree_iter_node_replace(iter, n2);
- bch2_btree_iter_node_replace(iter, n1);
+ bch2_btree_iter_node_replace(trans, iter, n2);
+ bch2_btree_iter_node_replace(trans, iter, n1);
	/*
	 * The old node must be freed (in memory) _before_ unlocking the new
	 * nodes - else another thread could re-acquire a read lock on the old
	 * node after another thread has locked and updated the new node, thus
	 * seeing stale data:
	 */
- bch2_btree_node_free_inmem(c, b, iter);
+ bch2_btree_node_free_inmem(trans, iter, b);
if (n3)
six_unlock_intent(&n3->c.lock);
}
static void
-bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
- struct btree_iter *iter, struct keylist *keys)
+bch2_btree_insert_keys_interior(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b,
+ struct keylist *keys)
{
struct btree_iter *linked;
- __bch2_btree_insert_keys_interior(as, b, iter, keys, iter->l[b->c.level].iter);
+ __bch2_btree_insert_keys_interior(as, trans, iter, b,
+ iter->l[b->c.level].iter, keys);
btree_update_updated_node(as, b);
- trans_for_each_iter_with_node(iter->trans, b, linked)
+ trans_for_each_iter_with_node(trans, b, linked)
bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
- bch2_btree_trans_verify_iters(iter->trans, b);
+ bch2_btree_trans_verify_iters(trans, b);
}
/**
bch2_btree_node_lock_for_insert(trans, iter, b);
if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
- bch2_btree_node_unlock_write(b, iter);
+ bch2_btree_node_unlock_write(trans, iter, b);
goto split;
}
btree_node_interior_verify(c, b);
- bch2_btree_insert_keys_interior(as, b, iter, keys);
+ bch2_btree_insert_keys_interior(as, trans, iter, b, keys);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
if (u64s_added > live_u64s_added &&
bch2_maybe_compact_whiteouts(c, b))
- bch2_btree_iter_reinit_node(iter, b);
+ bch2_btree_iter_reinit_node(trans, iter, b);
- bch2_btree_node_unlock_write(b, iter);
+ bch2_btree_node_unlock_write(trans, iter, b);
btree_node_interior_verify(c, b);
return;
unsigned l;
int ret = 0;
- as = bch2_btree_update_start(iter, iter->level,
+ as = bch2_btree_update_start(trans, iter, iter->level,
btree_update_reserve_required(c, b), flags);
if (IS_ERR(as))
return PTR_ERR(as);
goto out;
parent = btree_node_parent(iter, b);
- as = bch2_btree_update_start(iter, level,
+ as = bch2_btree_update_start(trans, iter, level,
btree_update_reserve_required(c, parent) + 1,
flags|
BTREE_INSERT_NOFAIL|
six_lock_increment(&b->c.lock, SIX_LOCK_intent);
six_lock_increment(&m->c.lock, SIX_LOCK_intent);
- bch2_btree_iter_node_drop(iter, b);
- bch2_btree_iter_node_drop(iter, m);
+ bch2_btree_iter_node_drop(trans, iter, b);
+ bch2_btree_iter_node_drop(trans, iter, m);
- bch2_btree_iter_node_replace(iter, n);
+ bch2_btree_iter_node_replace(trans, iter, n);
bch2_btree_trans_verify_iters(trans, n);
- bch2_btree_node_free_inmem(c, b, iter);
- bch2_btree_node_free_inmem(c, m, iter);
+ bch2_btree_node_free_inmem(trans, iter, b);
+ bch2_btree_node_free_inmem(trans, iter, m);
six_unlock_intent(&n->c.lock);
goto out;
parent = btree_node_parent(iter, b);
- as = bch2_btree_update_start(iter, b->c.level,
+ as = bch2_btree_update_start(trans, iter, b->c.level,
(parent
? btree_update_reserve_required(c, parent)
: 0) + 1,
bch2_btree_insert_node(as, trans, iter, parent,
&as->parent_keys, flags);
} else {
- bch2_btree_set_root(as, n, iter);
+ bch2_btree_set_root(as, trans, iter, n);
}
bch2_btree_update_get_open_buckets(as, n);
six_lock_increment(&b->c.lock, SIX_LOCK_intent);
- bch2_btree_iter_node_drop(iter, b);
- bch2_btree_iter_node_replace(iter, n);
- bch2_btree_node_free_inmem(c, b, iter);
+ bch2_btree_iter_node_drop(trans, iter, b);
+ bch2_btree_iter_node_replace(trans, iter, n);
+ bch2_btree_node_free_inmem(trans, iter, b);
six_unlock_intent(&n->c.lock);
bch2_btree_update_done(as);
if (ret)
goto err;
- bch2_btree_node_lock_write(b, iter);
+ bch2_btree_node_lock_write(trans, iter, b);
if (new_hash) {
mutex_lock(&c->btree_cache.lock);
bkey_copy(&b->key, new_key);
}
- bch2_btree_node_unlock_write(b, iter);
+ bch2_btree_node_unlock_write(trans, iter, b);
out:
bch2_trans_iter_put(trans, iter2);
return ret;
u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
- struct btree_iter *);
-void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-
-void bch2_btree_update_get_open_buckets(struct btree_update *, struct btree *);
-
struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
struct btree *,
struct bkey_format);
-void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct btree_iter *, unsigned, unsigned, unsigned);
-
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
- struct btree *);
-void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
-
int bch2_btree_split_leaf(struct btree_trans *, struct btree_iter *, unsigned);
int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_iter *,
{
struct bch_fs *c = trans->c;
- bch2_btree_node_lock_write(b, iter);
+ bch2_btree_node_lock_write(trans, iter, b);
if (btree_iter_type(iter) == BTREE_ITER_CACHED)
return;
if (unlikely(btree_node_just_written(b)) &&
bch2_btree_post_write_cleanup(c, b))
- bch2_btree_iter_reinit_node(iter, b);
+ bch2_btree_iter_reinit_node(trans, iter, b);
/*
* If the last bset has been written, or if it's gotten too big - start
/* Inserting into a given leaf node (last stage of insert): */
/* Handle overwrites and do insert, for non extents: */
-bool bch2_btree_bset_insert_key(struct btree_iter *iter,
+bool bch2_btree_bset_insert_key(struct btree_trans *trans,
+ struct btree_iter *iter,
struct btree *b,
struct btree_node_iter *node_iter,
struct bkey_i *insert)
EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
EBUG_ON(insert->k.u64s >
- bch_btree_keys_u64s_remaining(iter->trans->c, b));
+ bch_btree_keys_u64s_remaining(trans->c, b));
EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
k = bch2_btree_node_iter_peek_all(node_iter, b);
k->type = KEY_TYPE_deleted;
if (k->needs_whiteout)
- push_whiteout(iter->trans->c, b, insert->k.p);
+ push_whiteout(trans->c, b, insert->k.p);
k->needs_whiteout = false;
if (k >= btree_bset_last(b)->start) {
bch2_bset_delete(b, k, clobber_u64s);
goto fix_iter;
} else {
- bch2_btree_iter_fix_key_modified(iter, b, k);
+ bch2_btree_iter_fix_key_modified(trans, iter, b, k);
}
return true;
clobber_u64s = k->u64s;
goto overwrite;
} else {
- bch2_btree_iter_fix_key_modified(iter, b, k);
+ bch2_btree_iter_fix_key_modified(trans, iter, b, k);
}
}
new_u64s = k->u64s;
fix_iter:
if (clobber_u64s != new_u64s)
- bch2_btree_node_iter_fix(iter, b, node_iter, k,
+ bch2_btree_node_iter_fix(trans, iter, b, node_iter, k,
clobber_u64s, new_u64s);
return true;
}
EBUG_ON(!iter->level &&
!test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags));
- if (unlikely(!bch2_btree_bset_insert_key(iter, b,
+ if (unlikely(!bch2_btree_bset_insert_key(trans, iter, b,
&iter_l(iter)->iter, insert)))
return false;
if (u64s_added > live_u64s_added &&
bch2_maybe_compact_whiteouts(c, b))
- bch2_btree_iter_reinit_node(iter, b);
+ bch2_btree_iter_reinit_node(trans, iter, b);
trace_btree_insert_key(c, b, insert);
return true;
trans_for_each_update(trans, i)
if (!same_leaf_as_prev(trans, i))
- bch2_btree_node_unlock_write_inlined(iter_l(i->iter)->b,
- i->iter);
+ bch2_btree_node_unlock_write_inlined(trans, i->iter,
+ iter_l(i->iter)->b);
if (!ret && trans->journal_pin)
bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
bch2_key_resize(&delete.k, max_sectors);
bch2_cut_back(end, &delete);
- ret = bch2_extent_trim_atomic(&delete, iter);
+ ret = bch2_extent_trim_atomic(trans, iter, &delete);
if (ret)
break;
}
return 0;
}
-static int ec_stripe_mem_alloc(struct bch_fs *c,
+static int ec_stripe_mem_alloc(struct btree_trans *trans,
struct btree_iter *iter)
{
size_t idx = iter->pos.offset;
int ret = 0;
- if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
+ if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
return ret;
- bch2_trans_unlock(iter->trans);
+ bch2_trans_unlock(trans);
ret = -EINTR;
- if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
+ if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL))
return ret;
return -ENOMEM;
found_slot:
start_pos = iter->pos;
- ret = ec_stripe_mem_alloc(c, iter);
+ ret = ec_stripe_mem_alloc(&trans, iter);
if (ret)
goto err;
#define EXTENT_ITERS_MAX (BTREE_ITER_MAX / 3)
-int bch2_extent_atomic_end(struct btree_iter *iter,
+int bch2_extent_atomic_end(struct btree_trans *trans,
+ struct btree_iter *iter,
struct bkey_i *insert,
struct bpos *end)
{
- struct btree_trans *trans = iter->trans;
struct btree_iter *copy;
struct bkey_s_c k;
unsigned nr_iters = 0;
return ret < 0 ? ret : 0;
}
-int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+int bch2_extent_trim_atomic(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *k)
{
struct bpos end;
int ret;
- ret = bch2_extent_atomic_end(iter, k, &end);
+ ret = bch2_extent_atomic_end(trans, iter, k, &end);
if (ret)
return ret;
bch2_cut_back(end, k);
return 0;
}
-
-int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
-{
- struct bpos end;
- int ret;
-
- ret = bch2_extent_atomic_end(iter, k, &end);
- if (ret)
- return ret;
-
- return !bkey_cmp(end, k->k.p);
-}
#include "bcachefs.h"
-int bch2_extent_atomic_end(struct btree_iter *, struct bkey_i *,
- struct bpos *);
-int bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
-int bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
+int bch2_extent_atomic_end(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *, struct bpos *);
+int bch2_extent_trim_atomic(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *);
#endif /* _BCACHEFS_EXTENT_UPDATE_H */
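
A usage sketch of the new signatures (hypothetical caller, mirroring the
extent-insert path further down): trim the key to the largest range that can
be updated in one transaction before inserting it:

	static int example_trim(struct btree_trans *trans,
				struct btree_iter *iter,
				struct bkey_i *k)
	{
		/* cuts k back so the insert stays within atomic limits */
		int ret = bch2_extent_trim_atomic(trans, iter, k);
		if (ret)
			return ret;

		/* k->k.p now ends at or before the computed atomic end */
		return 0;
	}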
copy.k->k.p.offset += shift >> 9;
	bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
- ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
+ ret = bch2_extent_atomic_end(&trans, dst, copy.k, &atomic_end);
if (ret)
continue;
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
int ret;
- ret = bch2_extent_trim_atomic(k, iter);
+ ret = bch2_extent_trim_atomic(trans, iter, k);
if (ret)
return ret;