git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bcachefs: Reduce iter->trans usage
author: Kent Overstreet <kent.overstreet@gmail.com>
Wed, 25 Aug 2021 01:30:06 +0000 (21:30 -0400)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sun, 22 Oct 2023 21:09:10 +0000 (17:09 -0400)
Disfavoured, and should go away.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
14 files changed:
fs/bcachefs/btree_io.c
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_update.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_interior.h
fs/bcachefs/btree_update_leaf.c
fs/bcachefs/ec.c
fs/bcachefs/extent_update.c
fs/bcachefs/extent_update.h
fs/bcachefs/fs-io.c
fs/bcachefs/io.c

index e42ade7cbc4b7faf6d1aa3460790edddc0331fb6..99799d93cf09063238debe58888543ad2903ac09 100644 (file)
@@ -498,7 +498,7 @@ void bch2_btree_init_next(struct btree_trans *trans,
        bch2_btree_build_aux_trees(b);
 
        if (iter && reinit_iter)
-               bch2_btree_iter_reinit_node(iter, b);
+               bch2_btree_iter_reinit_node(trans, iter, b);
 }
 
 static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
index 5bb714d1525b36eb8a29d9b7b542a98f37f41bd9..ef5e7e9884f52f41f50778403c8cbce5b5100d34 100644 (file)
 
 static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
 static inline void btree_trans_sort_iters(struct btree_trans *);
-static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long);
+static struct btree_iter *btree_iter_child_alloc(struct btree_trans *,
+                                       struct btree_iter *, unsigned long);
 static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *,
                                                 struct btree_iter *);
-static void btree_iter_copy(struct btree_iter *, struct btree_iter *);
+static void btree_iter_copy(struct btree_trans *, struct btree_iter *, struct btree_iter *);
 
 static inline int btree_iter_cmp(const struct btree_iter *l,
                                 const struct btree_iter *r)
@@ -100,19 +101,21 @@ static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
 
 /* Btree node locking: */
 
-void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
+void bch2_btree_node_unlock_write(struct btree_trans *trans,
+                       struct btree_iter *iter, struct btree *b)
 {
-       bch2_btree_node_unlock_write_inlined(b, iter);
+       bch2_btree_node_unlock_write_inlined(trans, iter, b);
 }
 
-void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+void __bch2_btree_node_lock_write(struct btree_trans *trans,
+                       struct btree_iter *iter, struct btree *b)
 {
        struct btree_iter *linked;
        unsigned readers = 0;
 
        EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
 
-       trans_for_each_iter(iter->trans, linked)
+       trans_for_each_iter(trans, linked)
                if (linked->l[b->c.level].b == b &&
                    btree_node_read_locked(linked, b->c.level))
                        readers++;
@@ -129,7 +132,7 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
        else
                this_cpu_sub(*b->c.lock.readers, readers);
 
-       btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
+       btree_node_lock_type(trans->c, b, SIX_LOCK_write);
 
        if (!b->c.lock.readers)
                atomic64_add(__SIX_VAL(read_lock, readers),
@@ -191,8 +194,9 @@ success:
        return true;
 }
 
-static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
-                                       unsigned long trace_ip)
+static inline bool btree_iter_get_locks(struct btree_trans *trans,
+                                       struct btree_iter *iter,
+                                       bool upgrade, unsigned long trace_ip)
 {
        unsigned l = iter->level;
        int fail_idx = -1;
@@ -206,7 +210,7 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
                      : bch2_btree_node_relock(iter, l))) {
                        (upgrade
                         ? trace_node_upgrade_fail
-                        : trace_node_relock_fail)(iter->trans->ip, trace_ip,
+                        : trace_node_relock_fail)(trans->ip, trace_ip,
                                        btree_iter_type(iter) == BTREE_ITER_CACHED,
                                        iter->btree_id, &iter->real_pos,
                                        l, iter->l[l].lock_seq,
@@ -237,7 +241,7 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
        if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
                iter->uptodate = BTREE_ITER_NEED_PEEK;
 
-       bch2_btree_trans_verify_locks(iter->trans);
+       bch2_btree_trans_verify_locks(trans);
 
        return iter->uptodate < BTREE_ITER_NEED_RELOCK;
 }
@@ -363,11 +367,12 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 /* Btree iterator locking: */
 
 #ifdef CONFIG_BCACHEFS_DEBUG
-static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
+static void bch2_btree_iter_verify_locks(struct btree_trans *trans,
+                                        struct btree_iter *iter)
 {
        unsigned l;
 
-       if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
+       if (!(trans->iters_linked & (1ULL << iter->idx))) {
                BUG_ON(iter->nodes_locked);
                return;
        }
@@ -387,10 +392,11 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans)
        struct btree_iter *iter;
 
        trans_for_each_iter(trans, iter)
-               bch2_btree_iter_verify_locks(iter);
+               bch2_btree_iter_verify_locks(trans, iter);
 }
 #else
-static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify_locks(struct btree_trans *trans,
+                                               struct btree_iter *iter) {}
 #endif
 
 /*
@@ -398,13 +404,14 @@ static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
  */
 bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
 {
+       struct btree_trans *trans = iter->trans;
        unsigned l;
 
        for (l = iter->level;
             l < iter->locks_want && btree_iter_node(iter, l);
             l++) {
                if (!bch2_btree_node_relock(iter, l)) {
-                       trace_node_relock_fail(iter->trans->ip, _RET_IP_,
+                       trace_node_relock_fail(trans->ip, _RET_IP_,
                                        btree_iter_type(iter) == BTREE_ITER_CACHED,
                                        iter->btree_id, &iter->real_pos,
                                        l, iter->l[l].lock_seq,
@@ -415,7 +422,7 @@ bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
                                        ? iter->l[l].b->c.lock.state.seq
                                        : 0);
                        btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
-                       btree_trans_restart(iter->trans);
+                       btree_trans_restart(trans);
                        return false;
                }
        }
@@ -424,25 +431,27 @@ bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
 }
 
 __flatten
-bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
+static bool bch2_btree_iter_relock(struct btree_trans *trans,
+                       struct btree_iter *iter, unsigned long trace_ip)
 {
-       bool ret = btree_iter_get_locks(iter, false, trace_ip);
+       bool ret = btree_iter_get_locks(trans, iter, false, trace_ip);
 
        if (!ret)
-               btree_trans_restart(iter->trans);
+               btree_trans_restart(trans);
        return ret;
 }
 
 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
                               unsigned new_locks_want)
 {
+       struct btree_trans *trans = iter->trans;
        struct btree_iter *linked;
 
        EBUG_ON(iter->locks_want >= new_locks_want);
 
        iter->locks_want = new_locks_want;
 
-       if (btree_iter_get_locks(iter, true, _THIS_IP_))
+       if (btree_iter_get_locks(trans, iter, true, _THIS_IP_))
                return true;
 
        /*
@@ -464,17 +473,17 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
         * before interior nodes - now that's handled by
         * bch2_btree_iter_traverse_all().
         */
-       trans_for_each_iter(iter->trans, linked)
+       trans_for_each_iter(trans, linked)
                if (linked != iter &&
                    btree_iter_type(linked) == btree_iter_type(iter) &&
                    linked->btree_id == iter->btree_id &&
                    linked->locks_want < new_locks_want) {
                        linked->locks_want = new_locks_want;
-                       btree_iter_get_locks(linked, true, _THIS_IP_);
+                       btree_iter_get_locks(trans, linked, true, _THIS_IP_);
                }
 
        if (iter->should_be_locked)
-               btree_trans_restart(iter->trans);
+               btree_trans_restart(trans);
        return false;
 }
 
@@ -528,7 +537,7 @@ bool bch2_trans_relock(struct btree_trans *trans)
 
        trans_for_each_iter(trans, iter)
                if (btree_iter_should_be_locked(iter) &&
-                   !bch2_btree_iter_relock(iter, _RET_IP_)) {
+                   !bch2_btree_iter_relock(trans, iter, _RET_IP_)) {
                        trace_trans_restart_relock(trans->ip, _RET_IP_,
                                        iter->btree_id, &iter->real_pos);
                        BUG_ON(!trans->restarted);
@@ -686,7 +695,7 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
                bch2_btree_iter_verify_level(iter, i);
        }
 
-       bch2_btree_iter_verify_locks(iter);
+       bch2_btree_iter_verify_locks(trans, iter);
 }
 
 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
@@ -753,13 +762,14 @@ static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
        btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
 
-void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
+void bch2_btree_iter_fix_key_modified(struct btree_trans *trans,
+                                     struct btree_iter *iter,
                                      struct btree *b,
                                      struct bkey_packed *where)
 {
        struct btree_iter *linked;
 
-       trans_for_each_iter_with_node(iter->trans, b, linked) {
+       trans_for_each_iter_with_node(trans, b, linked) {
                __bch2_btree_iter_fix_key_modified(linked, b, where);
                bch2_btree_iter_verify_level(linked, b->c.level);
        }
@@ -863,7 +873,8 @@ fixup_done:
                btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
 
-void bch2_btree_node_iter_fix(struct btree_iter *iter,
+void bch2_btree_node_iter_fix(struct btree_trans *trans,
+                             struct btree_iter *iter,
                              struct btree *b,
                              struct btree_node_iter *node_iter,
                              struct bkey_packed *where,
@@ -881,7 +892,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
                        bch2_btree_node_iter_verify(node_iter, b);
        }
 
-       trans_for_each_iter_with_node(iter->trans, b, linked) {
+       trans_for_each_iter_with_node(trans, b, linked) {
                __bch2_btree_node_iter_fix(linked, b,
                                           &linked->l[b->c.level].iter, t,
                                           where, clobber_u64s, new_u64s);
@@ -1055,12 +1066,13 @@ static inline void btree_iter_node_set(struct btree_iter *iter,
  * A btree node is being replaced - update the iterator to point to the new
  * node:
  */
-void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_replace(struct btree_trans *trans,
+                       struct btree_iter *iter, struct btree *b)
 {
        enum btree_node_locked_type t;
        struct btree_iter *linked;
 
-       trans_for_each_iter(iter->trans, linked)
+       trans_for_each_iter(trans, linked)
                if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
                    btree_iter_pos_in_node(linked, b)) {
                        /*
@@ -1080,12 +1092,13 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
                }
 }
 
-void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_drop(struct btree_trans *trans,
+                       struct btree_iter *iter, struct btree *b)
 {
        struct btree_iter *linked;
        unsigned level = b->c.level;
 
-       trans_for_each_iter(iter->trans, linked)
+       trans_for_each_iter(trans, linked)
                if (linked->l[level].b == b) {
                        btree_node_unlock(linked, level);
                        linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
@@ -1096,11 +1109,12 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
  * A btree node has been modified in such a way as to invalidate iterators - fix
  * them:
  */
-void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_reinit_node(struct btree_trans *trans,
+                       struct btree_iter *iter, struct btree *b)
 {
        struct btree_iter *linked;
 
-       trans_for_each_iter_with_node(iter->trans, b, linked)
+       trans_for_each_iter_with_node(trans, b, linked)
                __btree_iter_init(linked, b->c.level);
 }
 
@@ -1170,9 +1184,9 @@ static inline int btree_iter_lock_root(struct btree_trans *trans,
 }
 
 noinline
-static int btree_iter_prefetch(struct btree_iter *iter)
+static int btree_iter_prefetch(struct btree_trans *trans, struct btree_iter *iter)
 {
-       struct bch_fs *c = iter->trans->c;
+       struct bch_fs *c = trans->c;
        struct btree_iter_level *l = &iter->l[iter->level];
        struct btree_node_iter node_iter = l->iter;
        struct bkey_packed *k;
@@ -1258,19 +1272,20 @@ static __always_inline int btree_iter_down(struct btree_trans *trans,
                btree_node_mem_ptr_set(iter, level + 1, b);
 
        if (iter->flags & BTREE_ITER_PREFETCH)
-               ret = btree_iter_prefetch(iter);
+               ret = btree_iter_prefetch(trans, iter);
 
        if (btree_node_read_locked(iter, level + 1))
                btree_node_unlock(iter, level + 1);
        iter->level = level;
 
-       bch2_btree_iter_verify_locks(iter);
+       bch2_btree_iter_verify_locks(trans, iter);
 err:
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
 }
 
-static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
+static int btree_iter_traverse_one(struct btree_trans *,
+                       struct btree_iter *, unsigned long);
 
 static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
                                     unsigned long trace_ip)
@@ -1331,7 +1346,7 @@ retry_all:
 
                EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
 
-               ret = btree_iter_traverse_one(iter, _THIS_IP_);
+               ret = btree_iter_traverse_one(trans, iter, _THIS_IP_);
                if (ret)
                        goto retry_all;
 
@@ -1400,10 +1415,10 @@ static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
  * stashed in the iterator and returned from bch2_trans_exit().
  */
-static int btree_iter_traverse_one(struct btree_iter *iter,
+static int btree_iter_traverse_one(struct btree_trans *trans,
+                                  struct btree_iter *iter,
                                   unsigned long trace_ip)
 {
-       struct btree_trans *trans = iter->trans;
        unsigned l, depth_want = iter->level;
        int ret = 0;
 
@@ -1412,7 +1427,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
         * and re-traverse the iterator without a transaction restart:
         */
        if (iter->should_be_locked) {
-               ret = bch2_btree_iter_relock(iter, trace_ip) ? 0 : -EINTR;
+               ret = bch2_btree_iter_relock(trans, iter, trace_ip) ? 0 : -EINTR;
                goto out;
        }
 
@@ -1488,7 +1503,7 @@ static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
        int ret;
 
        ret =   bch2_trans_cond_resched(trans) ?:
-               btree_iter_traverse_one(iter, _RET_IP_);
+               btree_iter_traverse_one(trans, iter, _RET_IP_);
        if (unlikely(ret) && hweight64(trans->iters_linked) == 1) {
                ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
                BUG_ON(ret == -EINTR);
@@ -1619,20 +1634,21 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 
 static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
 {
+       struct btree_trans *trans = iter->trans;
 #ifdef CONFIG_BCACHEFS_DEBUG
        struct bpos old_pos = iter->real_pos;
 #endif
        int cmp = bpos_cmp(new_pos, iter->real_pos);
        unsigned l = iter->level;
 
-       EBUG_ON(iter->trans->restarted);
+       EBUG_ON(trans->restarted);
 
        if (!cmp)
                goto out;
 
        iter->real_pos = new_pos;
        iter->should_be_locked = false;
-       iter->trans->iters_sorted = false;
+       trans->iters_sorted = false;
 
        if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
                btree_node_unlock(iter, 0);
@@ -1666,7 +1682,7 @@ out:
 
        bch2_btree_iter_verify(iter);
 #ifdef CONFIG_BCACHEFS_DEBUG
-       trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
+       trace_iter_set_search_pos(trans->ip, _RET_IP_,
                                  iter->btree_id,
                                  &old_pos, &new_pos, l);
 #endif
@@ -1886,6 +1902,7 @@ struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
 
 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 {
+       struct btree_trans *trans = iter->trans;
        struct bpos search_key;
        struct bkey_s_c k;
        int ret;
@@ -1954,9 +1971,9 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
                if (iter->flags & BTREE_ITER_INTENT) {
                        struct btree_iter *child =
-                               btree_iter_child_alloc(iter, _THIS_IP_);
+                               btree_iter_child_alloc(trans, iter, _THIS_IP_);
 
-                       btree_iter_copy(child, iter);
+                       btree_iter_copy(trans, child, iter);
                        k = bch2_btree_iter_peek(child);
 
                        if (k.k && !bkey_err(k))
@@ -2163,21 +2180,21 @@ static inline void btree_iter_list_add(struct btree_trans *trans,
        btree_trans_verify_sorted_refs(trans);
 }
 
-static void btree_iter_child_free(struct btree_iter *iter)
+static void btree_iter_child_free(struct btree_trans *trans, struct btree_iter *iter)
 {
-       struct btree_iter *child = btree_iter_child(iter);
+       struct btree_iter *child = btree_iter_child(trans, iter);
 
        if (child) {
-               bch2_trans_iter_free(iter->trans, child);
+               bch2_trans_iter_free(trans, child);
                iter->child_idx = U8_MAX;
        }
 }
 
-static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
+static struct btree_iter *btree_iter_child_alloc(struct btree_trans *trans,
+                                                struct btree_iter *iter,
                                                 unsigned long ip)
 {
-       struct btree_trans *trans = iter->trans;
-       struct btree_iter *child = btree_iter_child(iter);
+       struct btree_iter *child = btree_iter_child(trans, iter);
 
        if (!child) {
                child = btree_trans_iter_alloc(trans, iter);
@@ -2194,7 +2211,7 @@ static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
 static inline void __bch2_trans_iter_free(struct btree_trans *trans,
                                          unsigned idx)
 {
-       btree_iter_child_free(&trans->iters[idx]);
+       btree_iter_child_free(trans, &trans->iters[idx]);
 
        btree_iter_list_remove(trans, &trans->iters[idx]);
 
@@ -2312,12 +2329,13 @@ static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans,
        return iter;
 }
 
-static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
+static void btree_iter_copy(struct btree_trans *trans, struct btree_iter *dst,
+                           struct btree_iter *src)
 {
        unsigned i, offset = offsetof(struct btree_iter, flags);
 
        __bch2_btree_iter_unlock(dst);
-       btree_iter_child_free(dst);
+       btree_iter_child_free(trans, dst);
 
        memcpy((void *) dst + offset,
               (void *) src + offset,
@@ -2330,7 +2348,7 @@ static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
 
        dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
        dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
-       dst->trans->iters_sorted = false;
+       trans->iters_sorted = false;
 }
 
 struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
@@ -2388,7 +2406,7 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
                bch2_btree_iter_init(trans, iter, btree_id);
        } else if (btree_iter_keep(trans, best)) {
                iter = btree_trans_iter_alloc(trans, best);
-               btree_iter_copy(iter, best);
+               btree_iter_copy(trans, iter, best);
        } else {
                iter = best;
        }
@@ -2411,7 +2429,7 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
        locks_want = min(locks_want, BTREE_MAX_DEPTH);
        if (locks_want > iter->locks_want) {
                iter->locks_want = locks_want;
-               btree_iter_get_locks(iter, true, _THIS_IP_);
+               btree_iter_get_locks(trans, iter, true, _THIS_IP_);
        }
 
        while (iter->level != depth) {
@@ -2464,12 +2482,12 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
 }
 
 struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
-                                       struct btree_iter *src)
+                                         struct btree_iter *src)
 {
        struct btree_iter *iter;
 
        iter = btree_trans_iter_alloc(trans, src);
-       btree_iter_copy(iter, src);
+       btree_iter_copy(trans, iter, src);
 
        trans->iters_live |= 1ULL << iter->idx;
        /*
@@ -2647,7 +2665,7 @@ int bch2_trans_exit(struct btree_trans *trans)
                struct btree_iter *iter;
 
                trans_for_each_iter(trans, iter)
-                       btree_iter_child_free(iter);
+                       btree_iter_child_free(trans, iter);
        }
 
        if (trans->iters_live) {
index 5c754d4665431a497fa68ebb92cc66d8a8121650..ea129387ebb77241602ea0e7c59972c9d2aed557 100644 (file)
@@ -135,14 +135,13 @@ static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
 static inline void bch2_btree_trans_verify_locks(struct btree_trans *iter) {}
 #endif
 
-void bch2_btree_iter_fix_key_modified(struct btree_iter *, struct btree *,
-                                          struct bkey_packed *);
-void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
-                             struct btree_node_iter *, struct bkey_packed *,
-                             unsigned, unsigned);
+void bch2_btree_iter_fix_key_modified(struct btree_trans *trans, struct btree_iter *,
+                                     struct btree *, struct bkey_packed *);
+void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_iter *,
+                             struct btree *, struct btree_node_iter *,
+                             struct bkey_packed *, unsigned, unsigned);
 
 bool bch2_btree_iter_relock_intent(struct btree_iter *);
-bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
 
 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
@@ -179,10 +178,13 @@ static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
 
 void bch2_trans_downgrade(struct btree_trans *);
 
-void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
-void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_replace(struct btree_trans *trans,
+                                 struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_drop(struct btree_trans *,
+                              struct btree_iter *, struct btree *);
 
-void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
+void bch2_btree_iter_reinit_node(struct btree_trans *,
+                                struct btree_iter *, struct btree *);
 
 int __must_check bch2_btree_iter_traverse(struct btree_iter *);
 
@@ -226,9 +228,10 @@ static inline struct btree_iter *idx_to_btree_iter(struct btree_trans *trans, un
        return idx != U8_MAX ? trans->iters + idx : NULL;
 }
 
-static inline struct btree_iter *btree_iter_child(struct btree_iter *iter)
+static inline struct btree_iter *btree_iter_child(struct btree_trans *trans,
+                                                 struct btree_iter *iter)
 {
-       return idx_to_btree_iter(iter->trans, iter->child_idx);
+       return idx_to_btree_iter(trans, iter->child_idx);
 }
 
 /*
@@ -319,7 +322,7 @@ bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
 }
 
 struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
-                                       struct btree_iter *);
+                                         struct btree_iter *);
 static inline struct btree_iter *
 bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
 {
index ac8f40810d7a771dbe58a26565e90bf2ea797e5c..c7d223f91bf604aea7c0ac83ff02f0f2dcf4043f 100644 (file)
@@ -238,7 +238,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
         * XXX: not allowed to be holding read locks when we take a write lock,
         * currently
         */
-       bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
+       bch2_btree_node_lock_write(trans, ck_iter, ck_iter->l[0].b);
        if (new_k) {
                kfree(ck->k);
                ck->u64s = new_u64s;
@@ -247,7 +247,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 
        bkey_reassemble(ck->k, k);
        ck->valid = true;
-       bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);
+       bch2_btree_node_unlock_write(trans, ck_iter, ck_iter->l[0].b);
 
        /* We're not likely to need this iterator again: */
        set_btree_iter_dontneed(trans, iter);
index fda1648021547e44819c949402ca0ea6791fa1e7..0acc731df8e9a496892f36e6d2656e1d67bf282a 100644 (file)
@@ -207,30 +207,35 @@ static inline bool bch2_btree_node_relock(struct btree_iter *iter,
  * succeed:
  */
 static inline void
-bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
+bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_iter *iter,
+                                    struct btree *b)
 {
        struct btree_iter *linked;
 
        EBUG_ON(iter->l[b->c.level].b != b);
        EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
 
-       trans_for_each_iter_with_node(iter->trans, b, linked)
+       trans_for_each_iter_with_node(trans, b, linked)
                linked->l[b->c.level].lock_seq += 2;
 
        six_unlock_write(&b->c.lock);
 }
 
-void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
+void bch2_btree_node_unlock_write(struct btree_trans *,
+                       struct btree_iter *, struct btree *);
 
-void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
+void __bch2_btree_node_lock_write(struct btree_trans *,
+                       struct btree_iter *, struct btree *);
 
-static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
+                                             struct btree_iter *iter,
+                                             struct btree *b)
 {
        EBUG_ON(iter->l[b->c.level].b != b);
        EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
 
        if (unlikely(!six_trylock_write(&b->c.lock)))
-               __bch2_btree_node_lock_write(b, iter);
+               __bch2_btree_node_lock_write(trans, iter, b);
 }
 
 #endif /* _BCACHEFS_BTREE_LOCKING_H */
index 217b52e1a1683a7977198acce09e3f9668eab296..5707baf10262298d94fc2bb8e1a5a5a40133dacd 100644 (file)
@@ -10,8 +10,9 @@ struct btree;
 
 void bch2_btree_node_lock_for_insert(struct btree_trans *, struct btree_iter *,
                                     struct btree *);
-bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
-                               struct btree_node_iter *, struct bkey_i *);
+bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_iter *,
+                               struct btree *, struct btree_node_iter *,
+                               struct bkey_i *);
 void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
 
 enum btree_insert_flags {
index 2594738f3d537f9be22b5551bc133e7490decbca..4acd4990061144c133bc4cf72900970ec4f067cc 100644 (file)
@@ -25,6 +25,7 @@
 static void bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
                                   struct btree_iter *, struct btree *,
                                   struct keylist *, unsigned);
+static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
 
 /* Debug code: */
 
@@ -159,27 +160,14 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b)
        mutex_unlock(&c->btree_cache.lock);
 }
 
-void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
-{
-       struct open_buckets ob = b->ob;
-
-       b->ob.nr = 0;
-
-       clear_btree_node_dirty(c, b);
-
-       btree_node_lock_type(c, b, SIX_LOCK_write);
-       __btree_node_free(c, b);
-       six_unlock_write(&b->c.lock);
-
-       bch2_open_buckets_put(c, &ob);
-}
-
-void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
-                               struct btree_iter *iter)
+static void bch2_btree_node_free_inmem(struct btree_trans *trans,
+                                      struct btree_iter *iter,
+                                      struct btree *b)
 {
+       struct bch_fs *c = trans->c;
        struct btree_iter *linked;
 
-       trans_for_each_iter(iter->trans, linked)
+       trans_for_each_iter(trans, linked)
                BUG_ON(linked->l[b->c.level].b == b);
 
        six_lock_write(&b->c.lock, NULL, NULL);
@@ -773,7 +761,7 @@ static void btree_update_updated_root(struct btree_update *as, struct btree *b)
  * And it adds @b to the list of @as's new nodes, so that we can update sector
  * counts in bch2_btree_update_nodes_written:
  */
-void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
+static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
 {
        struct bch_fs *c = as->c;
 
@@ -827,7 +815,7 @@ found:
                closure_put(&as->cl);
 }
 
-void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
+static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
 {
        while (b->ob.nr)
                as->open_buckets[as->nr_open_buckets++] =
@@ -839,7 +827,7 @@ void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b
  * nodes and thus outstanding btree_updates - redirect @b's
  * btree_updates to point to this btree_update:
  */
-void bch2_btree_interior_update_will_free_node(struct btree_update *as,
+static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
                                               struct btree *b)
 {
        struct bch_fs *c = as->c;
@@ -911,7 +899,7 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
        as->nr_old_nodes++;
 }
 
-void bch2_btree_update_done(struct btree_update *as)
+static void bch2_btree_update_done(struct btree_update *as)
 {
        BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
 
@@ -925,11 +913,10 @@ void bch2_btree_update_done(struct btree_update *as)
                    as->c->btree_interior_update_worker);
 }
 
-struct btree_update *
-bch2_btree_update_start(struct btree_iter *iter, unsigned level,
-                       unsigned nr_nodes, unsigned flags)
+static struct btree_update *
+bch2_btree_update_start(struct btree_trans *trans, struct btree_iter *iter,
+                       unsigned level, unsigned nr_nodes, unsigned flags)
 {
-       struct btree_trans *trans = iter->trans;
        struct bch_fs *c = trans->c;
        struct btree_update *as;
        struct closure cl;
@@ -1092,8 +1079,10 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
  * is nothing new to be done.  This just guarantees that there is a
  * journal write.
  */
-static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
-                               struct btree_iter *iter)
+static void bch2_btree_set_root(struct btree_update *as,
+                               struct btree_trans *trans,
+                               struct btree_iter *iter,
+                               struct btree *b)
 {
        struct bch_fs *c = as->c;
        struct btree *old;
@@ -1108,7 +1097,7 @@ static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
         * Ensure no one is using the old root while we switch to the
         * new root:
         */
-       bch2_btree_node_lock_write(old, iter);
+       bch2_btree_node_lock_write(trans, iter, old);
 
        bch2_btree_set_root_inmem(c, b);
 
@@ -1121,15 +1110,17 @@ static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
         * an intent lock on the new root, and any updates that would
         * depend on the new root would have to update the new root.
         */
-       bch2_btree_node_unlock_write(old, iter);
+       bch2_btree_node_unlock_write(trans, iter, old);
 }
 
 /* Interior node updates: */
 
-static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
+static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
+                                       struct btree_trans *trans,
                                        struct btree_iter *iter,
-                                       struct bkey_i *insert,
-                                       struct btree_node_iter *node_iter)
+                                       struct btree *b,
+                                       struct btree_node_iter *node_iter,
+                                       struct bkey_i *insert)
 {
        struct bch_fs *c = as->c;
        struct bkey_packed *k;
@@ -1161,15 +1152,18 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
               bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
                bch2_btree_node_iter_advance(node_iter, b);
 
-       bch2_btree_bset_insert_key(iter, b, node_iter, insert);
+       bch2_btree_bset_insert_key(trans, iter, b, node_iter, insert);
        set_btree_node_dirty(c, b);
        set_btree_node_need_write(b);
 }
 
 static void
-__bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
-                                 struct btree_iter *iter, struct keylist *keys,
-                                 struct btree_node_iter node_iter)
+__bch2_btree_insert_keys_interior(struct btree_update *as,
+                                 struct btree_trans *trans,
+                                 struct btree_iter *iter,
+                                 struct btree *b,
+                                 struct btree_node_iter node_iter,
+                                 struct keylist *keys)
 {
        struct bkey_i *insert = bch2_keylist_front(keys);
        struct bkey_packed *k;
@@ -1181,8 +1175,8 @@ __bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
                ;
 
        while (!bch2_keylist_empty(keys)) {
-               bch2_insert_fixup_btree_ptr(as, b, iter,
-                               bch2_keylist_front(keys), &node_iter);
+               bch2_insert_fixup_btree_ptr(as, trans, iter, b,
+                               &node_iter, bch2_keylist_front(keys));
                bch2_keylist_pop_front(keys);
        }
 }
@@ -1308,8 +1302,10 @@ static struct btree *__btree_split_node(struct btree_update *as,
  * nodes that were coalesced, and thus in the middle of a child node post
  * coalescing:
  */
-static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
+static void btree_split_insert_keys(struct btree_update *as,
+                                   struct btree_trans *trans,
                                    struct btree_iter *iter,
+                                   struct btree *b,
                                    struct keylist *keys)
 {
        struct btree_node_iter node_iter;
@@ -1319,7 +1315,7 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
 
        bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
 
-       __bch2_btree_insert_keys_interior(as, b, iter, keys, node_iter);
+       __bch2_btree_insert_keys_interior(as, trans, iter, b, node_iter, keys);
 
        /*
         * We can't tolerate whiteouts here - with whiteouts there can be
@@ -1368,7 +1364,7 @@ static void btree_split(struct btree_update *as,
        bch2_btree_update_add_new_node(as, n1);
 
        if (keys)
-               btree_split_insert_keys(as, n1, iter, keys);
+               btree_split_insert_keys(as, trans, iter, n1, keys);
 
        if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
                trace_btree_split(c, b);
@@ -1398,7 +1394,7 @@ static void btree_split(struct btree_update *as,
                        n3->sib_u64s[0] = U16_MAX;
                        n3->sib_u64s[1] = U16_MAX;
 
-                       btree_split_insert_keys(as, n3, iter, &as->parent_keys);
+                       btree_split_insert_keys(as, trans, iter, n3, &as->parent_keys);
 
                        bch2_btree_node_write(c, n3, SIX_LOCK_intent);
                }
@@ -1420,10 +1416,10 @@ static void btree_split(struct btree_update *as,
                /* Split a non root node */
                bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags);
        } else if (n3) {
-               bch2_btree_set_root(as, n3, iter);
+               bch2_btree_set_root(as, trans, iter, n3);
        } else {
                /* Root filled up but didn't need to be split */
-               bch2_btree_set_root(as, n1, iter);
+               bch2_btree_set_root(as, trans, iter, n1);
        }
 
        bch2_btree_update_get_open_buckets(as, n1);
@@ -1435,12 +1431,12 @@ static void btree_split(struct btree_update *as,
        /* Successful split, update the iterator to point to the new nodes: */
 
        six_lock_increment(&b->c.lock, SIX_LOCK_intent);
-       bch2_btree_iter_node_drop(iter, b);
+       bch2_btree_iter_node_drop(trans, iter, b);
        if (n3)
-               bch2_btree_iter_node_replace(iter, n3);
+               bch2_btree_iter_node_replace(trans, iter, n3);
        if (n2)
-               bch2_btree_iter_node_replace(iter, n2);
-       bch2_btree_iter_node_replace(iter, n1);
+               bch2_btree_iter_node_replace(trans, iter, n2);
+       bch2_btree_iter_node_replace(trans, iter, n1);
 
        /*
         * The old node must be freed (in memory) _before_ unlocking the new
@@ -1448,7 +1444,7 @@ static void btree_split(struct btree_update *as,
         * node after another thread has locked and updated the new node, thus
         * seeing stale data:
         */
-       bch2_btree_node_free_inmem(c, b, iter);
+       bch2_btree_node_free_inmem(trans, iter, b);
 
        if (n3)
                six_unlock_intent(&n3->c.lock);
@@ -1463,19 +1459,23 @@ static void btree_split(struct btree_update *as,
 }
 
 static void
-bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
-                               struct btree_iter *iter, struct keylist *keys)
+bch2_btree_insert_keys_interior(struct btree_update *as,
+                               struct btree_trans *trans,
+                               struct btree_iter *iter,
+                               struct btree *b,
+                               struct keylist *keys)
 {
        struct btree_iter *linked;
 
-       __bch2_btree_insert_keys_interior(as, b, iter, keys, iter->l[b->c.level].iter);
+       __bch2_btree_insert_keys_interior(as, trans, iter, b,
+                                         iter->l[b->c.level].iter, keys);
 
        btree_update_updated_node(as, b);
 
-       trans_for_each_iter_with_node(iter->trans, b, linked)
+       trans_for_each_iter_with_node(trans, b, linked)
                bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
 
-       bch2_btree_trans_verify_iters(iter->trans, b);
+       bch2_btree_trans_verify_iters(trans, b);
 }
 
 /**
@@ -1509,13 +1509,13 @@ static void bch2_btree_insert_node(struct btree_update *as,
        bch2_btree_node_lock_for_insert(trans, iter, b);
 
        if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
-               bch2_btree_node_unlock_write(b, iter);
+               bch2_btree_node_unlock_write(trans, iter, b);
                goto split;
        }
 
        btree_node_interior_verify(c, b);
 
-       bch2_btree_insert_keys_interior(as, b, iter, keys);
+       bch2_btree_insert_keys_interior(as, trans, iter, b, keys);
 
        live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
        u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
@@ -1527,9 +1527,9 @@ static void bch2_btree_insert_node(struct btree_update *as,
 
        if (u64s_added > live_u64s_added &&
            bch2_maybe_compact_whiteouts(c, b))
-               bch2_btree_iter_reinit_node(iter, b);
+               bch2_btree_iter_reinit_node(trans, iter, b);
 
-       bch2_btree_node_unlock_write(b, iter);
+       bch2_btree_node_unlock_write(trans, iter, b);
 
        btree_node_interior_verify(c, b);
        return;
@@ -1547,7 +1547,7 @@ int bch2_btree_split_leaf(struct btree_trans *trans,
        unsigned l;
        int ret = 0;
 
-       as = bch2_btree_update_start(iter, iter->level,
+       as = bch2_btree_update_start(trans, iter, iter->level,
                btree_update_reserve_required(c, b), flags);
        if (IS_ERR(as))
                return PTR_ERR(as);
@@ -1660,7 +1660,7 @@ retry:
                goto out;
 
        parent = btree_node_parent(iter, b);
-       as = bch2_btree_update_start(iter, level,
+       as = bch2_btree_update_start(trans, iter, level,
                         btree_update_reserve_required(c, parent) + 1,
                         flags|
                         BTREE_INSERT_NOFAIL|
@@ -1702,15 +1702,15 @@ retry:
 
        six_lock_increment(&b->c.lock, SIX_LOCK_intent);
        six_lock_increment(&m->c.lock, SIX_LOCK_intent);
-       bch2_btree_iter_node_drop(iter, b);
-       bch2_btree_iter_node_drop(iter, m);
+       bch2_btree_iter_node_drop(trans, iter, b);
+       bch2_btree_iter_node_drop(trans, iter, m);
 
-       bch2_btree_iter_node_replace(iter, n);
+       bch2_btree_iter_node_replace(trans, iter, n);
 
        bch2_btree_trans_verify_iters(trans, n);
 
-       bch2_btree_node_free_inmem(c, b, iter);
-       bch2_btree_node_free_inmem(c, m, iter);
+       bch2_btree_node_free_inmem(trans, iter, b);
+       bch2_btree_node_free_inmem(trans, iter, m);
 
        six_unlock_intent(&n->c.lock);
 
@@ -1762,7 +1762,7 @@ retry:
                goto out;
 
        parent = btree_node_parent(iter, b);
-       as = bch2_btree_update_start(iter, b->c.level,
+       as = bch2_btree_update_start(trans, iter, b->c.level,
                (parent
                 ? btree_update_reserve_required(c, parent)
                 : 0) + 1,
@@ -1792,15 +1792,15 @@ retry:
                bch2_btree_insert_node(as, trans, iter, parent,
                                       &as->parent_keys, flags);
        } else {
-               bch2_btree_set_root(as, n, iter);
+               bch2_btree_set_root(as, trans, iter, n);
        }
 
        bch2_btree_update_get_open_buckets(as, n);
 
        six_lock_increment(&b->c.lock, SIX_LOCK_intent);
-       bch2_btree_iter_node_drop(iter, b);
-       bch2_btree_iter_node_replace(iter, n);
-       bch2_btree_node_free_inmem(c, b, iter);
+       bch2_btree_iter_node_drop(trans, iter, b);
+       bch2_btree_iter_node_replace(trans, iter, n);
+       bch2_btree_node_free_inmem(trans, iter, b);
        six_unlock_intent(&n->c.lock);
 
        bch2_btree_update_done(as);
@@ -1931,7 +1931,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
        if (ret)
                goto err;
 
-       bch2_btree_node_lock_write(b, iter);
+       bch2_btree_node_lock_write(trans, iter, b);
 
        if (new_hash) {
                mutex_lock(&c->btree_cache.lock);
@@ -1946,7 +1946,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
                bkey_copy(&b->key, new_key);
        }
 
-       bch2_btree_node_unlock_write(b, iter);
+       bch2_btree_node_unlock_write(trans, iter, b);
 out:
        bch2_trans_iter_put(trans, iter2);
        return ret;
index e88e737ee8134e8365fcd451bba8257f08a4820a..07046dab614bd8a340015fd178c24b0363b72c04 100644 (file)
@@ -113,24 +113,10 @@ struct btree_update {
        u64                             inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
 };
 
-void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
-                               struct btree_iter *);
-void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
-
-void bch2_btree_update_get_open_buckets(struct btree_update *, struct btree *);
-
 struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
                                                  struct btree *,
                                                  struct bkey_format);
 
-void bch2_btree_update_done(struct btree_update *);
-struct btree_update *
-bch2_btree_update_start(struct btree_iter *, unsigned, unsigned, unsigned);
-
-void bch2_btree_interior_update_will_free_node(struct btree_update *,
-                                              struct btree *);
-void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
-
 int bch2_btree_split_leaf(struct btree_trans *, struct btree_iter *, unsigned);
 
 int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_iter *,
index 791c121adeb2c5e0b9c13e2babbd40449f3b4baa..b32c8f14823a2ae498f6613898d998b85d5c1e7a 100644 (file)
@@ -42,14 +42,14 @@ inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
 {
        struct bch_fs *c = trans->c;
 
-       bch2_btree_node_lock_write(b, iter);
+       bch2_btree_node_lock_write(trans, iter, b);
 
        if (btree_iter_type(iter) == BTREE_ITER_CACHED)
                return;
 
        if (unlikely(btree_node_just_written(b)) &&
            bch2_btree_post_write_cleanup(c, b))
-               bch2_btree_iter_reinit_node(iter, b);
+               bch2_btree_iter_reinit_node(trans, iter, b);
 
        /*
         * If the last bset has been written, or if it's gotten too big - start
@@ -62,7 +62,8 @@ inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
 /* Inserting into a given leaf node (last stage of insert): */
 
 /* Handle overwrites and do insert, for non extents: */
-bool bch2_btree_bset_insert_key(struct btree_iter *iter,
+bool bch2_btree_bset_insert_key(struct btree_trans *trans,
+                               struct btree_iter *iter,
                                struct btree *b,
                                struct btree_node_iter *node_iter,
                                struct bkey_i *insert)
@@ -76,7 +77,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
        EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
        EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
        EBUG_ON(insert->k.u64s >
-               bch_btree_keys_u64s_remaining(iter->trans->c, b));
+               bch_btree_keys_u64s_remaining(trans->c, b));
        EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
 
        k = bch2_btree_node_iter_peek_all(node_iter, b);
@@ -96,7 +97,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
                k->type = KEY_TYPE_deleted;
 
                if (k->needs_whiteout)
-                       push_whiteout(iter->trans->c, b, insert->k.p);
+                       push_whiteout(trans->c, b, insert->k.p);
                k->needs_whiteout = false;
 
                if (k >= btree_bset_last(b)->start) {
@@ -104,7 +105,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
                        bch2_bset_delete(b, k, clobber_u64s);
                        goto fix_iter;
                } else {
-                       bch2_btree_iter_fix_key_modified(iter, b, k);
+                       bch2_btree_iter_fix_key_modified(trans, iter, b, k);
                }
 
                return true;
@@ -122,7 +123,7 @@ bool bch2_btree_bset_insert_key(struct btree_iter *iter,
                        clobber_u64s = k->u64s;
                        goto overwrite;
                } else {
-                       bch2_btree_iter_fix_key_modified(iter, b, k);
+                       bch2_btree_iter_fix_key_modified(trans, iter, b, k);
                }
        }
 
@@ -132,7 +133,7 @@ overwrite:
        new_u64s = k->u64s;
 fix_iter:
        if (clobber_u64s != new_u64s)
-               bch2_btree_node_iter_fix(iter, b, node_iter, k,
+               bch2_btree_node_iter_fix(trans, iter, b, node_iter, k,
                                         clobber_u64s, new_u64s);
        return true;
 }
@@ -190,7 +191,7 @@ static bool btree_insert_key_leaf(struct btree_trans *trans,
        EBUG_ON(!iter->level &&
                !test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags));
 
-       if (unlikely(!bch2_btree_bset_insert_key(iter, b,
+       if (unlikely(!bch2_btree_bset_insert_key(trans, iter, b,
                                        &iter_l(iter)->iter, insert)))
                return false;
 
@@ -212,7 +213,7 @@ static bool btree_insert_key_leaf(struct btree_trans *trans,
 
        if (u64s_added > live_u64s_added &&
            bch2_maybe_compact_whiteouts(c, b))
-               bch2_btree_iter_reinit_node(iter, b);
+               bch2_btree_iter_reinit_node(trans, iter, b);
 
        trace_btree_insert_key(c, b, insert);
        return true;
@@ -610,8 +611,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 
        trans_for_each_update(trans, i)
                if (!same_leaf_as_prev(trans, i))
-                       bch2_btree_node_unlock_write_inlined(iter_l(i->iter)->b,
-                                                            i->iter);
+                       bch2_btree_node_unlock_write_inlined(trans, i->iter,
+                                                       iter_l(i->iter)->b);
 
        if (!ret && trans->journal_pin)
                bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
@@ -1178,7 +1179,7 @@ retry:
                        bch2_key_resize(&delete.k, max_sectors);
                        bch2_cut_back(end, &delete);
 
-                       ret = bch2_extent_trim_atomic(&delete, iter);
+                       ret = bch2_extent_trim_atomic(trans, iter, &delete);
                        if (ret)
                                break;
                }
index c0855245f2ec4127fc1e735b01074d6154d5adfd..c20c80bd344d39f9948607b08a9fcdda37be2cba 100644 (file)
@@ -552,19 +552,19 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
        return 0;
 }
 
-static int ec_stripe_mem_alloc(struct bch_fs *c,
+static int ec_stripe_mem_alloc(struct btree_trans *trans,
                               struct btree_iter *iter)
 {
        size_t idx = iter->pos.offset;
        int ret = 0;
 
-       if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
+       if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
                return ret;
 
-       bch2_trans_unlock(iter->trans);
+       bch2_trans_unlock(trans);
        ret = -EINTR;
 
-       if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
+       if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL))
                return ret;
 
        return -ENOMEM;
@@ -735,7 +735,7 @@ retry:
 found_slot:
        start_pos = iter->pos;
 
-       ret = ec_stripe_mem_alloc(c, iter);
+       ret = ec_stripe_mem_alloc(&trans, iter);
        if (ret)
                goto err;
 
index 4a8dd085f7fb80b2650f5bab433cae077343d1e3..93d55f46233f0917c734c595e8d2258fe7ebce54 100644 (file)
@@ -94,11 +94,11 @@ static int count_iters_for_insert(struct btree_trans *trans,
 
 #define EXTENT_ITERS_MAX       (BTREE_ITER_MAX / 3)
 
-int bch2_extent_atomic_end(struct btree_iter *iter,
+int bch2_extent_atomic_end(struct btree_trans *trans,
+                          struct btree_iter *iter,
                           struct bkey_i *insert,
                           struct bpos *end)
 {
-       struct btree_trans *trans = iter->trans;
        struct btree_iter *copy;
        struct bkey_s_c k;
        unsigned nr_iters = 0;
@@ -153,27 +153,17 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
        return ret < 0 ? ret : 0;
 }
 
-int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+int bch2_extent_trim_atomic(struct btree_trans *trans,
+                           struct btree_iter *iter,
+                           struct bkey_i *k)
 {
        struct bpos end;
        int ret;
 
-       ret = bch2_extent_atomic_end(iter, k, &end);
+       ret = bch2_extent_atomic_end(trans, iter, k, &end);
        if (ret)
                return ret;
 
        bch2_cut_back(end, k);
        return 0;
 }
-
-int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
-{
-       struct bpos end;
-       int ret;
-
-       ret = bch2_extent_atomic_end(iter, k, &end);
-       if (ret)
-               return ret;
-
-       return !bkey_cmp(end, k->k.p);
-}
index 2fa4602967e04f5cf02033a19d5d134ba90dcf8c..6f5cf449361a7f1aa6661086c43110c8b2e15455 100644 (file)
@@ -4,9 +4,9 @@
 
 #include "bcachefs.h"
 
-int bch2_extent_atomic_end(struct btree_iter *, struct bkey_i *,
-                          struct bpos *);
-int bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
-int bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
+int bch2_extent_atomic_end(struct btree_trans *, struct btree_iter *,
+                          struct bkey_i *, struct bpos *);
+int bch2_extent_trim_atomic(struct btree_trans *, struct btree_iter *,
+                           struct bkey_i *);
 
 #endif /* _BCACHEFS_EXTENT_UPDATE_H */
index 9ac10b72d1cf0a5d0c01def5a075c364c48aa593..251029c33164cb822867d5c073003f8ae7527248 100644 (file)
@@ -2576,7 +2576,7 @@ reassemble:
                copy.k->k.p.offset += shift >> 9;
                bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));
 
-               ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
+               ret = bch2_extent_atomic_end(&trans, dst, copy.k, &atomic_end);
                if (ret)
                        continue;
 
index 30d9b6e4abf74482e788f59bd9e5bc128d8fce4e..27f6b324574194c96f6f90fb4e6cccbc17220ef7 100644 (file)
@@ -280,7 +280,7 @@ int bch2_extent_update(struct btree_trans *trans,
        s64 i_sectors_delta = 0, disk_sectors_delta = 0;
        int ret;
 
-       ret = bch2_extent_trim_atomic(k, iter);
+       ret = bch2_extent_trim_atomic(trans, iter, k);
        if (ret)
                return ret;