struct bpos *end)
{
struct btree_trans *trans = iter->trans;
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
+ struct btree *b;
+ struct btree_node_iter node_iter;
struct bkey_packed *_k;
unsigned nr_iters = 0;
int ret;
- BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
+ b = iter->l[0].b;
+ node_iter = iter->l[0].iter;
+
BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
*end = bpos_min(insert->k.p, b->key.k.p);
s64 i_sectors_delta;
int ret;
- ret = bch2_btree_iter_traverse(extent_iter);
- if (ret)
- return ret;
-
ret = bch2_extent_trim_atomic(k, extent_iter);
if (ret)
return ret;
copy.k.k.p.offset += shift >> 9;
bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k.k));
- ret = bch2_btree_iter_traverse(dst);
- if (ret)
- goto bkey_err;
-
- ret = bch2_extent_atomic_end(dst, &copy.k, &atomic_end);
if (ret)
goto bkey_err;
BTREE_ITER_INTENT);
do {
- ret = bch2_btree_iter_traverse(iter);
+ ret = bch2_extent_atomic_end(iter, k, &atomic_end);
if (ret)
goto err;
if (ret)
goto err;
- ret = bch2_extent_atomic_end(split_iter, k, &atomic_end);
- if (ret)
- goto err;
-
if (!remark &&
remark_if_split &&
bkey_cmp(atomic_end, k->k.p) < 0) {