struct btree_iter *iter;
struct bkey_s_c_xattr xattr;
struct posix_acl *acl = NULL;
+ struct bkey_s_c k;
+ int ret;
bch2_trans_init(&trans, c, 0, 0);
retry:
goto out;
}
- xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret) {
+ acl = ERR_PTR(ret);
+ goto out;
+ }
+
+ xattr = bkey_s_c_to_xattr(k);
acl = bch2_acl_from_disk(xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
struct bkey_s_c_xattr xattr;
struct bkey_i_xattr *new;
struct posix_acl *acl;
+ struct bkey_s_c k;
int ret;
iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc,
if (ret)
return ret == -ENOENT ? 0 : ret;
- xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ xattr = bkey_s_c_to_xattr(k);
+
acl = bch2_acl_from_disk(xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
ret = PTR_ERR_OR_ZERO(acl);
percpu_ref_put(&ca->ref);
goto err;
}
- bch2_btree_iter_next_slot(iter);
+ bch2_btree_iter_advance(iter);
}
}
err:
return b;
}
-void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
- const struct bkey_i *k,
- enum btree_id btree_id, unsigned level)
+int bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
+ const struct bkey_i *k,
+ enum btree_id btree_id, unsigned level)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
b = btree_cache_find(bc, k);
if (b)
- return;
+ return 0;
- bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+ b = bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+ return PTR_ERR_OR_ZERO(b);
}
void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *,
enum btree_id, unsigned, bool);
-void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
- const struct bkey_i *, enum btree_id, unsigned);
+int bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
+ const struct bkey_i *, enum btree_id, unsigned);
void bch2_btree_node_evict(struct bch_fs *, const struct bkey_i *);
}
noinline
-static void btree_iter_prefetch(struct btree_iter *iter)
+static int btree_iter_prefetch(struct btree_iter *iter)
{
struct bch_fs *c = iter->trans->c;
struct btree_iter_level *l = &iter->l[iter->level];
? (iter->level > 1 ? 0 : 2)
: (iter->level > 1 ? 1 : 16);
bool was_locked = btree_node_locked(iter, iter->level);
+ int ret = 0;
bch2_bkey_buf_init(&tmp);
- while (nr) {
+ while (nr && !ret) {
if (!bch2_btree_node_relock(iter, iter->level))
break;
break;
bch2_bkey_buf_unpack(&tmp, c, l->b, k);
- bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
- iter->level - 1);
+ ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
+ iter->level - 1);
}
if (!was_locked)
btree_node_unlock(iter, iter->level);
bch2_bkey_buf_exit(&tmp, c);
+ return ret;
}
static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
btree_node_mem_ptr_set(iter, level + 1, b);
if (iter->flags & BTREE_ITER_PREFETCH)
- btree_iter_prefetch(iter);
+ ret = btree_iter_prefetch(iter);
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
goto out;
old_dst = bch2_btree_iter_peek_slot(dst_iter);
+ ret = bkey_err(old_dst);
+ if (ret)
+ goto out;
if (mode != BCH_RENAME)
*dst_inum = le64_to_cpu(bkey_s_c_to_dirent(old_dst).v->d_inum);
goto out;
old_src = bch2_btree_iter_peek_slot(src_iter);
+ ret = bkey_err(old_src);
+ if (ret)
+ goto out;
+
*src_inum = le64_to_cpu(bkey_s_c_to_dirent(old_src).v->d_inum);
/* Create new dst key: */
struct btree_iter *iter;
struct bkey_s_c k;
u64 inum = 0;
+ int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
iter = __bch2_dirent_lookup_trans(&trans, dir_inum,
hash_info, name, 0);
- if (IS_ERR(iter)) {
- BUG_ON(PTR_ERR(iter) == -EINTR);
+ ret = PTR_ERR_OR_ZERO(iter);
+ if (ret)
goto out;
- }
k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto out;
+
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
bch2_trans_iter_put(&trans, iter);
out:
+ BUG_ON(ret == -EINTR);
bch2_trans_exit(&trans);
return inum;
}
goto err;
k = bch2_btree_iter_peek_slot(dirent_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT);
/* already reserved */
if (k.k->type == KEY_TYPE_reservation &&
bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
- bch2_btree_iter_next_slot(iter);
+ bch2_btree_iter_advance(iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_next_slot(iter);
+ bch2_btree_iter_advance(iter);
continue;
}
if (k.k->p.snapshot == snapshot &&
k.k->type != KEY_TYPE_inode &&
!bch2_btree_key_cache_find(c, BTREE_ID_inodes, SPOS(0, pos, snapshot))) {
- bch2_btree_iter_next(iter);
+ bch2_btree_iter_advance(iter);
continue;
}
* writing to, because i_size could be up to one block
* less:
*/
- if (!bkey_cmp(old.k->p, new->k.p))
+ if (!bkey_cmp(old.k->p, new->k.p)) {
old = bch2_btree_iter_next(iter);
+ ret = bkey_err(old);
+ if (ret)
+ break;
+ }
if (old.k && !bkey_err(old) &&
old.k->p.inode == extent_iter->pos.inode &&
}
atomic_long_inc(&c->extent_migrate_raced);
trace_move_race(&new->k);
- bch2_btree_iter_next_slot(iter);
+ bch2_btree_iter_advance(iter);
goto next;
}
out:
return k;
}
- bch2_btree_iter_set_pos(iter, end);
- return bkey_s_c_null;
+ if (bkey_cmp(iter->pos, end) >= 0)
+ bch2_btree_iter_set_pos(iter, end);
+ return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
s64 bch2_remap_range(struct bch_fs *c,
iter = bch2_trans_copy_iter(trans, start);
- bch2_btree_iter_next_slot(iter);
+ bch2_btree_iter_advance(iter);
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
if (k.k->type != desc.key_type &&
struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
struct btree_iter *iter;
struct bkey_s_c_xattr xattr;
+ struct bkey_s_c k;
int ret;
iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc, &hash,
if (ret)
goto err;
- xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ xattr = bkey_s_c_to_xattr(k);
ret = le16_to_cpu(xattr.v->x_val_len);
if (buffer) {
if (ret > size)