for (_k = i->start;
_k < vstruct_last(i);
_k = _n) {
- _n = bkey_next_skip_noops(_k, vstruct_last(i));
+ _n = bkey_next(_k);
k = bkey_disassemble(b, _k, &uk);
if (c)
rw_aux_tree(b, t)[j - 1].offset);
}
- k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+ k = bkey_next(k);
BUG_ON(k >= btree_bkey_last(b, t));
}
}
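For reference while reading these hunks: bkey_next() simply advances by the current key's size, so it no longer needs the end-of-bset bound that bkey_next_skip_noops() took. A minimal sketch of the helper, paraphrased from bkey.h rather than taken from this patch:

/*
 * Sketch only (assumption, not part of this diff): with zero-u64s "noop"
 * keys gone, stepping to the next key is plain pointer arithmetic over
 * the key's length in u64s.
 */
static inline struct bkey_packed *bkey_next(struct bkey_packed *k)
{
	return (struct bkey_packed *) ((u64 *) k->_data + k->u64s);
}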
/* First we figure out where the first key in each cacheline is */
eytzinger1_for_each(j, t->size) {
while (bkey_to_cacheline(b, t, k) < cacheline)
- prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+ prev = k, k = bkey_next(k);
if (k >= btree_bkey_last(b, t)) {
/* XXX: this path sucks */
}
while (k != btree_bkey_last(b, t))
- prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
+ prev = k, k = bkey_next(k);
t->max_key = bkey_unpack_pos(b, prev);
struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
while ((p = __bkey_prev(b, t, k)) && !ret) {
- for (i = p; i != k; i = bkey_next_skip_noops(i, k))
+ for (i = p; i != k; i = bkey_next(i))
if (i->type >= min_key_type)
ret = i;
BUG_ON(ret >= orig_k);
for (i = ret
- ? bkey_next_skip_noops(ret, orig_k)
+ ? bkey_next(ret)
: btree_bkey_first(b, t);
i != orig_k;
- i = bkey_next_skip_noops(i, orig_k))
+ i = bkey_next(i))
BUG_ON(i->type >= min_key_type);
}
/* signal to make_bfloat() that they're uninitialized: */
min_key.u64s = max_key.u64s = 0;
- if (bkey_next_skip_noops(k, btree_bkey_last(b, t)) == btree_bkey_last(b, t)) {
+ if (bkey_next(k) == btree_bkey_last(b, t)) {
t->max_key = bkey_unpack_pos(b, k);
for (j = 1; j < t->size; j = j * 2 + 1)
struct bkey_packed *k = start;
while (1) {
- k = bkey_next_skip_noops(k, end);
+ k = bkey_next(k);
if (k == end)
break;
while (m != btree_bkey_last(b, t) &&
bkey_iter_cmp_p_or_unp(b, m,
lossy_packed_search, search) < 0)
- m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
+ m = bkey_next(m);
if (!packed_search)
while (m != btree_bkey_last(b, t) &&
bkey_iter_pos_cmp(b, m, search) < 0)
- m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
+ m = bkey_next(m);
if (bch2_expensive_debug_checks) {
struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
EBUG_ON(iter->data->k > iter->data->end);
- while (!__btree_node_iter_set_end(iter, 0) &&
- !__bch2_btree_node_iter_peek_all(iter, b)->u64s)
- iter->data->k++;
-
if (unlikely(__btree_node_iter_set_end(iter, 0))) {
bch2_btree_node_iter_set_drop(iter, iter->data);
return;
if (start == end)
return;
- for (p = start, k = bkey_next_skip_noops(start, end);
+ for (p = start, k = bkey_next(start);
k != end;
- p = k, k = bkey_next_skip_noops(k, end)) {
+ p = k, k = bkey_next(k)) {
struct bkey l = bkey_unpack_key(b, p);
struct bkey r = bkey_unpack_key(b, k);
{
struct bkey_packed *k;
- for (k = i->start;
- k != vstruct_last(i);
- k = bkey_next_skip_noops(k, vstruct_last(i)))
+ for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
k->needs_whiteout = v;
}
out = i->start;
for (k = start; k != end; k = n) {
- n = bkey_next_skip_noops(k, end);
+ n = bkey_next(k);
if (!bkey_deleted(k)) {
bkey_copy(out, k);
}
prev = k;
- k = bkey_next_skip_noops(k, vstruct_last(i));
+ k = bkey_next(k);
}
fsck_err:
return ret;
bp.v->mem_ptr = 0;
}
- k = bkey_next_skip_noops(k, vstruct_last(i));
+ k = bkey_next(k);
}
bch2_bset_build_aux_tree(b, b->set, false);