To help ameliorate issues with peek operations having to skip over
deletions in the journal, just bail out if all we're doing is
prefetching btree nodes.

Since btree node prefetching runs every time we iterate to a new node
and has to sequentially scan ahead, this avoids another O(n^2).
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
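
As a standalone illustration of the idea (a minimal sketch, not the
bcachefs code: demo_key, demo_iter, demo_iter_peek and
WHITEOUT_SCAN_LIMIT are invented names for this example; only the cap
of 20 iterations and the fail_if_too_many_whiteouts flag come from the
patch below), a peek that would otherwise have to walk an arbitrarily
long run of deletions simply gives up after a bounded number of steps
when the caller is only prefetching:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define WHITEOUT_SCAN_LIMIT	20	/* mirrors the "iters > 20" cap below */

struct demo_key {
	int	pos;
	bool	is_whiteout;		/* a deletion recorded in the journal */
};

struct demo_iter {
	const struct demo_key	*keys;
	size_t			nr, idx;
	/* set by the prefetch path: peek may give up instead of scanning */
	bool			fail_if_too_many_whiteouts;
};

/*
 * Return the next live key, or NULL: when the iterator is only being
 * used to prefetch btree nodes, bail after a bounded number of skipped
 * whiteouts rather than scanning an arbitrarily long run of deletions.
 */
static const struct demo_key *demo_iter_peek(struct demo_iter *iter)
{
	size_t iters = 0;

	while (iter->idx < iter->nr) {
		const struct demo_key *k = &iter->keys[iter->idx];

		if (++iters > WHITEOUT_SCAN_LIMIT &&
		    iter->fail_if_too_many_whiteouts)
			return NULL;	/* prefetch is best effort: give up */

		if (!k->is_whiteout)
			return k;
		iter->idx++;		/* skip the deletion */
	}
	return NULL;
}

int main(void)
{
	const struct demo_key keys[] = {
		{ 1, true }, { 2, true }, { 3, false },
	};
	struct demo_iter it = {
		.keys = keys, .nr = 3,
		.fail_if_too_many_whiteouts = true,
	};
	const struct demo_key *k = demo_iter_peek(&it);

	if (k)
		printf("next live key at pos %d\n", k->pos);
	else
		printf("bailed out: too many whiteouts for a prefetch\n");
	return 0;
}

Note that the scan only bails when fail_if_too_many_whiteouts is set,
so ordinary (non-prefetch) peeks still see every key; a failed prefetch
just means a node isn't warmed in the cache ahead of time.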
 	bch2_bkey_buf_init(&tmp);
 
+	jiter->fail_if_too_many_whiteouts = true;
+
 	while (nr-- && !ret) {
 		if (!bch2_btree_node_relock(trans, path, path->level))
 			break;
 		: (level > 1 ? 1 : 16);
 
 	iter.prefetch = false;
+	iter.fail_if_too_many_whiteouts = true;
 	bch2_bkey_buf_init(&tmp);
 
 	while (nr--) {
 struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
 {
 	struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;
+	size_t iters = 0;
 
 	if (iter->prefetch && iter->journal.level)
 		btree_and_journal_iter_prefetch(iter);
 again:
 	if (iter->at_end)
 		return bkey_s_c_null;
 
+	iters++;
+
+	if (iters > 20 && iter->fail_if_too_many_whiteouts)
+		return bkey_s_c_null;
+
 	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
 	       bpos_lt(btree_k.k->p, iter->pos))
 		bch2_journal_iter_advance_btree(iter);
 	struct bpos		pos;
 	bool			at_end;
 	bool			prefetch;
+	bool			fail_if_too_many_whiteouts;
 };
 
 static inline int __journal_key_btree_cmp(enum btree_id l_btree_id,