/**
 * btree_insert_key_leaf - insert one key into a leaf node
 */
-static bool btree_insert_key_leaf(struct btree_trans *trans,
+static void btree_insert_key_leaf(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
if (unlikely(!bch2_btree_bset_insert_key(trans, insert->path, b,
&insert_l(insert)->iter, insert->k)))
-	return false;
+	return;
i->journal_seq = cpu_to_le64(max(trans->journal_res.seq,
le64_to_cpu(i->journal_seq)));
if (u64s_added > live_u64s_added &&
bch2_maybe_compact_whiteouts(c, b))
bch2_trans_node_reinit_iter(trans, b);
-
-	return true;
}
/* Cached btree updates: */
{
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
- bool did_work;
EBUG_ON(trans->journal_res.ref !=
!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
i->k->k.needs_whiteout = false;
if (!i->cached)
- did_work = btree_insert_key_leaf(trans, i);
+ btree_insert_key_leaf(trans, i);
else if (!i->key_cache_already_flushed)
- did_work = bch2_btree_insert_key_cached(trans, i->path, i->k);
+ bch2_btree_insert_key_cached(trans, i->path, i->k);
else {
bch2_btree_key_cache_drop(trans, i->path);
- did_work = false;
- }
- if (!did_work)
return;
+ }
if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) &&
!(i->flags & BTREE_UPDATE_NOJOURNAL)) {