 			       struct btree_path *path)
 {
 	struct bch_fs *c = trans->c;
+	struct btree_key_cache *bc = &c->btree_key_cache;
 	struct bkey_cached *ck = (void *) path->l[0].b;
 
 	BUG_ON(!ck->valid);
@@ ... @@ void bch2_btree_key_cache_drop(struct btree_trans *trans,
 		bch2_journal_pin_drop(&c->journal, &ck->journal);
 	}
 
-	ck->valid = false;
+	bkey_cached_evict(bc, ck);
+	bkey_cached_free_fast(bc, ck);
+
+	mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
+	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
 }
 
 static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
@@ ... @@
 {
 	if (likely(trans->write_locked)) {
 		trans_for_each_update(trans, i)
-			if (!same_leaf_as_prev(trans, i))
+			if (btree_node_locked_type(trans->paths + i->path, i->level) ==
+			    BTREE_NODE_WRITE_LOCKED)
 				bch2_btree_node_unlock_write_inlined(trans,
 						trans->paths + i->path, insert_l(trans, i)->b);
 		trans->write_locked = false;
@@ ... @@
 	trans_for_each_update(trans, i) {
 		struct btree_path *path = trans->paths + i->path;
 
-		if (!i->cached) {
+		if (!i->cached)
 			bch2_btree_insert_key_leaf(trans, path, i->k, trans->journal_res.seq);
-		} else if (!i->key_cache_already_flushed)
+		else if (!i->key_cache_already_flushed)
 			bch2_btree_insert_key_cached(trans, flags, i);
-		else {
+		else
 			bch2_btree_key_cache_drop(trans, path);
-			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-		}
 	}
 
 	return 0;