"data type inconsistency");
bkey_fsck_err_on(!a.io_time[READ] &&
- c->recovery.curr_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
+ !(c->recovery.passes_to_run &
+ BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs)),
c, alloc_key_cached_but_read_time_zero,
"cached bucket with read_time == 0");
break;
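
This first hunk sets the pattern for the whole series: ordinal comparisons against c->recovery.curr_pass are replaced with explicit bit tests against per-pass masks. curr_pass only says where the sequential pass runner currently stands; it does not say whether a given pass was ever scheduled, and it stops being meaningful once passes can be re-queued out of order. The masks answer the two questions callers actually ask. A minimal sketch of the idiom, assuming the passes_to_run and passes_complete fields shown in these hunks (bcachefs-internal types come from the patch context); the wrapper helpers are hypothetical, not part of the patch:

#include <linux/bits.h>

/* "will pass X still run this mount?" -- used above so that a zero
 * read_time is not flagged as an error when the scheduled
 * check_alloc_to_lru_refs pass is going to repair it anyway */
static inline bool pass_still_scheduled(struct bch_fs *c,
                                        enum bch_recovery_pass pass)
{
        return c->recovery.passes_to_run & BIT_ULL(pass);
}

/* "did pass X actually run to completion this mount?" */
static inline bool pass_completed(struct bch_fs *c,
                                  enum bch_recovery_pass pass)
{
        return c->recovery.passes_complete & BIT_ULL(pass);
}
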
 static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
 {
-        if (c->recovery.curr_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs)
+        if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_trans_mark_dev_sbs))
                 return false;
         return bch2_is_superblock_bucket(ca, b);
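
Here the distinction matters in practice: the old curr_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs became true as soon as the pass cursor moved past that slot, whether or not superblock buckets were actually marked. The completion bit is only set by the pass itself, so the allocator keeps special-casing superblock buckets until the marking has really happened. A hypothetical mid-recovery state to illustrate (our gloss, not code from the patch):

/*
 * Suppose trans_mark_dev_sbs has not completed, but the pass cursor
 * already points at a later pass:
 *
 *   old: curr_pass > trans_mark_dev_sbs               -> true, so
 *        is_superblock_bucket() returns false and the allocator could
 *        hand out buckets that were never marked
 *   new: passes_complete & BIT_ULL(trans_mark_dev_sbs) -> false, so
 *        superblock buckets stay off limits until actually marked
 */
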
         if (!avail) {
                 if (req->watermark > BCH_WATERMARK_normal &&
-                    c->recovery.curr_pass <= BCH_RECOVERY_PASS_check_allocations)
+                    c->recovery.pass_done < BCH_RECOVERY_PASS_check_allocations)
                         goto alloc;
                 if (cl && !waiting) {
                         goto alloc;
                 }
-        if (!ob && freespace && c->recovery.curr_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+        if (!ob && freespace && c->recovery.pass_done < BCH_RECOVERY_PASS_check_alloc_info) {
                 freespace = false;
                 goto alloc;
         }
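
pass_done, in contrast to the bitmasks, appears to track how far the sequential recovery run has progressed, so ordering comparisons remain the right tool for "has recovery gotten past X yet". The two hunks above read naturally that way: allow emergency-watermark allocations, and fall back from the freespace btree, only while the corresponding checks have not yet finished. A hypothetical helper naming that reading (assumes pass_done advances monotonically through the sequential pass list; not part of the patch):

static inline bool recovery_reached(struct bch_fs *c,
                                    enum bch_recovery_pass pass)
{
        /* true once pass has completed in this mount's sequential run */
        return c->recovery.pass_done >= pass;
}

With it, the conditions above become !recovery_reached(c, BCH_RECOVERY_PASS_check_allocations) and !recovery_reached(c, BCH_RECOVERY_PASS_check_alloc_info).
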
 {
         struct bch_fs *c = trans->c;
         struct printbuf buf = PRINTBUF;
+        bool will_check = c->recovery.passes_to_run &
+                BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
         int ret = 0;
         if (insert) {
                 bch2_bkey_val_to_text(&buf, c, orig_k);
                 bch_err(c, "%s", buf.buf);
-        } else if (c->recovery.curr_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
+        } else if (!will_check) {
                 prt_printf(&buf, "backpointer not found when deleting\n");
                 printbuf_indent_add(&buf, 2);
                 bch2_bkey_val_to_text(&buf, c, orig_k);
         }
-        if (c->recovery.curr_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers &&
-            __bch2_inconsistent_error(c, &buf))
+        if (!will_check && __bch2_inconsistent_error(c, &buf))
                 ret = -BCH_ERR_erofs_unfixed_errors;
         bch_err(c, "%s", buf.buf);
 {
         struct printbuf buf = PRINTBUF;
-        if (c->recovery.curr_pass <= BCH_RECOVERY_PASS_check_allocations)
+        if (c->recovery.pass_done < BCH_RECOVERY_PASS_check_allocations)
                 return;
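
Same shape as the allocator hunks: the early return keeps this reporting path quiet until check_allocations has completed, since whatever it would print cannot be trusted before the allocation checks have run.
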
         prt_printf(&buf,
         prt_newline(&buf);
         bch2_btree_lost_data(c, &buf, b->c.btree_id);
-        if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
-            c->recovery.curr_pass > BCH_RECOVERY_PASS_check_topology &&
+        if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
             bch2_fs_emergency_read_only2(c, &buf))
                 ratelimit = false;
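
The old condition needed two facts: check_topology was enabled (c->opts.recovery_passes) and the runner was past it (curr_pass). A completion bit is only ever set by actually running the pass, so "enabled" is implied by "complete", and one test now suffices before escalating to emergency read-only:

/* our gloss on the subsumption, not kernel documentation:
 *
 *   old: (opts.recovery_passes & BIT_ULL(pass)) && curr_pass > pass
 *   new:  passes_complete & BIT_ULL(pass)
 *
 * the new form is also immune to curr_pass moving backwards when an
 * earlier pass gets re-queued
 */
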
         bool now = false, pending = false;
         spin_lock(&c->btree_node_rewrites_lock);
-        if (c->recovery.curr_pass > BCH_RECOVERY_PASS_journal_replay &&
+        if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_journal_replay) &&
             enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_node_rewrite)) {
                 list_add(&a->list, &c->btree_node_rewrites);
                 now = true;
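
Again passes_complete gates the immediate path: a node rewrite is only issued right away once journal replay has actually finished and a write reference can be taken via enumerated_ref_tryget(); presumably the reference is dropped when the rewrite completes. Rewrites arriving before that point are handled by the not-shown else branch, which would leave them queued for recovery to flush later.
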
         c->opts.fsck = true;
         set_bit(BCH_FS_in_fsck, &c->flags);
-        c->recovery.curr_pass = BCH_RECOVERY_PASS_check_alloc_info;
         int ret = bch2_run_online_recovery_passes(c, ~0ULL);
         clear_bit(BCH_FS_in_fsck, &c->flags);
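
Online fsck no longer reaches into the pass machinery by assigning curr_pass directly; it simply tells bch2_run_online_recovery_passes() which passes to run, with ~0ULL meaning all of them. A narrower invocation would look like this (hypothetical usage of the same two-argument call shown above):

/* run only the alloc-info check rather than a full online fsck */
u64 mask = BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
int ret = bch2_run_online_recovery_passes(c, mask);
if (ret)
        bch_err(c, "online recovery passes: %s", bch2_err_str(ret));
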
                 if (BCH_SNAPSHOT_WILL_DELETE(s.v)) {
                         set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
-                        if (c->recovery.curr_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
+                        if (c->recovery.pass_done > BCH_RECOVERY_PASS_delete_dead_snapshots)
                                 bch2_delete_dead_snapshots_async(c);
                 }
         } else {
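
One ordinal compare survives on the "already ran" side: dead-snapshot reaping is only kicked off asynchronously once sequential recovery has moved strictly past delete_dead_snapshots; before that, setting BCH_FS_need_delete_dead_snapshots is enough, since the pass itself will reap when its turn comes. Taken together, the hunks give a consistent picture of the three fields:

/* cheat sheet, as read from these hunks (our gloss, not kernel
 * documentation):
 *
 *   passes_to_run    u64 bitmask: passes still scheduled this mount
 *   passes_complete  u64 bitmask: passes that actually ran to completion
 *   pass_done        ordinal: last pass the sequential run has finished
 */
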