From 84b9f17195b2d4914763feee00ca44be42c9f8e2 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 15 May 2025 10:08:06 -0400
Subject: [PATCH] bcachefs: do_rebalance_scan() now only updates
 bch_extent_rebalance

This ensures that our pending rebalance work accounting is accurate
quickly.

Signed-off-by: Kent Overstreet
---
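Note (editorial, not part of the commit message): per the subject and the
rebalance.c hunk below, the scan pass no longer moves data through
rebalance_pred(); it only walks the extents btree and calls
bch2_move_get_io_opts() so that bch_extent_rebalance is brought up to date,
leaving the actual moves to the next do_rebalance() pass. As a rough,
standalone illustration of why splitting "account the work" from "do the
work" makes the pending counter accurate quickly, here is a toy C model;
every name in it is invented for the example and none of it is bcachefs
code:

	/* Toy model: a scan pass only *accounts* pending work, a separate
	 * pass performs it, so the pending counter is accurate as soon as
	 * the scan finishes. All types and names here are made up. */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_extent {
		bool needs_rebalance;	/* stands in for bch_extent_rebalance being set */
		bool moved;
	};

	static void toy_scan(struct toy_extent *e, unsigned nr, unsigned long *pending)
	{
		/* scan: bump accounting only, no data movement */
		for (unsigned i = 0; i < nr; i++)
			if (e[i].needs_rebalance && !e[i].moved)
				(*pending)++;
	}

	static void toy_rebalance(struct toy_extent *e, unsigned nr, unsigned long *pending)
	{
		/* separate pass: perform the moves the scan accounted for */
		for (unsigned i = 0; i < nr; i++)
			if (e[i].needs_rebalance && !e[i].moved) {
				e[i].moved = true;
				(*pending)--;
			}
	}

	int main(void)
	{
		struct toy_extent extents[4] = {
			{ .needs_rebalance = true  },
			{ .needs_rebalance = false },
			{ .needs_rebalance = true  },
			{ .needs_rebalance = true  },
		};
		unsigned long pending = 0;

		toy_scan(extents, 4, &pending);
		printf("pending after scan: %lu\n", pending);	/* accurate immediately */
		toy_rebalance(extents, 4, &pending);
		printf("pending after rebalance: %lu\n", pending);
		return 0;
	}
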
 fs/bcachefs/move.c      |  2 +-
 fs/bcachefs/move.h      |  4 ++++
 fs/bcachefs/rebalance.c | 42 ++++++++++++++++++++++++++---------------
 3 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 3a92eced2e67b..49898d5743d4b 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -412,7 +412,7 @@ err:
 	return ret;
 }
 
-static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
+struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
 			  struct per_snapshot_io_opts *io_opts,
 			  struct bpos extent_pos, /* extent_iter, extent_k may be in reflink btree */
 			  struct btree_iter *extent_iter,
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index fb38383ffc7b7..86b80499ac55f 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -122,6 +122,10 @@ int bch2_move_extent(struct moving_context *,
 		     struct bch_io_opts,
 		     struct data_update_opts);
 
+struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *,
+			  struct per_snapshot_io_opts *, struct bpos,
+			  struct btree_iter *, struct bkey_s_c);
+
 int bch2_move_data_btree(struct moving_context *, struct bpos, struct bpos,
 			 move_pred_fn, void *, enum btree_id, unsigned);
 int __bch2_move_data(struct moving_context *,
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 8fefe2b174c2a..c223bb092d330 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -459,22 +459,11 @@ out:
 	return ret;
 }
 
-static bool rebalance_pred(struct bch_fs *c, void *arg,
-			   enum btree_id btree, struct bkey_s_c k,
-			   struct bch_io_opts *io_opts,
-			   struct data_update_opts *data_opts)
-{
-	data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
-	data_opts->target = io_opts->background_target;
-	data_opts->write_flags |= BCH_WRITE_only_specified_devs;
-	return data_opts->rewrite_ptrs != 0;
-}
-
 static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
 {
 	struct btree_trans *trans = ctxt->trans;
+	struct bch_fs *c = trans->c;
 	struct bch_fs_rebalance *r = &trans->c->rebalance;
-	int ret;
 
 	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
 	ctxt->stats = &r->scan_stats;
@@ -489,11 +478,34 @@ static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
 
 	r->state = BCH_REBALANCE_scanning;
 
-	ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
-		commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-			  bch2_clear_rebalance_needs_scan(trans, inum, cookie));
+	struct per_snapshot_io_opts snapshot_io_opts;
+	per_snapshot_io_opts_init(&snapshot_io_opts, c);
+
+	int ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents,
+					 r->scan_start.pos, r->scan_end.pos,
+					 BTREE_ITER_all_snapshots|
+					 BTREE_ITER_not_extents|
+					 BTREE_ITER_prefetch, k, ({
+		ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);
+		struct bch_io_opts *io_opts = bch2_move_get_io_opts(trans,
+					&snapshot_io_opts, iter.pos, &iter, k);
+		PTR_ERR_OR_ZERO(io_opts);
+	})) ?:
+	commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+		  bch2_clear_rebalance_needs_scan(trans, inum, cookie));
+
+	per_snapshot_io_opts_exit(&snapshot_io_opts);
 	bch2_move_stats_exit(&r->scan_stats, trans->c);
+
+	/*
+	 * Ensure that the rebalance_work entries we created are seen by the
+	 * next iteration of do_rebalance(), so we don't end up stuck in
+	 * rebalance_wait():
+	 */
+	atomic64_inc(&r->scan_stats.sectors_seen);
+	bch2_btree_write_buffer_flush_sync(trans);
+
 	return ret;
 }
 
-- 
2.47.2