From: Kent Overstreet
Date: Tue, 4 Feb 2025 01:15:52 +0000 (-0500)
Subject: bcachefs: Read/move path counter work
X-Git-Tag: v6.15-rc1~146^2~143
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=157ea5834133c02cb93e06e6a014cfc0b3b109e5;p=thirdparty%2Fkernel%2Flinux.git

bcachefs: Read/move path counter work

Reorganize counters a bit, grouping related counters together.

New counters:
- io_read_inline
- io_read_hole

Signed-off-by: Kent Overstreet
---
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 9b79cd18d16c8..7e484afea5516 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -93,7 +93,7 @@ static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struc
         return true;
 }
 
-static noinline void trace_move_extent_finish2(struct data_update *u,
+static noinline void trace_io_move_finish2(struct data_update *u,
                                                struct bkey_i *new,
                                                struct bkey_i *insert)
 {
@@ -113,11 +113,11 @@ static noinline void trace_move_extent_finish2(struct data_update *u,
         bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
         prt_newline(&buf);
 
-        trace_move_extent_finish(c, buf.buf);
+        trace_io_move_finish(c, buf.buf);
         printbuf_exit(&buf);
 }
 
-static void trace_move_extent_fail2(struct data_update *m,
+static void trace_io_move_fail2(struct data_update *m,
                                     struct bkey_s_c new,
                                     struct bkey_s_c wrote,
                                     struct bkey_i *insert,
@@ -128,7 +128,7 @@ static void trace_move_extent_fail2(struct data_update *m,
         struct printbuf buf = PRINTBUF;
         unsigned rewrites_found = 0;
 
-        if (!trace_move_extent_fail_enabled())
+        if (!trace_io_move_fail_enabled())
                 return;
 
         prt_str(&buf, msg);
@@ -168,7 +168,7 @@ static void trace_move_extent_fail2(struct data_update *m,
                 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
         }
 
-        trace_move_extent_fail(c, buf.buf);
+        trace_io_move_fail(c, buf.buf);
         printbuf_exit(&buf);
 }
 
@@ -216,7 +216,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
                 new = bkey_i_to_extent(bch2_keylist_front(keys));
 
                 if (!bch2_extents_match(k, old)) {
-                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
+                        trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i),
                                                 NULL, "no match:");
                         goto nowork;
                 }
@@ -256,7 +256,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
                 if (m->data_opts.rewrite_ptrs &&
                     !rewrites_found &&
                     bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
-                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
+                        trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
                         goto nowork;
                 }
 
@@ -273,7 +273,7 @@ restart_drop_conflicting_replicas:
                 }
 
                 if (!bkey_val_u64s(&new->k)) {
-                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
+                        trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
                         goto nowork;
                 }
 
@@ -387,9 +387,9 @@ restart_drop_extra_replicas:
                 if (!ret) {
                         bch2_btree_iter_set_pos(&iter, next_pos);
 
-                        this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
-                        if (trace_move_extent_finish_enabled())
-                                trace_move_extent_finish2(m, &new->k_i, insert);
+                        this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size);
+                        if (trace_io_move_finish_enabled())
+                                trace_io_move_finish2(m, &new->k_i, insert);
                 }
 err:
                 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -411,7 +411,7 @@ nowork:
                                      &m->stats->sectors_raced);
                 }
 
-                count_event(c, move_extent_fail);
+                count_event(c, io_move_fail);
 
                 bch2_btree_iter_advance(&iter);
                 goto next;
@@ -439,7 +439,7 @@ void bch2_data_update_read_done(struct data_update *m)
         m->op.crc = m->rbio.pick.crc;
         m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;
 
-        this_cpu_add(m->op.c->counters[BCH_COUNTER_move_extent_write], m->k.k->k.size);
+        this_cpu_add(m->op.c->counters[BCH_COUNTER_io_move_write], m->k.k->k.size);
 
         closure_call(&m->op.cl, bch2_write, NULL, NULL);
 }
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index cb30bdf52284e..33642c5bb9c70 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -181,7 +181,7 @@ static noinline void promote_start(struct bch_read_bio *rbio)
 {
         struct promote_op *op = container_of(rbio, struct promote_op, write.rbio);
 
-        trace_and_count(op->write.op.c, read_promote, &rbio->bio);
+        trace_and_count(op->write.op.c, io_read_promote, &rbio->bio);
 
         INIT_WORK(&op->work, promote_start_work);
         queue_work(rbio->c->write_ref_wq, &op->work);
@@ -320,7 +320,7 @@ static struct bch_read_bio *promote_alloc(struct btree_trans *trans,
         *read_full = promote_full;
         return promote;
 nopromote:
-        trace_read_nopromote(c, ret);
+        trace_io_read_nopromote(c, ret);
         return NULL;
 }
 
@@ -463,7 +463,9 @@ static void bch2_rbio_retry(struct work_struct *work)
         };
         struct bch_io_failures failed = { .nr = 0 };
 
-        trace_and_count(c, read_retry, &rbio->bio);
+        trace_io_read_retry(&rbio->bio);
+        this_cpu_add(c->counters[BCH_COUNTER_io_read_retry],
+                     bvec_iter_sectors(rbio->bvec_iter));
 
         if (rbio->retry == READ_RETRY_AVOID)
                 bch2_mark_io_failure(&failed, &rbio->pick);
@@ -802,7 +804,7 @@ static void bch2_read_endio(struct bio *bio)
 
         if (((rbio->flags & BCH_READ_retry_if_stale) && race_fault()) ||
             (ca && dev_ptr_stale(ca, &rbio->pick.ptr))) {
-                trace_and_count(c, read_reuse_race, &rbio->bio);
+                trace_and_count(c, io_read_reuse_race, &rbio->bio);
 
                 if (rbio->flags & BCH_READ_retry_if_stale)
                         bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
@@ -891,6 +893,8 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
                 swap(iter.bi_size, bytes);
                 bio_advance_iter(&orig->bio, &iter, bytes);
                 zero_fill_bio_iter(&orig->bio, iter);
+                this_cpu_add(c->counters[BCH_COUNTER_io_read_inline],
+                             bvec_iter_sectors(iter));
                 goto out_read_done;
         }
 retry_pick:
@@ -1069,10 +1073,12 @@ retry_pick:
         rbio->bio.bi_end_io = bch2_read_endio;
 
         if (rbio->bounce)
-                trace_and_count(c, read_bounce, &rbio->bio);
+                trace_and_count(c, io_read_bounce, &rbio->bio);
 
         if (!(flags & BCH_READ_data_update))
                 this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
+        else
+                this_cpu_add(c->counters[BCH_COUNTER_io_move_read], bio_sectors(&rbio->bio));
         bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
 
         /*
@@ -1085,7 +1091,7 @@ retry_pick:
 
         if (!(flags & (BCH_READ_in_retry|BCH_READ_last_fragment))) {
                 bio_inc_remaining(&orig->bio);
-                trace_and_count(c, read_split, &orig->bio);
+                trace_and_count(c, io_read_split, &orig->bio);
         }
 
         /*
@@ -1173,6 +1179,8 @@ err:
         goto out_read_done;
 
 hole:
+        this_cpu_add(c->counters[BCH_COUNTER_io_read_hole],
+                     bvec_iter_sectors(iter));
         /*
          * won't normally happen in the BCH_READ_data_update
          * (bch2_move_extent()) path, but if we retry and the extent we wanted
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 7614370f45909..1be1edfbc8302 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -38,28 +38,28 @@ const char * const bch2_data_ops_strs[] = {
         NULL
 };
 
-static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
+static void trace_io_move2(struct bch_fs *c, struct bkey_s_c k,
                                struct bch_io_opts *io_opts,
                                struct data_update_opts *data_opts)
 {
-        if (trace_move_extent_enabled()) {
+        if (trace_io_move_enabled()) {
                 struct printbuf buf = PRINTBUF;
 
                 bch2_bkey_val_to_text(&buf, c, k);
                 prt_newline(&buf);
                 bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
-                trace_move_extent(c, buf.buf);
+                trace_io_move(c, buf.buf);
                 printbuf_exit(&buf);
         }
 }
 
-static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
+static void trace_io_move_read2(struct bch_fs *c, struct bkey_s_c k)
 {
-        if (trace_move_extent_read_enabled()) {
+        if (trace_io_move_read_enabled()) {
                 struct printbuf buf = PRINTBUF;
 
                 bch2_bkey_val_to_text(&buf, c, k);
-                trace_move_extent_read(c, buf.buf);
+                trace_io_move_read(c, buf.buf);
                 printbuf_exit(&buf);
         }
 }
@@ -132,12 +132,12 @@ static void move_write(struct moving_io *io)
                 return;
         }
 
-        if (trace_move_extent_write_enabled()) {
+        if (trace_io_move_write_enabled()) {
                 struct bch_fs *c = io->write.op.c;
                 struct printbuf buf = PRINTBUF;
 
                 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
-                trace_move_extent_write(c, buf.buf);
+                trace_io_move_write(c, buf.buf);
                 printbuf_exit(&buf);
         }
 
@@ -273,7 +273,8 @@ int bch2_move_extent(struct moving_context *ctxt,
         struct bch_fs *c = trans->c;
         int ret = -ENOMEM;
 
-        trace_move_extent2(c, k, &io_opts, &data_opts);
+        trace_io_move2(c, k, &io_opts, &data_opts);
+        this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
 
         if (ctxt->stats)
                 ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
@@ -338,9 +339,7 @@ int bch2_move_extent(struct moving_context *ctxt,
                 atomic_inc(&io->b->count);
         }
 
-        this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
-        this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
-        trace_move_extent_read2(c, k);
+        trace_io_move_read2(c, k);
 
         mutex_lock(&ctxt->lock);
         atomic_add(io->read_sectors, &ctxt->read_sectors);
@@ -374,15 +373,15 @@ err:
             bch2_err_matches(ret, BCH_ERR_transaction_restart))
                 return ret;
 
-        count_event(c, move_extent_start_fail);
+        count_event(c, io_move_start_fail);
 
-        if (trace_move_extent_start_fail_enabled()) {
+        if (trace_io_move_start_fail_enabled()) {
                 struct printbuf buf = PRINTBUF;
 
                 bch2_bkey_val_to_text(&buf, c, k);
                 prt_str(&buf, ": ");
                 prt_str(&buf, bch2_err_str(ret));
-                trace_move_extent_start_fail(c, buf.buf);
+                trace_io_move_start_fail(c, buf.buf);
                 printbuf_exit(&buf);
         }
         return ret;
diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h
index d0391c5d4c487..c82a891026d35 100644
--- a/fs/bcachefs/sb-counters_format.h
+++ b/fs/bcachefs/sb-counters_format.h
@@ -9,8 +9,20 @@ enum counters_flags {
 
 #define BCH_PERSISTENT_COUNTERS()                               \
         x(io_read,                      0,      TYPE_SECTORS)   \
+        x(io_read_inline,               80,     TYPE_SECTORS)   \
+        x(io_read_hole,                 81,     TYPE_SECTORS)   \
+        x(io_read_promote,              30,     TYPE_COUNTER)   \
+        x(io_read_bounce,               31,     TYPE_COUNTER)   \
+        x(io_read_split,                33,     TYPE_COUNTER)   \
+        x(io_read_reuse_race,           34,     TYPE_COUNTER)   \
+        x(io_read_retry,                32,     TYPE_COUNTER)   \
         x(io_write,                     1,      TYPE_SECTORS)   \
         x(io_move,                      2,      TYPE_SECTORS)   \
+        x(io_move_read,                 35,     TYPE_SECTORS)   \
+        x(io_move_write,                36,     TYPE_SECTORS)   \
+        x(io_move_finish,               37,     TYPE_SECTORS)   \
+        x(io_move_fail,                 38,     TYPE_COUNTER)   \
+        x(io_move_start_fail,           39,     TYPE_COUNTER)   \
         x(bucket_invalidate,            3,      TYPE_COUNTER)   \
         x(bucket_discard,               4,      TYPE_COUNTER)   \
         x(bucket_discard_fast,          79,     TYPE_COUNTER)   \
@@ -39,16 +51,6 @@ enum counters_flags {
         x(journal_reclaim_finish,       27,     TYPE_COUNTER)   \
         x(journal_reclaim_start,        28,     TYPE_COUNTER)   \
         x(journal_write,                29,     TYPE_COUNTER)   \
-        x(read_promote,                 30,     TYPE_COUNTER)   \
-        x(read_bounce,                  31,     TYPE_COUNTER)   \
-        x(read_split,                   33,     TYPE_COUNTER)   \
-        x(read_retry,                   32,     TYPE_COUNTER)   \
-        x(read_reuse_race,              34,     TYPE_COUNTER)   \
-        x(move_extent_read,             35,     TYPE_SECTORS)   \
-        x(move_extent_write,            36,     TYPE_SECTORS)   \
-        x(move_extent_finish,           37,     TYPE_SECTORS)   \
-        x(move_extent_fail,             38,     TYPE_COUNTER)   \
-        x(move_extent_start_fail,       39,     TYPE_COUNTER)   \
         x(copygc,                       40,     TYPE_COUNTER)   \
         x(copygc_wait,                  41,     TYPE_COUNTER)   \
         x(gc_gens_end,                  42,     TYPE_COUNTER)   \
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 2f25dcfc0e25b..5718988dd7d63 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -295,12 +295,12 @@ TRACE_EVENT(write_super,
 
 /* io.c: */
 
-DEFINE_EVENT(bio, read_promote,
+DEFINE_EVENT(bio, io_read_promote,
         TP_PROTO(struct bio *bio),
         TP_ARGS(bio)
 );
 
-TRACE_EVENT(read_nopromote,
+TRACE_EVENT(io_read_nopromote,
         TP_PROTO(struct bch_fs *c, int ret),
         TP_ARGS(c, ret),
 
@@ -319,22 +319,22 @@ TRACE_EVENT(read_nopromote,
                   __entry->ret)
 );
 
-DEFINE_EVENT(bio, read_bounce,
+DEFINE_EVENT(bio, io_read_bounce,
         TP_PROTO(struct bio *bio),
         TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bio, read_split,
+DEFINE_EVENT(bio, io_read_split,
         TP_PROTO(struct bio *bio),
         TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bio, read_retry,
+DEFINE_EVENT(bio, io_read_retry,
         TP_PROTO(struct bio *bio),
         TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bio, read_reuse_race,
+DEFINE_EVENT(bio, io_read_reuse_race,
         TP_PROTO(struct bio *bio),
         TP_ARGS(bio)
 );
@@ -797,32 +797,32 @@ TRACE_EVENT(bucket_invalidate,
 
 /* Moving IO */
 
-DEFINE_EVENT(fs_str, move_extent,
+DEFINE_EVENT(fs_str, io_move,
         TP_PROTO(struct bch_fs *c, const char *str),
         TP_ARGS(c, str)
 );
 
-DEFINE_EVENT(fs_str, move_extent_read,
+DEFINE_EVENT(fs_str, io_move_read,
         TP_PROTO(struct bch_fs *c, const char *str),
         TP_ARGS(c, str)
 );
 
-DEFINE_EVENT(fs_str, move_extent_write,
+DEFINE_EVENT(fs_str, io_move_write,
         TP_PROTO(struct bch_fs *c, const char *str),
         TP_ARGS(c, str)
 );
 
-DEFINE_EVENT(fs_str, move_extent_finish,
+DEFINE_EVENT(fs_str, io_move_finish,
         TP_PROTO(struct bch_fs *c, const char *str),
         TP_ARGS(c, str)
 );
 
-DEFINE_EVENT(fs_str, move_extent_fail,
+DEFINE_EVENT(fs_str, io_move_fail,
         TP_PROTO(struct bch_fs *c, const char *str),
         TP_ARGS(c, str)
 );
 
-DEFINE_EVENT(fs_str, move_extent_start_fail,
+DEFINE_EVENT(fs_str, io_move_start_fail,
         TP_PROTO(struct bch_fs *c, const char *str),
         TP_ARGS(c, str)
 );
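
Note (not part of the patch): the regrouping in sb-counters_format.h is safe because BCH_PERSISTENT_COUNTERS() is an x-macro list in which each x(name, id, type) entry carries an explicit numeric id, so a counter's persistent identity comes from that id rather than from its position in the list. That is why io_read_promote can keep id 30 while moving up next to io_read. A minimal standalone sketch of the pattern, using hypothetical EXAMPLE_* names rather than the real bcachefs definitions:

/*
 * Illustrative sketch only: hypothetical EXAMPLE_* names, not the real
 * bcachefs definitions.  The same list expands once into an enum (the
 * in-memory index) and once into tables keyed by that enum; the explicit
 * id is what stays stable when entries are renamed or regrouped.
 */
#define EXAMPLE_COUNTERS()                      \
        x(io_read,              0)              \
        x(io_read_inline,       80)             \
        x(io_read_hole,         81)             \
        x(io_read_promote,      30)

enum example_counter {
#define x(name, id)     EXAMPLE_COUNTER_##name,
        EXAMPLE_COUNTERS()
#undef x
        EXAMPLE_COUNTER_NR,
};

/* human-readable names, indexed by the enum above */
static const char * const example_counter_names[] = {
#define x(name, id)     [EXAMPLE_COUNTER_##name] = #name,
        EXAMPLE_COUNTERS()
#undef x
};

/* stable ids, indexed by the enum above */
static const unsigned example_counter_stable_id[] = {
#define x(name, id)     [EXAMPLE_COUNTER_##name] = id,
        EXAMPLE_COUNTERS()
#undef x
};

The real list additionally carries a TYPE_SECTORS/TYPE_COUNTER flag per entry, as shown in the sb-counters_format.h hunk above.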