From bdf96c4ecd69523d97c98e47284caddde20b1df7 Mon Sep 17 00:00:00 2001
From: Ian Rogers
Date: Thu, 13 Nov 2025 10:05:09 -0800
Subject: [PATCH] perf tool_pmu: Use old_count when computing count values for
 time events

When running in interval mode, every third count of a time event isn't
shown properly:
```
$ perf stat -e duration_time -a -I 1000
  1.001082862      1,002,290,425      duration_time
  2.004264262      1,003,183,516      duration_time
  3.007381401                         duration_time
  4.011160141      1,003,705,631      duration_time
  5.014515385      1,003,290,110      duration_time
  6.018539680                         duration_time
  7.022065321      1,003,591,720      duration_time
```
The regression was bisected to an earlier fix, commit 68cb1567439f
("perf tool_pmu: Fix aggregation on duration_time"). The issue is that
the event's enabled and running times match those of old_count,
producing a delta of 0, which is treated as indicating an error.

Fixes: 68cb1567439f ("perf tool_pmu: Fix aggregation on duration_time")
Signed-off-by: Ian Rogers
Signed-off-by: Namhyung Kim
---
 tools/perf/builtin-stat.c  | 16 +++++++++--
 tools/perf/util/tool_pmu.c | 59 +++++++++++++++++++++----------------
 2 files changed, 46 insertions(+), 29 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 095016b2209e9..654f840f7a2f1 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -283,17 +283,27 @@ static int read_single_counter(struct evsel *counter, int cpu_map_idx, int threa
 	if (err && cpu_map_idx == 0 &&
 	    (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME ||
 	     evsel__tool_event(counter) == TOOL_PMU__EVENT_SYSTEM_TIME)) {
-		u64 val, *start_time;
 		struct perf_counts_values *count = perf_counts(counter->counts,
 							       cpu_map_idx,
 							       thread);
+		struct perf_counts_values *old_count = NULL;
+		u64 val;
+
+		if (counter->prev_raw_counts)
+			old_count = perf_counts(counter->prev_raw_counts, cpu_map_idx, thread);
 
-		start_time = xyarray__entry(counter->start_times, cpu_map_idx, thread);
 		if (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME)
 			val = ru_stats.ru_utime_usec_stat.mean;
 		else
 			val = ru_stats.ru_stime_usec_stat.mean;
-		count->ena = count->run = *start_time + val;
+		count->val = val;
+		if (old_count) {
+			count->run = old_count->run + 1;
+			count->ena = old_count->ena + 1;
+		} else {
+			count->run++;
+			count->ena++;
+		}
 		return 0;
 	}
 	return err;
diff --git a/tools/perf/util/tool_pmu.c b/tools/perf/util/tool_pmu.c
index a72c665ee6440..6a9df3dc0e076 100644
--- a/tools/perf/util/tool_pmu.c
+++ b/tools/perf/util/tool_pmu.c
@@ -446,16 +446,39 @@ bool tool_pmu__read_event(enum tool_pmu_event ev,
 	}
 }
 
+static void perf_counts__update(struct perf_counts_values *count,
+				const struct perf_counts_values *old_count,
+				bool raw, u64 val)
+{
+	/*
+	 * The values of enabled and running must make a ratio of 100%. The
+	 * exact values don't matter as long as they are non-zero to avoid
+	 * issues with evsel__count_has_error.
+	 */
+	if (old_count) {
+		count->val = raw ? val : old_count->val + val;
+		count->run = old_count->run + 1;
+		count->ena = old_count->ena + 1;
+		count->lost = old_count->lost;
+	} else {
+		count->val = val;
+		count->run++;
+		count->ena++;
+		count->lost = 0;
+	}
+}
+
 int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
 {
 	__u64 *start_time, cur_time, delta_start;
-	u64 val;
-	int fd, err = 0;
+	int err = 0;
 	struct perf_counts_values *count, *old_count = NULL;
 	bool adjust = false;
 	enum tool_pmu_event ev = evsel__tool_event(evsel);
 
 	count = perf_counts(evsel->counts, cpu_map_idx, thread);
+	if (evsel->prev_raw_counts)
+		old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
 
 	switch (ev) {
 	case TOOL_PMU__EVENT_HAS_PMEM:
@@ -466,12 +489,11 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
 	case TOOL_PMU__EVENT_NUM_PACKAGES:
 	case TOOL_PMU__EVENT_SLOTS:
 	case TOOL_PMU__EVENT_SMT_ON:
-	case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
 	case TOOL_PMU__EVENT_CORE_WIDE:
 	case TOOL_PMU__EVENT_TARGET_CPU:
-		if (evsel->prev_raw_counts)
-			old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
-		val = 0;
+	case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ: {
+		u64 val = 0;
+
 		if (cpu_map_idx == 0 && thread == 0) {
 			if (!tool_pmu__read_event(ev, evsel,
 						  stat_config.system_wide,
@@ -481,16 +503,9 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
 						val = 0;
 			}
 		}
-		if (old_count) {
-			count->val = old_count->val + val;
-			count->run = old_count->run + 1;
-			count->ena = old_count->ena + 1;
-		} else {
-			count->val = val;
-			count->run++;
-			count->ena++;
-		}
+		perf_counts__update(count, old_count, /*raw=*/false, val);
 		return 0;
+	}
 	case TOOL_PMU__EVENT_DURATION_TIME:
 		/*
 		 * Pretend duration_time is only on the first CPU and thread, or
@@ -506,9 +521,9 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
 	case TOOL_PMU__EVENT_USER_TIME:
 	case TOOL_PMU__EVENT_SYSTEM_TIME: {
 		bool system = evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME;
+		int fd = FD(evsel, cpu_map_idx, thread);
 
 		start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
-		fd = FD(evsel, cpu_map_idx, thread);
 		lseek(fd, SEEK_SET, 0);
 		if (evsel->pid_stat) {
 			/* The event exists solely on 1 CPU. */
@@ -542,17 +557,9 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
 	if (adjust) {
 		__u64 ticks_per_sec = sysconf(_SC_CLK_TCK);
 
-		delta_start *= 1000000000 / ticks_per_sec;
+		delta_start *= 1e9 / ticks_per_sec;
 	}
 
-	count->val = delta_start;
-	count->lost = 0;
-	/*
-	 * The values of enabled and running must make a ratio of 100%. The
-	 * exact values don't matter as long as they are non-zero to avoid
-	 * issues with evsel__count_has_error.
-	 */
-	count->ena++;
-	count->run++;
+	perf_counts__update(count, old_count, /*raw=*/true, delta_start);
 	return 0;
 }
-- 
2.47.3
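As context for the failure mode described in the commit message, here is a
minimal standalone sketch of why the enabled/running times must advance
between interval reads. The struct, the interval_has_error() helper, and the
numbers are illustrative assumptions, not perf's actual perf_counts_values
handling or its evsel__count_has_error implementation; they only model the
"delta of 0 is treated as an error" behaviour the patch works around by
bumping run/ena relative to old_count on every read.
```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for perf's per-CPU/thread counter values. */
struct counts {
	uint64_t val;
	uint64_t ena;	/* time enabled */
	uint64_t run;	/* time running */
};

/*
 * Interval mode keeps the previous raw counts and reports deltas. If a
 * tool event never advances ena/run, the per-interval delta is 0 and the
 * read is treated as an error, so no count is printed for that interval.
 */
static bool interval_has_error(const struct counts *cur, const struct counts *prev)
{
	return (cur->ena - prev->ena) == 0 || (cur->run - prev->run) == 0;
}

int main(void)
{
	struct counts prev = { .val = 1000, .ena = 5, .run = 5 };
	/* Before the fix: the new read's ena/run can equal old_count's. */
	struct counts cur_broken = { .val = 2000, .ena = 5, .run = 5 };
	/* After the fix: each read bumps ena/run by one relative to old_count. */
	struct counts cur_fixed = { .val = 2000, .ena = prev.ena + 1, .run = prev.run + 1 };

	printf("broken read flagged as error: %d\n", interval_has_error(&cur_broken, &prev));
	printf("fixed read flagged as error:  %d\n", interval_has_error(&cur_fixed, &prev));
	return 0;
}
```
The exact ena/run values are irrelevant here, matching the comment added in
perf_counts__update(): only a non-zero delta matters, which is why the patch
increments them by one per read rather than deriving them from start_time.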