if (err && cpu_map_idx == 0 &&
(evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME ||
evsel__tool_event(counter) == TOOL_PMU__EVENT_SYSTEM_TIME)) {
- u64 val, *start_time;
struct perf_counts_values *count =
perf_counts(counter->counts, cpu_map_idx, thread);
+ struct perf_counts_values *old_count = NULL;
+ u64 val;
+
+ if (counter->prev_raw_counts)
+ old_count = perf_counts(counter->prev_raw_counts, cpu_map_idx, thread);
- start_time = xyarray__entry(counter->start_times, cpu_map_idx, thread);
if (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME)
val = ru_stats.ru_utime_usec_stat.mean;
else
val = ru_stats.ru_stime_usec_stat.mean;
- count->ena = count->run = *start_time + val;
+
count->val = val;
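+ /* Keep enabled/running non-zero so evsel__count_has_error() is not triggered. */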
+ if (old_count) {
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ } else {
+ count->run++;
+ count->ena++;
+ }
return 0;
}
return err;
}
}
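+/*
+ * Update 'count' from 'val': either take 'val' as a raw snapshot or add it on
+ * top of the previously read 'old_count', keeping ena/run/lost consistent.
+ */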
+static void perf_counts__update(struct perf_counts_values *count,
+ const struct perf_counts_values *old_count,
+ bool raw, u64 val)
+{
+ /*
+ * The values of enabled and running must make a ratio of 100%. The
+ * exact values don't matter as long as they are non-zero to avoid
+ * issues with evsel__count_has_error.
+ */
+ if (old_count) {
+ count->val = raw ? val : old_count->val + val;
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ count->lost = old_count->lost;
+ } else {
+ count->val = val;
+ count->run++;
+ count->ena++;
+ count->lost = 0;
+ }
+}
+
int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
{
__u64 *start_time, cur_time, delta_start;
- u64 val;
- int fd, err = 0;
+ int err = 0;
struct perf_counts_values *count, *old_count = NULL;
bool adjust = false;
enum tool_pmu_event ev = evsel__tool_event(evsel);
count = perf_counts(evsel->counts, cpu_map_idx, thread);
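+ /* Previously read values, if any, are the base for accumulating counts. */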
+ if (evsel->prev_raw_counts)
+ old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
switch (ev) {
case TOOL_PMU__EVENT_HAS_PMEM:
case TOOL_PMU__EVENT_NUM_PACKAGES:
case TOOL_PMU__EVENT_SLOTS:
case TOOL_PMU__EVENT_SMT_ON:
- case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
case TOOL_PMU__EVENT_CORE_WIDE:
case TOOL_PMU__EVENT_TARGET_CPU:
- if (evsel->prev_raw_counts)
- old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
- val = 0;
+ case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ: {
+ u64 val = 0;
+
if (cpu_map_idx == 0 && thread == 0) {
if (!tool_pmu__read_event(ev, evsel,
stat_config.system_wide,
val = 0;
}
}
- if (old_count) {
- count->val = old_count->val + val;
- count->run = old_count->run + 1;
- count->ena = old_count->ena + 1;
- } else {
- count->val = val;
- count->run++;
- count->ena++;
- }
+ perf_counts__update(count, old_count, /*raw=*/false, val);
return 0;
+ }
case TOOL_PMU__EVENT_DURATION_TIME:
/*
* Pretend duration_time is only on the first CPU and thread, or
case TOOL_PMU__EVENT_USER_TIME:
case TOOL_PMU__EVENT_SYSTEM_TIME: {
bool system = evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME;
+ int fd = FD(evsel, cpu_map_idx, thread);
start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
- fd = FD(evsel, cpu_map_idx, thread);
lseek(fd, SEEK_SET, 0);
if (evsel->pid_stat) {
/* The event exists solely on 1 CPU. */
if (adjust) {
__u64 ticks_per_sec = sysconf(_SC_CLK_TCK);
delta_start *= 1000000000 / ticks_per_sec;
}
- count->val = delta_start;
- count->lost = 0;
- /*
- * The values of enabled and running must make a ratio of 100%. The
- * exact values don't matter as long as they are non-zero to avoid
- * issues with evsel__count_has_error.
- */
- count->ena++;
- count->run++;
+ perf_counts__update(count, old_count, /*raw=*/true, delta_start);
return 0;
}