         /* Don't show Latency column for non-parallel profiles by default. */
         if (!symbol_conf.prefer_latency && rep->total_samples &&
             rep->singlethreaded_samples * 100 / rep->total_samples >= 99)
-                perf_hpp__cancel_latency();
+                perf_hpp__cancel_latency(session->evlist);
 
         evlist__check_mem_load_aux(session->evlist);
         }
 }
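The heuristic above treats a profile as non-parallel when at least 99% of its samples were taken while only a single thread was running, and in that case drops the Latency column unless latency output was explicitly preferred (symbol_conf.prefer_latency). Below is a minimal stand-alone sketch of just that threshold check; the struct and function names are made up for illustration, and only the two counters mirror the report fields used in the hunk.

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the two struct report counters referenced above. */
struct sample_counts {
        uint64_t total_samples;
        uint64_t singlethreaded_samples;
};

/*
 * Sketch only, not part of the patch: true when 99% or more of the
 * samples are single-threaded.  The total_samples check guards the
 * integer division, exactly as in the call site above.
 */
static bool profile_is_mostly_single_threaded(const struct sample_counts *c)
{
        return c->total_samples &&
               c->singlethreaded_samples * 100 / c->total_samples >= 99;
}

With 1000 total samples, for instance, 990 single-threaded samples give 99 and the column is hidden, while 989 gives 98 after integer division and the column stays.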
-void perf_hpp__cancel_latency(void)
+void perf_hpp__cancel_latency(struct evlist *evlist)
 {
         struct perf_hpp_fmt *fmt, *lat, *acc, *tmp;
+        struct evsel *evsel;
 
         if (is_strict_order(field_order))
                 return;
 
         lat = &perf_hpp__format[PERF_HPP__LATENCY];
         acc = &perf_hpp__format[PERF_HPP__LATENCY_ACC];
 
         perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
                 if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
                         perf_hpp__column_unregister(fmt);
         }
+
+        evlist__for_each_entry(evlist, evsel) {
+                struct hists *hists = evsel__hists(evsel);
+                struct perf_hpp_list_node *node;
+
+                list_for_each_entry(node, &hists->hpp_formats, list) {
+                        perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
+                                if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
+                                        perf_hpp__column_unregister(fmt);
+                        }
+                }
+        }
 }
 
 void perf_hpp__setup_output_field(struct perf_hpp_list *list)
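With the change above, perf_hpp__cancel_latency() makes two passes: the pre-existing sweep over the global format list, and a new walk over every evsel's hists, whose hpp_formats nodes (used for hierarchy output, as far as I can tell) each carry their own format list. The sketch below pulls that second pass into a hypothetical helper; the helper name is invented, but every macro and type it touches is exactly what the hunk already uses, so it only builds inside the perf tree.

static void evlist__drop_fmt_pair(struct evlist *evlist,
                                  struct perf_hpp_fmt *lat,
                                  struct perf_hpp_fmt *acc)
{
        struct perf_hpp_fmt *fmt, *tmp;
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                struct hists *hists = evsel__hists(evsel);
                struct perf_hpp_list_node *node;

                /* Each node owns a format list; drop lat/acc from all of them. */
                list_for_each_entry(node, &hists->hpp_formats, list) {
                        perf_hpp_list__for_each_format_safe(&node->hpp, fmt, tmp) {
                                if (fmt_equal(lat, fmt) || fmt_equal(acc, fmt))
                                        perf_hpp__column_unregister(fmt);
                        }
                }
        }
}

Since the header below already has perf_hpp__cancel_cumulate() taking the evlist, a shared helper along these lines could keep both cancel paths doing the identical per-hists cleanup.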
 void perf_hpp__init(void);
 void perf_hpp__cancel_cumulate(struct evlist *evlist);
-void perf_hpp__cancel_latency(void);
+void perf_hpp__cancel_latency(struct evlist *evlist);
 void perf_hpp__setup_output_field(struct perf_hpp_list *list);
 void perf_hpp__reset_output_field(struct perf_hpp_list *list);
 void perf_hpp__append_sort_keys(struct perf_hpp_list *list);