From e8e22b7a94f37ec5758f4f0bdcfc193dddc21130 Mon Sep 17 00:00:00 2001 From: Kamil Wiatrowski Date: Tue, 11 Aug 2020 15:04:30 +0100 Subject: [PATCH] intel_pmu 2.0 - Provide more data with metric Improve readability of scaled value with more data provided by metric, use multivalued type (pmu_counter) to provide clearly the data: scaled value, raw value, time enabled and time running. Change-Id: Id9201123a5de647bba412f09145b8dbb169987ba Signed-off-by: Kamil Wiatrowski --- src/intel_pmu.c | 82 +++++++++++++++++++++++-------------------------- src/types.db | 2 +- 2 files changed, 39 insertions(+), 45 deletions(-) diff --git a/src/intel_pmu.c b/src/intel_pmu.c index 49bc633ef..408c800a6 100644 --- a/src/intel_pmu.c +++ b/src/intel_pmu.c @@ -253,20 +253,16 @@ static int pmu_config(oconfig_item_t *ci) { return 0; } -#if 0 -static void pmu_submit_multicounter(const char *cgroup, const char *event, - const uint32_t *event_type, counter_t value, - const struct efd *efd) { +static void pmu_submit_counters(const char *cgroup, const char *event, + const uint32_t *event_type, counter_t scaled, + counter_t raw, counter_t enabled, + counter_t running) { value_list_t vl = VALUE_LIST_INIT; - //vl.values = &(value_t){.counter = value}; - //vl.values_len = 1; - value_t values[] = { - {.counter = value}, - {.counter = efd->val[0]}, - {.counter = efd->val[1]}, - {.counter = efd->val[2]} - }; + value_t values[] = {{.counter = scaled}, + {.counter = raw}, + {.counter = enabled}, + {.counter = running}}; vl.values = values; vl.values_len = STATIC_ARRAY_SIZE(values); @@ -281,8 +277,8 @@ static void pmu_submit_multicounter(const char *cgroup, const char *event, plugin_dispatch_values(&vl); } -#endif +#if 0 static void pmu_submit_counter(const char *cgroup, const char *event, const uint32_t *event_type, counter_t value, meta_data_t *meta) { @@ -325,6 +321,7 @@ meta_data_t *pmu_meta_data_create(const struct efd *efd) { return meta; } +#endif static void pmu_dispatch_data(intel_pmu_entity_t 
*ent) { @@ -340,8 +337,10 @@ static void pmu_dispatch_data(intel_pmu_entity_t *ent) { for (size_t i = 0; i < ent->cgroups_count; i++) { core_group_t *cgroup = ent->cores.cgroups + i + ent->first_cgroup; uint64_t cgroup_value = 0; + uint64_t cgroup_value_raw = 0; + uint64_t cgroup_time_enabled = 0; + uint64_t cgroup_time_running = 0; int event_enabled_cgroup = 0; - meta_data_t *meta = NULL; for (size_t j = 0; j < cgroup->num_cores; j++) { int core = (int)cgroup->cores[j]; @@ -350,42 +349,47 @@ static void pmu_dispatch_data(intel_pmu_entity_t *ent) { event_enabled_cgroup++; + cgroup_value_raw += e->efd[core].val[0]; + cgroup_time_enabled += e->efd[core].val[1]; + cgroup_time_running += e->efd[core].val[2]; + /* If there are more events than counters, the kernel uses time * multiplexing. With multiplexing, at the end of the run, * the counter is scaled basing on total time enabled vs time running. * final_count = raw_count * time_enabled/time_running */ - if (e->extra.multi_pmu && !g_ctx.dispatch_cloned_pmus) + if (e->extra.multi_pmu && !g_ctx.dispatch_cloned_pmus) { cgroup_value += event_scaled_value_sum(e, core); - else { - cgroup_value += event_scaled_value(e, core); - /* get meta data with information about scaling */ - if (cgroup->num_cores == 1) { - DEBUG(PMU_PLUGIN - ": %s/%s = %lu = [raw]%lu * [enabled]%lu / [running]%lu", - e->event, cgroup->desc, cgroup_value, e->efd[core].val[0], - e->efd[core].val[1], e->efd[core].val[2]); - meta = pmu_meta_data_create(&e->efd[core]); + int num_clones = e->num_clones; + for (struct event *ce = e->next; ce && num_clones > 0; + ce = ce->next) { + if (ce->orig == e) { + cgroup_value_raw += ce->efd[core].val[0]; + cgroup_time_enabled += ce->efd[core].val[1]; + cgroup_time_running += ce->efd[core].val[2]; + } } + } else { + cgroup_value += event_scaled_value(e, core); } - // pmu_submit_multicounter(cgroup->desc, e->event, event_type, - // cgroup_value, &e->efd[core]); } if (event_enabled_cgroup > 0) { #if COLLECT_DEBUG if 
(event_type) - DEBUG(PMU_PLUGIN ": %s:type=%d/%s = %lu", e->event, *event_type, - cgroup->desc, cgroup_value); + DEBUG(PMU_PLUGIN ": %s:type=%d/%s = %lu (%lu * %lu / %lu)", e->event, + *event_type, cgroup->desc, cgroup_value, cgroup_value_raw, + cgroup_time_enabled, cgroup_time_running); else - DEBUG(PMU_PLUGIN ": %s/%s = %lu", e->event, cgroup->desc, - cgroup_value); + DEBUG(PMU_PLUGIN ": %s/%s = %lu (%lu * %lu / %lu)", e->event, + cgroup->desc, cgroup_value, cgroup_value_raw, + cgroup_time_enabled, cgroup_time_running); #endif - /* dispatch per core group value */ - pmu_submit_counter(cgroup->desc, e->event, event_type, cgroup_value, - meta); - meta_data_destroy(meta); + /* dispatch per core group values */ + pmu_submit_counters(cgroup->desc, e->event, event_type, cgroup_value, + cgroup_value_raw, cgroup_time_enabled, + cgroup_time_running); } } } @@ -763,16 +767,6 @@ static int pmu_shutdown(void) { DEBUG(PMU_PLUGIN ": %s:%d", __FUNCTION__, __LINE__); - /*pmu_free_events(g_ctx.event_list); - g_ctx.event_list = NULL; - for (size_t i = 0; i < g_ctx.hw_events_count; i++) { - sfree(g_ctx.hw_events[i]); - } - sfree(g_ctx.hw_events); - g_ctx.hw_events_count = 0; - - config_cores_cleanup(&g_ctx.cores);*/ - for (intel_pmu_entity_t *ent = g_ctx.entl; ent != NULL;) { intel_pmu_entity_t *tmp = ent; ent = ent->next; diff --git a/src/types.db b/src/types.db index fb55fccf9..442a2548c 100644 --- a/src/types.db +++ b/src/types.db @@ -42,7 +42,6 @@ controller value:GAUGE:0:18446744073709551615 cookies value:DERIVE:0:U count value:GAUGE:0:U counter value:COUNTER:U:U -#pmu_counter scaled:COUNTER:U:U, raw:COUNTER:U:U, enabled:COUNTER:U:U ,running:COUNTER:U:U cpu value:DERIVE:0:U cpu_affinity value:GAUGE:0:1 cpufreq value:GAUGE:0:U @@ -223,6 +222,7 @@ ping value:GAUGE:0:65535 ping_droprate value:GAUGE:0:1 ping_stddev value:GAUGE:0:65535 players value:GAUGE:0:1000000 +pmu_counter scaled:COUNTER:0:U, raw:COUNTER:0:U, enabled:COUNTER:0:U, running:COUNTER:0:U pools value:GAUGE:0:U power
value:GAUGE:U:U pressure value:GAUGE:0:U -- 2.47.2