stats: Move group by setting parsing earlier
author    Josef 'Jeff' Sipek <jeff.sipek@open-xchange.com>
          Fri, 28 Feb 2020 15:22:56 +0000 (10:22 -0500)
committer jeff.sipek <jeff.sipek@open-xchange.com>
          Fri, 13 Mar 2020 08:25:17 +0000 (08:25 +0000)
Instead of splitting and parsing the group_by setting in the metric code, we
now do it in the settings code.  This makes the following commits much simpler.
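
For illustration only (not part of the commit): with this change, a group_by
value such as "cmd_name user" is parsed once at settings-check time into one
entry per field, using the struct and enum added to stats-settings.h below.
The field names here are hypothetical, and only the discrete grouping function
exists at this point:

	/* hypothetical sketch: result of parse_metric_group_by() for
	   group_by = "cmd_name user" */
	struct stats_metric_settings_group_by parsed[] = {
		{ .field = "cmd_name", .func = STATS_METRIC_GROUPBY_DISCRETE },
		{ .field = "user",     .func = STATS_METRIC_GROUPBY_DISCRETE },
	};

stats_metrics_add_set() then points metric->group_by at this parsed array
instead of re-splitting the raw string at event-handling time.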

src/stats/stats-metrics.c
src/stats/stats-metrics.h
src/stats/stats-settings.c
src/stats/stats-settings.h

diff --git a/src/stats/stats-metrics.c b/src/stats/stats-metrics.c
index d1f0a4a92aa3d384700819f2484041e26be24e86..869e1a78350a08c2ae65226b361b6cd4f6f36753 100644
--- a/src/stats/stats-metrics.c
+++ b/src/stats/stats-metrics.c
@@ -153,8 +153,9 @@ static void stats_metrics_add_set(struct stats_metrics *metrics,
        fields = t_strsplit_spaces(set->fields, " ");
        metric = stats_metric_alloc(metrics->pool, set->name, fields);
 
-       if (*set->group_by != '\0')
-               metric->group_by = (const char *const *)p_strsplit_spaces(metrics->pool, set->group_by, " ");
+       if (array_is_created(&set->parsed_group_by))
+               metric->group_by = array_get(&set->parsed_group_by,
+                                            &metric->group_by_count);
 
        array_push_back(&metrics->metrics, &metric);
 
@@ -345,32 +346,46 @@ stats_metric_sub_metric_alloc(struct metric *metric, const char *name, pool_t po
        return sub_metric;
 }
 
+static bool
+stats_metric_group_by_discrete(const struct event_field *field,
+                              struct metric_value *value)
+{
+       switch (field->value_type) {
+       case EVENT_FIELD_VALUE_TYPE_STR:
+               value->type = METRIC_VALUE_TYPE_STR;
+               /* use sha1 of value to avoid excessive memory usage in case the
+                  actual value is quite long */
+               sha1_get_digest(field->value.str, strlen(field->value.str),
+                               value->hash);
+               return TRUE;
+       case EVENT_FIELD_VALUE_TYPE_INTMAX:
+               value->type = METRIC_VALUE_TYPE_INT;
+               value->intmax = field->value.intmax;
+               return TRUE;
+       case EVENT_FIELD_VALUE_TYPE_TIMEVAL:
+               return FALSE;
+       }
+
+       i_unreached();
+}
+
 static void
 stats_metric_group_by(struct metric *metric, struct event *event, pool_t pool)
 {
+       const struct stats_metric_settings_group_by *group_by = &metric->group_by[0];
+       const struct event_field *field = event_find_field(event, group_by->field);
        struct metric *sub_metric;
-       const char *const *group = metric->group_by;
-       const struct event_field *field =
-               event_find_field(event, *group);
        struct metric_value value;
 
        /* ignore missing field */
        if (field == NULL)
                return;
-       switch (field->value_type) {
-       case EVENT_FIELD_VALUE_TYPE_STR:
-               value.type = METRIC_VALUE_TYPE_STR;
-               /* use sha1 of value to avoid excessive memory usage in case the
-                  actual value is quite long */
-               sha1_get_digest(field->value.str, strlen(field->value.str),
-                               value.hash);
-               break;
-       case EVENT_FIELD_VALUE_TYPE_INTMAX:
-               value.type = METRIC_VALUE_TYPE_INT;
-               value.intmax = field->value.intmax;
+
+       switch (group_by->func) {
+       case STATS_METRIC_GROUPBY_DISCRETE:
+               if (!stats_metric_group_by_discrete(field, &value))
+                       return;
                break;
-       case EVENT_FIELD_VALUE_TYPE_TIMEVAL:
-               return;
        }
 
        if (!array_is_created(&metric->sub_metrics))
@@ -388,8 +403,10 @@ stats_metric_group_by(struct metric *metric, struct event *event, pool_t pool)
                        i_unreached();
                sub_metric = stats_metric_sub_metric_alloc(metric, value_label,
                                                           pool);
-               if (group[1] != NULL)
-                       sub_metric->group_by = group+1;
+               if (metric->group_by_count > 1) {
+                       sub_metric->group_by_count = metric->group_by_count - 1;
+                       sub_metric->group_by = &metric->group_by[1];
+               }
                sub_metric->group_value.intmax = value.intmax;
                memcpy(sub_metric->group_value.hash, value.hash, SHA1_RESULTLEN);
        } T_END;
diff --git a/src/stats/stats-metrics.h b/src/stats/stats-metrics.h
index 6ec100d1744590d4c1556c35d20ec6633d6d9e90..5481acb3e28fd38a7825afb95f6172f092aa3090 100644
--- a/src/stats/stats-metrics.h
+++ b/src/stats/stats-metrics.h
@@ -88,7 +88,8 @@ struct metric {
        unsigned int fields_count;
        struct metric_field *fields;
 
-       const char *const *group_by;
+       unsigned int group_by_count;
+       const struct stats_metric_settings_group_by *group_by;
        struct metric_value group_value;
        ARRAY(struct metric *) sub_metrics;
 
diff --git a/src/stats/stats-settings.c b/src/stats/stats-settings.c
index 306ce2ee693e0ddc5015007d765707b5f801752e..f6b05887bf1345461f3be6e3e60246865f487a68 100644
--- a/src/stats/stats-settings.c
+++ b/src/stats/stats-settings.c
@@ -280,8 +280,43 @@ static bool stats_exporter_settings_check(void *_set, pool_t pool ATTR_UNUSED,
        return TRUE;
 }
 
-static bool stats_metric_settings_check(void *_set, pool_t pool ATTR_UNUSED,
-                                       const char **error_r)
+static bool parse_metric_group_by(struct stats_metric_settings *set,
+                                 pool_t pool, const char **error_r)
+{
+       const char *const *tmp = t_strsplit_spaces(set->group_by, " ");
+
+       if (tmp[0] == NULL)
+               return TRUE;
+
+       p_array_init(&set->parsed_group_by, pool, str_array_length(tmp));
+
+       /* For each group_by field */
+       for (; *tmp != NULL; tmp++) {
+               struct stats_metric_settings_group_by group_by;
+               const char *const *params;
+
+               i_zero(&group_by);
+
+               /* <field name>:<aggregation func>... */
+               params = t_strsplit(*tmp, ":");
+
+               if (params[1] == NULL) {
+                       group_by.func = STATS_METRIC_GROUPBY_DISCRETE;
+               } else {
+                       *error_r = t_strdup_printf("unknown aggregation function "
+                                                  "'%s' on field '%s'", params[1], params[0]);
+                       return FALSE;
+               }
+
+               group_by.field = p_strdup(pool, params[0]);
+
+               array_push_back(&set->parsed_group_by, &group_by);
+       }
+
+       return TRUE;
+}
+
+static bool stats_metric_settings_check(void *_set, pool_t pool, const char **error_r)
 {
        struct stats_metric_settings *set = _set;
        const char *p;
@@ -302,6 +337,9 @@ static bool stats_metric_settings_check(void *_set, pool_t pool ATTR_UNUSED,
                }
        }
 
+       if (!parse_metric_group_by(set, pool, error_r))
+               return FALSE;
+
        return TRUE;
 }
 
diff --git a/src/stats/stats-settings.h b/src/stats/stats-settings.h
index 00086a5863f132beafe136db871a8d1fe4ed7193..38bfbf38532ca9186ef581f2241953e4b31cb3cf 100644
--- a/src/stats/stats-settings.h
+++ b/src/stats/stats-settings.h
@@ -69,6 +69,17 @@ struct stats_exporter_settings {
        enum event_exporter_time_fmt parsed_time_format;
 };
 
+/* <settings checks> */
+enum stats_metric_group_by_func {
+       STATS_METRIC_GROUPBY_DISCRETE = 0,
+};
+
+struct stats_metric_settings_group_by {
+       const char *field;
+       enum stats_metric_group_by_func func;
+};
+/* </settings checks> */
+
 struct stats_metric_settings {
        const char *name;
        const char *event_name;
@@ -79,6 +90,7 @@ struct stats_metric_settings {
        ARRAY(const char *) filter;
 
        unsigned int parsed_source_linenum;
+       ARRAY(struct stats_metric_settings_group_by) parsed_group_by;
 
        /* exporter related fields */
        const char *exporter;