REGTESTS: contrib/prometheus-exporter: test NaN values
author    William Dauchy <wdauchy@gmail.com>
          Thu, 18 Feb 2021 22:05:33 +0000 (23:05 +0100)
committer Christopher Faulet <cfaulet@haproxy.com>
          Fri, 19 Feb 2021 17:03:59 +0000 (18:03 +0100)
In order to make sure we detect when we change the default behaviour of
some metrics, test for the NaN value where it is expected.

The metrics covered here are the ones whose default value changed with
our last rework: they now report NaN unless the appropriate
configuration is set.

Signed-off-by: William Dauchy <wdauchy@gmail.com>
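
For illustration, the exporter renders these metrics in the Prometheus text
exposition format; the expectations added below match output of this shape for
a server with no relevant option set (s1) versus one configured with
"maxqueue 10" (s2):

    haproxy_server_queue_limit{proxy="be",server="s1"} NaN
    haproxy_server_queue_limit{proxy="be",server="s2"} 10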
reg-tests/contrib/prometheus.vtc

diff --git a/reg-tests/contrib/prometheus.vtc b/reg-tests/contrib/prometheus.vtc
index cdd0f0f5588940d9fb711e0b31fd6a20239aa703..1ebeb29cb98354ecad9eb181b2c3ebd51d3cf792 100644
--- a/reg-tests/contrib/prometheus.vtc
+++ b/reg-tests/contrib/prometheus.vtc
@@ -10,6 +10,11 @@ server s1 {
        txresp
 } -repeat 2 -start
 
+server s2 {
+       rxreq
+       txresp
+} -repeat 2 -start
+
 haproxy h1 -conf {
     defaults
        mode http
@@ -29,11 +34,13 @@ haproxy h1 -conf {
     backend be
        stick-table type ip size 1m expire 10s store http_req_rate(10s)
        server s1 ${s1_addr}:${s1_port}
+       server s2 ${s2_addr}:${s2_port} check maxqueue 10 maxconn 12 pool-max-conn 42
 } -start
 
 client c1 -connect ${h1_stats_sock} {
        txreq -url "/metrics"
        rxresp
+       # test general metrics
        expect resp.status == 200
        expect resp.body ~ ".*haproxy_process.*"
        expect resp.body ~ ".*haproxy_frontend.*"
@@ -42,6 +49,29 @@ client c1 -connect ${h1_stats_sock} {
        expect resp.body ~ ".*haproxy_server.*"
        expect resp.body ~ ".*haproxy_sticktable.*"
 
+       # test expected NaN values
+       expect resp.body ~ ".*haproxy_server_check_failures_total{proxy=\"be\",server=\"s1\"} NaN.*"
+       expect resp.body ~ ".*haproxy_server_check_up_down_total{proxy=\"be\",server=\"s1\"} NaN.*"
+       expect resp.body ~ ".*haproxy_server_check_failures_total{proxy=\"be\",server=\"s2\"} 0.*"
+       expect resp.body ~ ".*haproxy_server_check_up_down_total{proxy=\"be\",server=\"s2\"} 0.*"
+
+       expect resp.body ~ ".*haproxy_server_queue_limit{proxy=\"be\",server=\"s1\"} NaN.*"
+       expect resp.body ~ ".*haproxy_server_queue_limit{proxy=\"be\",server=\"s2\"} 10.*"
+
+       expect resp.body ~ ".*haproxy_server_limit_sessions{proxy=\"be\",server=\"s1\"} NaN.*"
+       expect resp.body ~ ".*haproxy_server_limit_sessions{proxy=\"be\",server=\"s2\"} 12.*"
+
+       expect resp.body ~ ".*haproxy_backend_downtime_seconds_total{proxy=\"stats\"} NaN.*"
+       expect resp.body ~ ".*haproxy_backend_downtime_seconds_total{proxy=\"be\"} 0.*"
+       expect resp.body ~ ".*haproxy_server_downtime_seconds_total{proxy=\"be\",server=\"s1\"} NaN.*"
+       expect resp.body ~ ".*haproxy_server_downtime_seconds_total{proxy=\"be\",server=\"s2\"} 0.*"
+
+       expect resp.body ~ ".*haproxy_server_current_throttle{proxy=\"be\",server=\"s1\"} NaN.*"
+
+       expect resp.body ~ ".*haproxy_server_idle_connections_limit{proxy=\"be\",server=\"s1\"} NaN.*"
+       expect resp.body ~ ".*haproxy_server_idle_connections_limit{proxy=\"be\",server=\"s2\"} 42.*"
+
+       # test scope
        txreq -url "/metrics?scope="
        rxresp
        expect resp.status == 200
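
The s2 options added above map directly onto the non-NaN expectations: "check"
enables haproxy_server_check_failures_total and
haproxy_server_check_up_down_total, "maxqueue 10" sets
haproxy_server_queue_limit, "maxconn 12" sets haproxy_server_limit_sessions,
and "pool-max-conn 42" sets haproxy_server_idle_connections_limit. For
completeness, a minimal sketch of how such a test config typically exposes the
exporter on the stats socket queried by c1 (assuming the exporter is built
from contrib/prometheus-exporter; the actual frontend in prometheus.vtc is not
shown in these hunks):

    frontend stats
        mode http
        bind "fd@${stats}"
        # serve the Prometheus text format on /metrics; other paths fall through
        http-request use-service prometheus-exporter if { path /metrics }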