Part of #40708. Converts the congestion control stats from count-based running averages to an N-EWMA (N=100), so the per-stat count variables can be removed and old samples gradually age out of the reported averages.
/** Moving average of the cc->cwnd from each closed slow-start circuit. */
double cc_stats_circ_close_ss_cwnd_ma = 0;
-/* Running count of the above moving averages. Needed so we can update it. */
-static double stats_circ_close_cwnd_ma_count = 0;
-static double stats_circ_close_ss_cwnd_ma_count = 0;
-
/********* END VARIABLES ************/
/* Implement circuit handle helpers. */
* and a max RTT, and they are not the same. This prevents us from
* averaging and reporting unused and low-use circuits here */
if (circ->ccontrol->max_rtt_usec != circ->ccontrol->min_rtt_usec) {
- stats_circ_close_ss_cwnd_ma_count++;
cc_stats_circ_close_ss_cwnd_ma =
stats_update_running_avg(cc_stats_circ_close_ss_cwnd_ma,
- circ->ccontrol->cwnd,
- stats_circ_close_ss_cwnd_ma_count);
+ circ->ccontrol->cwnd);
}
} else {
- stats_circ_close_cwnd_ma_count++;
cc_stats_circ_close_cwnd_ma =
stats_update_running_avg(cc_stats_circ_close_cwnd_ma,
- circ->ccontrol->cwnd,
- stats_circ_close_cwnd_ma_count);
+ circ->ccontrol->cwnd);
}
}
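
The shape of the change at every call site is the same, sketched below for one stat. This is an illustration only, not part of the patch, and the function names are made up: the point is that the N-EWMA depends only on the previous average and a fixed N, so the per-stat *_ma_count variables and their increments can simply be deleted.

#include <stdint.h>

/* Before: a true lifetime mean, which needs a separate sample count. */
static double
old_style_update(double ma, double cwnd, double *count)
{
  (*count)++;
  return ((ma * (*count - 1)) + cwnd) / *count;
}

/* After: an N-EWMA with N=100, matching the stats_update_running_avg()
 * macro added to stats.h further down. Only the previous average is kept,
 * and circuits closed long ago gradually age out of the statistic. */
static double
new_style_update(double ma, double cwnd)
{
  const uint64_t N = 100;
  if (ma < 0.0000002) /* bootstrap: the first sample becomes the average */
    return cwnd;
  return (2*cwnd + (N-1)*ma) / (N+1);
}
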
uint64_t cc_stats_flow_num_xoff_sent;
uint64_t cc_stats_flow_num_xon_sent;
double cc_stats_flow_xoff_outbuf_ma = 0;
-static double cc_stats_flow_xoff_outbuf_ma_count = 0;
double cc_stats_flow_xon_outbuf_ma = 0;
-static double cc_stats_flow_xon_outbuf_ma_count = 0;
/* In normal operation, we can get a burst of up to 32 cells before returning
* to libevent to flush the outbuf. This is a heuristic from hardcoded values
total_buffered, buffer_limit_xoff);
tor_trace(TR_SUBSYS(cc), TR_EV(flow_decide_xoff_sending), stream);
- cc_stats_flow_xoff_outbuf_ma_count++;
cc_stats_flow_xoff_outbuf_ma =
stats_update_running_avg(cc_stats_flow_xoff_outbuf_ma,
- total_buffered,
- cc_stats_flow_xoff_outbuf_ma_count);
+ total_buffered);
circuit_send_stream_xoff(stream);
total_buffered);
tor_trace(TR_SUBSYS(cc), TR_EV(flow_decide_xon_rate_change), stream);
- cc_stats_flow_xon_outbuf_ma_count++;
cc_stats_flow_xon_outbuf_ma =
stats_update_running_avg(cc_stats_flow_xon_outbuf_ma,
- total_buffered,
- cc_stats_flow_xon_outbuf_ma_count);
+ total_buffered);
circuit_send_stream_xon(stream);
}
double cc_stats_vegas_delta_drop_ma = 0;
double cc_stats_vegas_ss_csig_blocked_ma = 0;
double cc_stats_vegas_csig_blocked_ma = 0;
-/* Running count of this moving average. Needed so we can update it. */
-static double stats_cwnd_exit_ss_ma_count = 0;
-static double stats_cwnd_gamma_drop_ma_count = 0;
-static double stats_cwnd_delta_drop_ma_count = 0;
-static double stats_ss_csig_blocked_ma_count = 0;
-static double stats_csig_blocked_ma_count = 0;
/** Stats on how many times we reached "delta" param. */
uint64_t cc_stats_vegas_above_delta = 0;
congestion_control_vegas_log(circ, cc);
/* Update running cc->cwnd average for metrics. */
- stats_cwnd_exit_ss_ma_count++;
cc_stats_vegas_exit_ss_cwnd_ma =
stats_update_running_avg(cc_stats_vegas_exit_ss_cwnd_ma,
- cc->cwnd, stats_cwnd_exit_ss_ma_count);
+ cc->cwnd);
/* We need to report that slow start has exited ASAP,
* for sbws bandwidth measurement. */
/* Account the amount we reduced the cwnd by for the gamma cutoff */
cwnd_diff = (old_cwnd > cc->cwnd ? old_cwnd - cc->cwnd : 0);
- stats_cwnd_gamma_drop_ma_count++;
cc_stats_vegas_gamma_drop_ma =
stats_update_running_avg(cc_stats_vegas_gamma_drop_ma,
- cwnd_diff, stats_cwnd_gamma_drop_ma_count);
+ cwnd_diff);
- stats_ss_csig_blocked_ma_count++;
/* Compute the percentage we experience a blocked csig vs RTT sig */
if (cc->blocked_chan) {
cc_stats_vegas_ss_csig_blocked_ma =
stats_update_running_avg(cc_stats_vegas_ss_csig_blocked_ma,
- 100, stats_ss_csig_blocked_ma_count);
+ 100);
} else {
cc_stats_vegas_ss_csig_blocked_ma =
stats_update_running_avg(cc_stats_vegas_ss_csig_blocked_ma,
- 0, stats_ss_csig_blocked_ma_count);
+ 0);
}
congestion_control_vegas_exit_slow_start(circ, cc);
/* Account the amount we reduced the cwnd by for the gamma cutoff */
cwnd_diff = (old_cwnd > cc->cwnd ? old_cwnd - cc->cwnd : 0);
- stats_cwnd_delta_drop_ma_count++;
cc_stats_vegas_delta_drop_ma =
stats_update_running_avg(cc_stats_vegas_delta_drop_ma,
- cwnd_diff, stats_cwnd_delta_drop_ma_count);
+ cwnd_diff);
cc_stats_vegas_above_delta++;
} else if (queue_use > cc->vegas_params.beta || cc->blocked_chan) {
- stats_csig_blocked_ma_count++;
/* Compute the percentage we experience a blocked csig vs RTT sig */
if (cc->blocked_chan) {
cc_stats_vegas_csig_blocked_ma =
stats_update_running_avg(cc_stats_vegas_csig_blocked_ma,
- 100, stats_csig_blocked_ma_count);
+ 100);
} else {
cc_stats_vegas_csig_blocked_ma =
stats_update_running_avg(cc_stats_vegas_csig_blocked_ma,
- 0, stats_csig_blocked_ma_count);
+ 0);
}
cc->cwnd -= CWND_INC(cc);
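
Feeding 100 into the average when the channel is blocked and 0 otherwise makes the moving average an estimate of the percentage of update rounds that saw a blocked channel, weighted toward roughly the last N rounds. A minimal standalone sketch of that behavior (not Tor code; the helper just copies the math added to stats.h below):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Same math as n_count_ewma_double() in stats.h below. */
static double
n_ewma(double avg, double value, uint64_t N)
{
  if (avg < 0.0000002)
    return value;
  return (2*value + (N-1)*avg) / (N+1);
}

int
main(void)
{
  double blocked_ma = 0;
  srand(42);
  /* Simulate 5000 update rounds in which the channel happens to be blocked
   * about 30% of the time: the moving average hovers near 30 (i.e. ~30%),
   * dominated by the most recent ~100 rounds. */
  for (int i = 0; i < 5000; i++) {
    int blocked = (rand() % 100) < 30;
    blocked_ma = n_ewma(blocked_ma, blocked ? 100 : 0, 100);
  }
  printf("estimated blocked percentage: %.1f%%\n", blocked_ma);
  return 0;
}
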
#ifndef TOR_STATS_H
#define TOR_STATS_H
-/** Update an average making it a "running average". The "avg" is the current
- * value that will be updated to the new one. The "value" is the new value to
- * add to the average and "n" is the new count as in including the "value". */
+/**
+ * Compute an N-count EWMA, aka N-EWMA. N-EWMA is defined as:
+ *  EWMA = alpha*value + (1-alpha)*EWMA_prev
+ * with alpha = 2/(N+1).
+ *
+ * This works out to:
+ *   EWMA = value*2/(N+1) + EWMA_prev*(N-1)/(N+1)
+ *        = (value*2 + EWMA_prev*(N-1))/(N+1)
+ */
static inline double
-stats_update_running_avg(double avg, double value, double n)
+n_count_ewma_double(double avg, double value, uint64_t N)
{
- return ((avg * (n - 1)) + value) / n;
+  /* If the average was not previously computed, return the value directly.
+   * We compare against a small epsilon rather than exactly 0.0 because our
+   * C warning flags forbid exact floating-point comparisons, so we cannot
+   * check whether the double is still at its initial 0.0. The side effect
+   * is that a legitimate average below the epsilon is also treated as
+   * uninitialized, which is a surprising edge case to keep in mind. */
+  if (avg < 0.0000002)
+    return value;
+  else
+    return (2*value + (N-1)*avg)/(N+1);
}
+/* For most stats, an N-EWMA of 100 is sufficient. */
+#define DEFAULT_STATS_N_EWMA_COUNT 100
+#define stats_update_running_avg(avg, value) \
+ n_count_ewma_double(avg, value, DEFAULT_STATS_N_EWMA_COUNT)
+
#endif /* !defined(TOR_STATS_H) */
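
A small standalone sanity check of the new helper (illustrative only, using the definitions above): the first sample bootstraps the average, and with the default N of 100 each later sample moves the average 2/(N+1), roughly 2%, of the way toward the new value, so the statistic is dominated by the most recent ~100 samples.

#include <stdint.h>
#include <stdio.h>

/* Same definition as in stats.h above. */
static double
n_count_ewma_double(double avg, double value, uint64_t N)
{
  if (avg < 0.0000002)
    return value;
  return (2*value + (N-1)*avg) / (N+1);
}

int
main(void)
{
  double ma = 0.0;

  /* Bootstrap: the first sample becomes the average verbatim. */
  ma = n_count_ewma_double(ma, 100.0, 100);
  printf("after 1 sample of 100: %.2f\n", ma);        /* 100.00 */

  /* A single outlier only nudges the average ~2% of the way toward it... */
  ma = n_count_ewma_double(ma, 500.0, 100);
  printf("after 1 sample of 500: %.2f\n", ma);        /* ~107.92 */

  /* ...but a sustained shift dominates after about 100 samples. */
  for (int i = 0; i < 100; i++)
    ma = n_count_ewma_double(ma, 500.0, 100);
  printf("after 100 more samples of 500: %.2f\n", ma); /* ~447 */

  return 0;
}
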