upStatus = newStatus;
if (!upStatus) {
latencyUsec = 0.0;
+ latencyUsecTCP = 0.0;
}
}
void setDown()
{
  // Force this backend into the Down state and wipe both latency
  // estimates (UDP and TCP) so stale numbers are not reported while
  // the server is marked unavailable.
  d_config.availability = Availability::Down;
  latencyUsecTCP = 0.0;
  latencyUsec = 0.0;
}
void setAuto()
{
  // Switch the backend to automatic availability: from here on the
  // periodic health checks decide whether it is considered up or down.
  d_config.availability = Availability::Auto;
  // NOTE(review): removed a stray EWMA update of tcpAvgConnectionDuration
  // that referenced `durationMs`, which is not in scope in this method --
  // that statement belongs in the TCP-metrics update path (where a
  // connection duration is actually measured), not in setAuto().
}
void updateTCPLatency(double udiff)
{
  // Fold one TCP query's measured latency (udiff, presumably in
  // microseconds given the member name -- confirm with the caller's
  // sentTime.udiff()) into the running estimate, using an
  // exponentially weighted moving average with a 1/128 weight for the
  // new sample. Keep the expression exactly as written: reassociating
  // the arithmetic would change floating-point rounding. The
  // read-modify-write is unsynchronized; the race is deliberately
  // tolerated (see the TSan suppression entry for
  // DownstreamState::updateTCPLatency).
  latencyUsecTCP = (127.0 * latencyUsecTCP / 128.0) + udiff / 128.0;
}
void incQueriesCount()
{
++queries;
try {
if (!d_healthCheckQuery) {
const double udiff = request.d_query.d_idstate.sentTime.udiff();
- d_ds->latencyUsecTCP = (127.0 * d_ds->latencyUsecTCP / 128.0) + udiff / 128.0;
+ d_ds->updateTCPLatency(udiff);
}
request.d_sender->handleResponse(now, TCPResponse(std::move(request.d_buffer), std::move(request.d_query.d_idstate), shared_from_this()));
--conn->d_ds->outstanding;
auto ids = std::move(it->second.d_query.d_idstate);
const double udiff = ids.sentTime.udiff();
- conn->d_ds->latencyUsecTCP = (127.0 * conn->d_ds->latencyUsecTCP / 128.0) + udiff / 128.0;
+ conn->d_ds->updateTCPLatency(udiff);
d_pendingResponses.erase(it);
/* marking as idle for now, so we can accept new queries if our queues are empty */
race:handleStats
race:ClientState::updateTCPMetrics
race:DownstreamState::updateTCPMetrics
+race:DownstreamState::updateTCPLatency
# There is a race when we update the status of a backend,
# but eventual consistency is fine there
race:DownstreamState::setDown