Safe right now as LWResult::d_rcode gets assigned from the 4 bit
rcode in the header. But that might change one day. I'd rather
make LWResult::d_rcode a uint8_t, but that causes a conflict with
the OOB resolving code that does not distinguish between res
and d_rcode.
}
vector<DNSRecord> d_records;
+ uint32_t d_usec{0};
int d_rcode{0};
bool d_validpacket{false};
bool d_aabit{false}, d_tcbit{false};
- uint32_t d_usec{0};
bool d_haveEDNS{false};
};
}
return *this;
}
- static const size_t numberoOfRCodes = 16;
- std::array<uint64_t, numberoOfRCodes> rcodeCounters;
+ static const size_t numberOfRCodes = 16;
+ std::array<uint64_t, numberOfRCodes> rcodeCounters;
};
// An RCodes histogram
RCodeCounters auth{};
}
accountAuthLatency(lwr.d_usec, remoteIP.sin4.sin_family);
- ++t_Counters.at(rec::RCode::auth).rcodeCounters.at(static_cast<uint8_t>(lwr.d_rcode));
+ if (lwr.d_rcode >= 0 && lwr.d_rcode < static_cast<decltype(lwr.d_rcode)>(rec::Counters::RCodeCounters::numberOfRCodes)) {
+ ++t_Counters.at(rec::RCode::auth).rcodeCounters.at(static_cast<uint8_t>(lwr.d_rcode));
+ }
if (!dontThrottle) {
dontThrottle = shouldNotThrottle(&nsName, &remoteIP);