void CommunicatorClass::addSuckRequest(const DNSName &domain, const ComboAddress& master)
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
SuckRequest sr;
sr.domain = domain;
sr.master = master;
vector<DNSRecord> rrset;
{
DNSZoneRecord zrr;
- B.lookup(QType(g.first.second), g.first.first+domain, di.id);
- while(B.get(zrr)) {
+ di.backend->lookup(QType(g.first.second), g.first.first+domain, di.id);
+ while(di.backend->get(zrr)) {
zrr.dr.d_name.makeUsRelative(domain);
rrset.push_back(zrr.dr);
}
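// Illustrative sketch, not part of the patch: the lookup above is now issued on the
// zone's own backend (di.backend) instead of the shared B, but the pattern is unchanged:
// start a query with lookup(), then drain results with get() until it returns false.
// The minimal interface below is an assumption made only for this example.
#include <string>
#include <vector>

struct RecordSource
{
  virtual ~RecordSource() = default;
  // Starts a query; results are then drained one record at a time via get().
  virtual void lookup(const std::string& qtype, const std::string& qname, int zoneId) = 0;
  virtual bool get(std::string& record) = 0; // false once every record has been returned
};

void drainRecords(RecordSource& backend, const std::string& qname, int zoneId, std::vector<std::string>& out)
{
  backend.lookup("ANY", qname, zoneId);
  std::string record;
  while (backend.get(record))
    out.push_back(record);
}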
void CommunicatorClass::suck(const DNSName &domain, const ComboAddress& remote)
{
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
if(d_inprogress.count(domain)) {
return;
}
}
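// Illustrative sketch, not part of the patch: the old Lock wrapper is replaced with
// std::lock_guard, which keeps the same scoped semantics -- the mutex is taken on
// construction and released when the guard leaves the enclosing block (as in the
// anonymous scope above), even if an exception is thrown. Member names and types
// below are assumptions for the example.
#include <mutex>
#include <set>
#include <string>

struct TransferTracker
{
  std::mutex d_lock;
  std::set<std::string> d_inprogress;

  bool alreadyInProgress(const std::string& domain)
  {
    std::lock_guard<std::mutex> l(d_lock); // locked for the rest of this scope
    return d_inprogress.count(domain) != 0;
  } // d_lock is released here
};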
catch(ResolverException &re) {
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
// The AXFR probably failed due to a problem on the master server. If SOA-checks against this master
// still succeed, we would constantly try to AXFR the zone. To avoid this, we add the zone to the list of
// failed slave-checks. This will suspend slave-checks (and subsequent AXFR) for this zone for some time.
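// Illustrative sketch, not part of the patch: one way to suspend checks after a failed
// AXFR is to record a per-zone failure count plus the time of the last attempt and skip
// the zone until a back-off interval has passed. The map layout mirrors how
// d_failedSlaveRefresh is used further down; the helper name and the back-off constant
// are assumptions.
#include <cstdint>
#include <ctime>
#include <map>
#include <string>
#include <utility>

// zone -> (consecutive failures, time of the last attempt)
std::map<std::string, std::pair<uint64_t, time_t>> failedRefresh;

bool shouldSkipZone(const std::string& zone, time_t interval = 60)
{
  const auto entry = failedRefresh.find(zone);
  if (entry == failedRefresh.end())
    return false; // never failed, check it as usual
  // back-off grows with the number of consecutive failures
  return time(nullptr) < entry->second.second + static_cast<time_t>(entry->second.first) * interval;
}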
Identifier send(DomainNotificationInfo& dni)
{
- random_shuffle(dni.di.masters.begin(), dni.di.masters.end());
+ shuffle(dni.di.masters.begin(), dni.di.masters.end(), pdns::dns_random_engine());
try {
return std::make_tuple(dni.di.zone,
*dni.di.masters.begin(),
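// Illustrative sketch, not part of the patch: std::random_shuffle was deprecated in
// C++14 and removed in C++17. std::shuffle takes an explicit UniformRandomBitGenerator;
// pdns::dns_random_engine is assumed to be such a generator wrapping the project's RNG.
// A plain standard-library equivalent of the call above:
#include <algorithm>
#include <random>
#include <vector>

template <typename T>
void shuffleInPlace(std::vector<T>& values)
{
  std::random_device rd;
  std::mt19937 gen(rd());                           // any UniformRandomBitGenerator works
  std::shuffle(values.begin(), values.end(), gen);
}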
void CommunicatorClass::addSlaveCheckRequest(const DomainInfo& di, const ComboAddress& remote)
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
DomainInfo ours = di;
ours.backend = 0;
void CommunicatorClass::addTrySuperMasterRequest(const DNSPacket& p)
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
DNSPacket ours = p;
if(d_potentialsupermasters.insert(ours).second)
d_any_sem.post(); // kick the loop!
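// Illustrative sketch, not part of the patch: d_any_sem.post() wakes the main loop after
// new work has been queued under the lock. A standard-library equivalent of that
// producer/consumer hand-off uses a condition_variable; all names here are assumptions.
#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>

std::mutex workLock;
std::condition_variable workAvailable;
std::queue<std::string> work;

void queueWork(const std::string& item)
{
  {
    std::lock_guard<std::mutex> l(workLock);
    work.push(item);
  }
  workAvailable.notify_one(); // kick the loop
}

void waitForWork()
{
  std::unique_lock<std::mutex> l(workLock);
  workAvailable.wait(l, [] { return !work.empty(); }); // returns once something is queued
}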
vector<DomainNotificationInfo> sdomains;
set<DNSPacket, cmp> trysuperdomains;
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
set<DomainInfo> requeue;
+ rdomains.reserve(d_tocheck.size());
for(const auto& di: d_tocheck) {
if(d_inprogress.count(di.zone)) {
g_log<<Logger::Debug<<"Got NOTIFY for "<<di.zone<<" while AXFR in progress, requeueing SOA check"<<endl;
TSIGRecordContent trc;
DNSName tsigkeyname;
dp.getTSIGDetails(&trc, &tsigkeyname);
- P->trySuperMasterSynchronous(dp, tsigkeyname); // FIXME could use some error loging
+ P->trySuperMasterSynchronous(dp, tsigkeyname); // FIXME could use some error logging
}
if(rdomains.empty()) { // if we have priority domains, check them first
B->getUnfreshSlaveInfos(&rdomains);
}
+ sdomains.reserve(rdomains.size());
DNSSECKeeper dk(B); // NOW HEAR THIS! This DK uses our B backend, so no interleaved access!
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
domains_by_name_t& nameindex=boost::multi_index::get<IDTag>(d_suckdomains);
time_t now = time(0);
dni.localaddr.sin4.sin_family = 0;
}
- sdomains.push_back(dni);
+ sdomains.push_back(std::move(dni));
}
}
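// Illustrative sketch, not part of the patch: reserving up front and moving elements in
// (as with sdomains above) avoids repeated reallocations and per-element copies while the
// list of domains to check is built. The element type is a stand-in for
// DomainNotificationInfo; the function name is an assumption.
#include <string>
#include <utility>
#include <vector>

struct NotificationInfo
{
  std::string zone;
  std::string localaddr;
};

std::vector<NotificationInfo> buildCheckList(const std::vector<std::string>& zones)
{
  std::vector<NotificationInfo> out;
  out.reserve(zones.size());        // one allocation instead of repeated regrowth
  for (const auto& zone : zones) {
    NotificationInfo dni{zone, std::string()};
    out.push_back(std::move(dni));  // move the freshly built element instead of copying it
  }
  return out;
}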
if(sdomains.empty())
{
if(d_slaveschanged) {
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
g_log<<Logger::Warning<<"No new unfresh slave domains, "<<d_suckdomains.size()<<" queued for AXFR already, "<<d_inprogress.size()<<" in progress"<<endl;
}
d_slaveschanged = !rdomains.empty();
return;
}
else {
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
g_log<<Logger::Warning<<sdomains.size()<<" slave domain"<<(sdomains.size()>1 ? "s" : "")<<" need"<<
(sdomains.size()>1 ? "" : "s")<<
" checking, "<<d_suckdomains.size()<<" queued for AXFR"<<endl;
if(!ssr.d_freshness.count(di.id)) { // If we don't have an answer for the domain
uint64_t newCount = 1;
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
const auto failedEntry = d_failedSlaveRefresh.find(di.zone);
if (failedEntry != d_failedSlaveRefresh.end())
newCount = d_failedSlaveRefresh[di.zone].first + 1;
}
{
- Lock l(&d_lock);
+ std::lock_guard<std::mutex> l(d_lock);
const auto wasFailedDomain = d_failedSlaveRefresh.find(di.zone);
if (wasFailedDomain != d_failedSlaveRefresh.end())
d_failedSlaveRefresh.erase(di.zone);